Browse Source

Initial commit

Jan Sušnik 8 years ago
commit
18444d70b0
100 changed files with 16350 additions and 0 deletions
  1. 25 0
      Gemfile
  2. 202 0
      LICENSE
  3. 9 0
      MANIFEST.in
  4. 166 0
      Puppetfile
  5. 166 0
      Puppetfile.ironic
  6. 57 0
      README.md
  7. 230 0
      README_ORIGINAL
  8. 29 0
      Rakefile
  9. 5 0
      bindep.txt
  10. 153 0
      docs/Makefile
  11. 255 0
      docs/conf.py
  12. 1171 0
      docs/packstack.rst
  13. 0 0
      packstack/__init__.py
  14. 3 0
      packstack/installer/.gitignore
  15. 202 0
      packstack/installer/LICENSE
  16. 0 0
      packstack/installer/__init__.py
  17. 104 0
      packstack/installer/basedefs.py
  18. 0 0
      packstack/installer/core/__init__.py
  19. 409 0
      packstack/installer/core/drones.py
  20. 60 0
      packstack/installer/core/parameters.py
  21. 109 0
      packstack/installer/core/sequences.py
  22. 89 0
      packstack/installer/exceptions.py
  23. 100 0
      packstack/installer/output_messages.py
  24. 159 0
      packstack/installer/processors.py
  25. 1045 0
      packstack/installer/run_setup.py
  26. 150 0
      packstack/installer/setup_controller.py
  27. 41 0
      packstack/installer/utils/__init__.py
  28. 137 0
      packstack/installer/utils/datastructures.py
  29. 41 0
      packstack/installer/utils/decorators.py
  30. 137 0
      packstack/installer/utils/network.py
  31. 152 0
      packstack/installer/utils/shell.py
  32. 63 0
      packstack/installer/utils/shortcuts.py
  33. 69 0
      packstack/installer/utils/strings.py
  34. 398 0
      packstack/installer/validators.py
  35. 0 0
      packstack/modules/__init__.py
  36. 99 0
      packstack/modules/common.py
  37. 83 0
      packstack/modules/documentation.py
  38. 172 0
      packstack/modules/ospluginutils.py
  39. 121 0
      packstack/modules/puppet.py
  40. 0 0
      packstack/plugins/__init__.py
  41. 203 0
      packstack/plugins/amqp_002.py
  42. 117 0
      packstack/plugins/aodh_810.py
  43. 261 0
      packstack/plugins/ceilometer_800.py
  44. 756 0
      packstack/plugins/cinder_250.py
  45. 211 0
      packstack/plugins/dashboard_500.py
  46. 130 0
      packstack/plugins/glance_200.py
  47. 103 0
      packstack/plugins/gnocchi_790.py
  48. 216 0
      packstack/plugins/heat_650.py
  49. 108 0
      packstack/plugins/ironic_275.py
  50. 791 0
      packstack/plugins/keystone_100.py
  51. 114 0
      packstack/plugins/magnum_920.py
  52. 636 0
      packstack/plugins/manila_355.py
  53. 117 0
      packstack/plugins/mariadb_003.py
  54. 111 0
      packstack/plugins/nagios_910.py
  55. 882 0
      packstack/plugins/neutron_350.py
  56. 533 0
      packstack/plugins/nova_300.py
  57. 78 0
      packstack/plugins/openstack_client_400.py
  58. 103 0
      packstack/plugins/panko_820.py
  59. 63 0
      packstack/plugins/postscript_951.py
  60. 1497 0
      packstack/plugins/prescript_000.py
  61. 353 0
      packstack/plugins/provision_700.py
  62. 279 0
      packstack/plugins/puppet_950.py
  63. 108 0
      packstack/plugins/sahara_900.py
  64. 294 0
      packstack/plugins/ssl_001.py
  65. 326 0
      packstack/plugins/swift_600.py
  66. 158 0
      packstack/plugins/trove_850.py
  67. 14 0
      packstack/puppet/modules/packstack/Gemfile
  68. 6 0
      packstack/puppet/modules/packstack/Rakefile
  69. 16 0
      packstack/puppet/modules/packstack/lib/facter/default_hypervisor.rb
  70. 8 0
      packstack/puppet/modules/packstack/lib/facter/home_dir.rb
  71. 14 0
      packstack/puppet/modules/packstack/lib/facter/mariadb.rb
  72. 218 0
      packstack/puppet/modules/packstack/lib/facter/netns.py
  73. 16 0
      packstack/puppet/modules/packstack/lib/facter/netns_support.rb
  74. 4 0
      packstack/puppet/modules/packstack/lib/facter/network.rb
  75. 30 0
      packstack/puppet/modules/packstack/lib/puppet/parser/functions/choose_my_ip.rb
  76. 55 0
      packstack/puppet/modules/packstack/lib/puppet/parser/functions/force_interface.rb
  77. 20 0
      packstack/puppet/modules/packstack/lib/puppet/parser/functions/force_ip.rb
  78. 21 0
      packstack/puppet/modules/packstack/lib/puppet/parser/functions/hiera_undef.rb
  79. 89 0
      packstack/puppet/modules/packstack/manifests/amqp.pp
  80. 40 0
      packstack/puppet/modules/packstack/manifests/aodh.pp
  81. 37 0
      packstack/puppet/modules/packstack/manifests/aodh/rabbitmq.pp
  82. 40 0
      packstack/puppet/modules/packstack/manifests/apache.pp
  83. 90 0
      packstack/puppet/modules/packstack/manifests/ceilometer.pp
  84. 8 0
      packstack/puppet/modules/packstack/manifests/ceilometer/nova_disabled.pp
  85. 31 0
      packstack/puppet/modules/packstack/manifests/ceilometer/rabbitmq.pp
  86. 101 0
      packstack/puppet/modules/packstack/manifests/chrony.pp
  87. 73 0
      packstack/puppet/modules/packstack/manifests/cinder.pp
  88. 16 0
      packstack/puppet/modules/packstack/manifests/cinder/backend/gluster.pp
  89. 96 0
      packstack/puppet/modules/packstack/manifests/cinder/backend/lvm.pp
  90. 133 0
      packstack/puppet/modules/packstack/manifests/cinder/backend/netapp.pp
  91. 16 0
      packstack/puppet/modules/packstack/manifests/cinder/backend/nfs.pp
  92. 20 0
      packstack/puppet/modules/packstack/manifests/cinder/backend/solidfire.pp
  93. 14 0
      packstack/puppet/modules/packstack/manifests/cinder/backend/vmdk.pp
  94. 12 0
      packstack/puppet/modules/packstack/manifests/cinder/backup.pp
  95. 4 0
      packstack/puppet/modules/packstack/manifests/cinder/ceilometer.pp
  96. 34 0
      packstack/puppet/modules/packstack/manifests/cinder/rabbitmq.pp
  97. 52 0
      packstack/puppet/modules/packstack/manifests/firewall.pp
  98. 56 0
      packstack/puppet/modules/packstack/manifests/glance.pp
  99. 7 0
      packstack/puppet/modules/packstack/manifests/glance/backend/file.pp
  100. 0 0
      packstack/puppet/modules/packstack/manifests/glance/backend/swift.pp

+ 25 - 0
Gemfile

@@ -0,0 +1,25 @@
+source ENV["GEM_SOURCE"] || "https://rubygems.org"
+
+group :development, :test do
+  gem 'puppetlabs_spec_helper', :require => false
+
+  gem 'puppet-lint-param-docs'
+  gem 'puppet-lint-absolute_classname-check'
+  gem 'puppet-lint-absolute_template_path'
+  gem 'puppet-lint-trailing_newline-check'
+
+  # Puppet 4.x related lint checks
+  gem 'puppet-lint-unquoted_string-check'
+  gem 'puppet-lint-leading_zero-check'
+  gem 'puppet-lint-variable_contains_upcase'
+  gem 'puppet-lint-numericvariable'
+
+end
+
+if puppetversion = ENV['PUPPET_GEM_VERSION']
+  gem 'puppet', puppetversion, :require => false
+else
+  gem 'puppet', :require => false
+end
+
+# vim:ft=ruby

+ 202 - 0
LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 9 - 0
MANIFEST.in

@@ -0,0 +1,9 @@
+include docs/*
+include LICENSE
+include README.md
+recursive-include packstack/puppet *
+recursive-include packstack/templates *
+global-exclude .gitignore
+global-exclude .gitmodules
+global-exclude .git
+prune packstack/puppet/modules/*/.git

+ 166 - 0
Puppetfile

@@ -0,0 +1,166 @@
+moduledir '/usr/share/openstack-puppet/modules'
+
+## OpenStack modules
+
+mod 'aodh',
+  :git => 'https://github.com/openstack/puppet-aodh',
+  :ref => 'master'
+
+mod 'ceilometer',
+  :git => 'https://github.com/openstack/puppet-ceilometer',
+  :ref => 'master'
+
+mod 'cinder',
+  :git => 'https://github.com/openstack/puppet-cinder',
+  :ref => 'master'
+
+mod 'glance',
+  :git => 'https://github.com/openstack/puppet-glance',
+  :ref => 'master'
+
+mod 'gnocchi',
+  :git => 'https://github.com/openstack/puppet-gnocchi',
+  :ref => 'master'
+
+mod 'heat',
+  :git => 'https://github.com/openstack/puppet-heat',
+  :ref => 'master'
+
+mod 'magnum',
+  :git => 'https://github.com/openstack/puppet-magnum',
+  :ref => 'master'
+
+mod 'horizon',
+  :git => 'https://github.com/openstack/puppet-horizon',
+  :ref => 'master'
+
+mod 'ironic',
+  :git => 'https://github.com/openstack/puppet-ironic',
+  :ref => 'master'
+
+mod 'keystone',
+  :git => 'https://github.com/openstack/puppet-keystone',
+  :ref => 'master'
+
+mod 'manila',
+  :git => 'https://github.com/openstack/puppet-manila',
+  :ref => 'master'
+
+mod 'neutron',
+  :git => 'https://github.com/openstack/puppet-neutron',
+  :ref => 'master'
+
+mod 'nova',
+  :git => 'https://github.com/openstack/puppet-nova',
+  :ref => 'master'
+
+mod 'openstack_extras',
+  :git => 'https://github.com/openstack/puppet-openstack_extras',
+  :ref => 'master'
+
+mod 'openstacklib',
+  :git => 'https://github.com/openstack/puppet-openstacklib',
+  :ref => 'master'
+
+mod 'oslo',
+  :git => 'https://github.com/openstack/puppet-oslo',
+  :ref => 'master'
+
+mod 'sahara',
+  :git => 'https://github.com/openstack/puppet-sahara',
+  :ref => 'master'
+
+mod 'swift',
+  :git => 'https://github.com/openstack/puppet-swift',
+  :ref => 'master'
+
+mod 'tempest',
+  :git => 'https://github.com/openstack/puppet-tempest',
+  :ref => 'master'
+
+mod 'trove',
+  :git => 'https://github.com/openstack/puppet-trove',
+  :ref => 'master'
+
+mod 'vswitch',
+  :git => 'https://github.com/openstack/puppet-vswitch',
+  :ref => 'master'
+
+## Non-OpenStack modules
+
+mod 'apache',
+  :git => 'https://github.com/puppetlabs/puppetlabs-apache',
+  :ref => 'master'
+
+mod 'certmonger',
+  :git => 'https://github.com/rcritten/puppet-certmonger',
+  :ref => 'master'
+
+mod 'concat',
+  :git => 'https://github.com/puppetlabs/puppetlabs-concat',
+  :ref => 'master'
+
+mod 'firewall',
+  :git => 'https://github.com/puppetlabs/puppetlabs-firewall',
+  :ref => 'master'
+
+mod 'inifile',
+  :git => 'https://github.com/puppetlabs/puppetlabs-inifile',
+  :ref => 'master'
+
+mod 'memcached',
+  :git => 'https://github.com/saz/puppet-memcached',
+  :ref => 'master'
+
+mod 'mongodb',
+  :git => 'https://github.com/puppetlabs/puppetlabs-mongodb',
+  :ref => 'master'
+
+mod 'mysql',
+  :git => 'https://github.com/puppetlabs/puppetlabs-mysql',
+  :ref => 'master'
+
+mod 'nssdb',
+  :git => 'https://github.com/rcritten/puppet-nssdb',
+  :ref => 'master'
+
+mod 'panko',
+  :git => 'https://github.com/openstack/puppet-panko',
+  :ref => 'master'
+
+mod 'rabbitmq',
+  :git => 'https://github.com/puppetlabs/puppetlabs-rabbitmq',
+  :ref => 'master'
+
+mod 'redis',
+  :git => 'https://github.com/arioch/puppet-redis',
+  :ref => 'master'
+
+mod 'remote',
+  :git => 'https://github.com/paramite/puppet-remote',
+  :ref => 'master'
+
+mod 'rsync',
+  :git => 'https://github.com/puppetlabs/puppetlabs-rsync',
+  :ref => 'master'
+
+mod 'ssh',
+  :git => 'https://github.com/saz/puppet-ssh',
+  :ref => 'master'
+
+mod 'stdlib',
+  :git => 'https://github.com/puppetlabs/puppetlabs-stdlib',
+  :ref => 'master'
+
+mod 'sysctl',
+  :git => 'https://github.com/duritong/puppet-sysctl',
+  :ref => 'master'
+
+mod 'vcsrepo',
+  :git => 'https://github.com/puppetlabs/puppetlabs-vcsrepo',
+  :ref => 'master'
+
+mod 'xinetd',
+  :git => 'https://github.com/puppetlabs/puppetlabs-xinetd',
+  :ref => 'master'
+

+ 166 - 0
Puppetfile.ironic

@@ -0,0 +1,166 @@
+moduledir '/usr/share/openstack-puppet/modules'
+
+## OpenStack modules
+
+mod 'aodh',
+  :git => 'https://github.com/openstack/puppet-aodh',
+  :ref => 'master'
+
+mod 'ceilometer',
+  :git => 'https://github.com/openstack/puppet-ceilometer',
+  :ref => 'master'
+
+mod 'cinder',
+  :git => 'https://github.com/openstack/puppet-cinder',
+  :ref => 'master'
+
+mod 'glance',
+  :git => 'https://github.com/openstack/puppet-glance',
+  :ref => 'master'
+
+mod 'gnocchi',
+  :git => 'https://github.com/openstack/puppet-gnocchi',
+  :ref => 'master'
+
+mod 'heat',
+  :git => 'https://github.com/openstack/puppet-heat',
+  :ref => 'master'
+
+mod 'magnum',
+  :git => 'https://github.com/openstack/puppet-magnum',
+  :ref => 'master'
+
+mod 'horizon',
+  :git => 'https://github.com/openstack/puppet-horizon',
+  :ref => 'master'
+
+mod 'ironic',
+  :git => 'https://github.com/openstack/puppet-ironic',
+  :ref => 'master'
+
+mod 'keystone',
+  :git => 'https://github.com/openstack/puppet-keystone',
+  :ref => 'master'
+
+mod 'manila',
+  :git => 'https://github.com/openstack/puppet-manila',
+  :ref => 'master'
+
+mod 'neutron',
+  :git => 'https://github.com/openstack/puppet-neutron',
+  :ref => 'master'
+
+mod 'nova',
+  :git => 'https://git.susnik.work/jan/puppet-nova_ironic_ocata',
+  :ref => 'master'
+
+mod 'openstack_extras',
+  :git => 'https://github.com/openstack/puppet-openstack_extras',
+  :ref => 'master'
+
+mod 'openstacklib',
+  :git => 'https://github.com/openstack/puppet-openstacklib',
+  :ref => 'master'
+
+mod 'oslo',
+  :git => 'https://github.com/openstack/puppet-oslo',
+  :ref => 'master'
+
+mod 'sahara',
+  :git => 'https://github.com/openstack/puppet-sahara',
+  :ref => 'master'
+
+mod 'swift',
+  :git => 'https://github.com/openstack/puppet-swift',
+  :ref => 'master'
+
+mod 'tempest',
+  :git => 'https://github.com/openstack/puppet-tempest',
+  :ref => 'master'
+
+mod 'trove',
+  :git => 'https://github.com/openstack/puppet-trove',
+  :ref => 'master'
+
+mod 'vswitch',
+  :git => 'https://github.com/openstack/puppet-vswitch',
+  :ref => 'master'
+
+## Non-OpenStack modules
+
+mod 'apache',
+  :git => 'https://github.com/puppetlabs/puppetlabs-apache',
+  :ref => 'master'
+
+mod 'certmonger',
+  :git => 'https://github.com/rcritten/puppet-certmonger',
+  :ref => 'master'
+
+mod 'concat',
+  :git => 'https://github.com/puppetlabs/puppetlabs-concat',
+  :ref => 'master'
+
+mod 'firewall',
+  :git => 'https://github.com/puppetlabs/puppetlabs-firewall',
+  :ref => 'master'
+
+mod 'inifile',
+  :git => 'https://github.com/puppetlabs/puppetlabs-inifile',
+  :ref => 'master'
+
+mod 'memcached',
+  :git => 'https://github.com/saz/puppet-memcached',
+  :ref => 'master'
+
+mod 'mongodb',
+  :git => 'https://github.com/puppetlabs/puppetlabs-mongodb',
+  :ref => 'master'
+
+mod 'mysql',
+  :git => 'https://github.com/puppetlabs/puppetlabs-mysql',
+  :ref => 'master'
+
+mod 'nssdb',
+  :git => 'https://github.com/rcritten/puppet-nssdb',
+  :ref => 'master'
+
+mod 'panko',
+  :git => 'https://github.com/openstack/puppet-panko',
+  :ref => 'master'
+
+mod 'rabbitmq',
+  :git => 'https://github.com/puppetlabs/puppetlabs-rabbitmq',
+  :ref => 'master'
+
+mod 'redis',
+  :git => 'https://github.com/arioch/puppet-redis',
+  :ref => 'master'
+
+mod 'remote',
+  :git => 'https://github.com/paramite/puppet-remote',
+  :ref => 'master'
+
+mod 'rsync',
+  :git => 'https://github.com/puppetlabs/puppetlabs-rsync',
+  :ref => 'master'
+
+mod 'ssh',
+  :git => 'https://github.com/saz/puppet-ssh',
+  :ref => 'master'
+
+mod 'stdlib',
+  :git => 'https://github.com/puppetlabs/puppetlabs-stdlib',
+  :ref => 'master'
+
+mod 'sysctl',
+  :git => 'https://github.com/duritong/puppet-sysctl',
+  :ref => 'master'
+
+mod 'vcsrepo',
+  :git => 'https://github.com/puppetlabs/puppetlabs-vcsrepo',
+  :ref => 'master'
+
+mod 'xinetd',
+  :git => 'https://github.com/puppetlabs/puppetlabs-xinetd',
+  :ref => 'master'
+

+ 57 - 0
README.md

@@ -0,0 +1,57 @@
+# Packstack
+
+Fork of the official Packstack repo which contains a fix for installing Packstack
+with Ironic on CentOS 7 with the OpenStack Ocata release. There is only one
+changed file, `packstack/puppet/modules/packstack/manifests/nova/sched/ironic.pp`,
+which contains a fix for the correct value of `scheduler_host_manager` for the
+Ocata release.
+
+This repository also contains the installation script `run_setup.sh`, so the
+installation can be performed simply by running that script, with no additional effort.
+
+## Installation of Packstack with Ironic and existing network configuration:
+
+    $ sudo yum install -y git
+    $ git clone https://git.susnik.work/jan/packstack_ironic_ocata.git
+    $ cd packstack
+    $ sudo bash run_setup.sh ironic
+
+## Installation of Packstack with Ironic and bridged network:
+
+Make sure to replace `enp2s0` interface with your own.
+
+    $ sudo yum install -y git
+    $ git clone https://git.susnik.work/jan/packstack_ironic_ocata.git
+    $ cd packstack
+    $ sudo bash run_setup.sh ironic enp2s0
+
+## Installation of Packstack with Ironic and bridged network + named external network:
+
+Make sure to replace `enp2s0` interface and `extnet` external network with your own.
+
+    $ sudo yum install -y git
+    $ git clone https://git.susnik.work/jan/packstack_ironic_ocata.git
+    $ cd packstack
+    $ sudo bash run_setup.sh ironic enp2s0 extnet
+
+## Installation of Packstack with default components:
+
+    $ sudo yum install -y git
+    $ git clone https://git.susnik.work/jan/packstack_ironic_ocata.git
+    $ cd packstack
+    $ sudo bash run_setup.sh
+
+## Development and general information
+
+For any additional information about development or anything else connected to
+Packstack, please refer to the original README (in the file `README_ORIGINAL`),
+or check the official repository located at https://github.com/openstack/packstack
+
+## License
+
+The software is provided "as is", without warranty of any kind on my part.
+All files in the project are included as fetched from their source and may have
+been changed as needed to make things work with the technologies specified above.
+Please read the original license, which is included in the project, if you want
+to use or modify this software further. All respective rights are reserved to
+the authors and contributors.

+ 230 - 0
README_ORIGINAL

@@ -0,0 +1,230 @@
+# Packstack
+
+Utility to install **OpenStack** on **Red Hat** based operating system. See
+other branches for older **OpenStack** versions. Details on how to
+contribute to **Packstack** may be found in the **Packstack** wiki at
+<https://wiki.openstack.org/wiki/Packstack> Additional information
+about involvement in the community around **Packstack** can be found at
+<https://openstack.redhat.com/Get_involved>
+
+
+This utility can be used to install **OpenStack** on a single or group of
+hosts (over `ssh`).
+
+This utility is still in the early stages; a lot of the configuration
+options have yet to be added.
+
+## Installation of packstack:
+
+    $ yum install -y git
+    $ git clone git://github.com/openstack/packstack.git
+    $ cd packstack && sudo python setup.py install
+
+## Installation of openstack-puppet-modules (REQUIRED if running packstack from source):
+
+    $ export GEM_HOME=/tmp/somedir
+    $ gem install r10k
+    $ sudo -E /tmp/somedir/bin/r10k puppetfile install -v
+    $ sudo cp -r packstack/puppet/modules/packstack /usr/share/openstack-puppet/modules
+
+### Option 1 (all-in-one)
+
+    $ packstack --allinone
+
+This will install all **OpenStack** services on a single host without
+prompting for any configuration information.  This will generate an
+"answers" file (`packstack-answers-<date>-<time>.txt`) containing all
+the values used for the install.
+
+If you need to re-run packstack, you must use the `--answer-file`
+option in order for packstack to use the correct values for passwords
+and other authentication credentials:
+
+    $ packstack --answer-file packstack-answers-<date>-<time>.txt
+
+### Option 2 (using answer file)
+
+    $ packstack --gen-answer-file=ans.txt
+
+Then edit `ans.txt` as appropriate e.g.
+
+- set `CONFIG_SSH_KEY` to a public ssh key to be installed to remote machines
+- Edit the IP address to anywhere you want to install a piece of OpenStack on another server
+- Edit the 3 network interfaces to whatever makes sense in your setup
+
+you'll need to use an icehouse repository, for example for RHEL
+
+    $ CONFIG_REPO=http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/epel-6/
+
+    $ packstack --answer-file=ans.txt
+
+### Option 3 (prompts for configuration options)
+
+    $ packstack
+
+That's it! If everything went well, you can now start using OpenStack:
+
+    $ cd
+    $ . keystonerc_admin
+    $ nova list
+    $ swift list  # if you have installed swift
+
+## Config options
+
+- `CONFIG_NOVA_COMPUTE_HOSTS` :
+  * A comma separated list of ip addresses on which to install nova compute
+- `CONFIG_SWIFT_STORAGE_HOSTS` :
+  * A comma separated list of swift storage devices
+    * `1.1.1.1`: create a testing loopback device and use this for storage
+    * `1.1.1.1/sdb`: use `/dev/sdb` on `1.1.1.1` as a storage device
+
+## Logging
+
+The location of the log files and generated puppet manifests are in the
+`/var/tmp/packstack` directory under a directory named by the date in which
+**Packstack** was run and a random string (e.g. `/var/tmp/packstack/20131022-204316-Bf3Ek2`).
+Inside, we find a manifest directory and the `openstack-setup.log` file; puppet
+manifests and a log file for each one are found inside the manifest directory.
+
+## Debugging
+
+To make **Packstack** write more detailed information into the log file you can use the `-d` switch:
+
+    $ packstack -d --allinone
+
+## Developing
+
+**Warning:**
+this procedure installs **openstack-puppet-modules** containing code that has
+not been upstreamed and fully tested yet and as such will not be as robust as
+the other install procedures.  It is recommended to install from **RPM**
+instead.
+
+To ease development of **Packstack** and **openstack-puppet-modules**, it can be
+useful to install from *git* such that updates to the git repositories are
+immediately effective without reinstallation of packstack and
+**openstack-puppet-modules**.
+
+To do this, start with a minimal **Fedora 21** installation.  Then remove any
+relevant packages that might conflict:
+
+    $ yum -y erase openstack-{packstack*,puppet-modules}
+
+Disable **SELinux** by changing "`enforcing`" to "`permissive`" in
+`/etc/sysconfig/selinux`, then reboot to allow service changes to take effect
+and swap over networking.  Then install packages:
+
+    $ yum -y install git python-setuptools
+
+And install **RDO**:
+
+    $ yum -y install https://rdo.fedorapeople.org/rdo-release.rpm
+    $ yum -y update
+
+Now we get **openstack-puppet-modules**.  Because `python setup.py
+install_puppet_modules` from **Packstack** copies rather than linking, this is not
+entirely straightforward:
+
+    $ git clone https://github.com/redhat-openstack/openstack-puppet-modules
+    $ cd openstack-puppet-modules
+    $ git checkout master-patches
+    $ mkdir /usr/share/openstack-puppet
+    $ ln -sv /root/openstack-puppet-modules /usr/share/openstack-puppet/modules
+
+Then we get **Packstack**, and perform a similar dance:
+
+    $ yum install -y python-crypto python-devel libffi-devel openssl-devel gcc-c++
+    $ git clone https://github.com/openstack/packstack
+    $ cd packstack
+    $ python setup.py develop
+    $ cd /usr/share/openstack-puppet/modules
+    $ ln -sv /root/packstack/packstack/puppet/modules/packstack
+
+And we're done.  Changes to the contents of **Packstack** and
+**openstack-puppet-modules** repositories are picked up by the **Packstack**
+executable without further intervention, and **Packstack** is ready to install.
+
+## Puppet Style Guide
+
+**IMPORTANT** <https://docs.puppetlabs.com/guides/style_guide.html>
+
+Please respect the Puppet Style Guide as much as possible!
+
+## Running local Puppet-lint tests
+
+It assumes that both `bundler` as well as `rubygems` (and `ruby`) are already
+installed on the system. If not, run this command:
+
+    $ sudo yum install rubygems rubygem-bundler ruby ruby-devel -y
+
+Go into the **Packstack** root directory.
+
+    $ cd packstack/
+
+A `Rakefile` contains all you need to run puppet-lint task automatically over
+all the puppet manifests included in the **Packstack** project.
+
+    $ ls -l packstack/puppet/templates/
+
+and
+
+    $ ls -l packstack/puppet/modules/
+
+The default puppet-lint pattern for `.pp` files is `**/*.pp`. So there is no
+need to go inside those directories to run puppet-lint!
+
+    $ mkdir vendor
+    $ export GEM_HOME=vendor
+    $ bundle install
+    $ bundle exec rake lint
+
+## Packstack integration tests
+
+Packstack is integration tested in the OpenStack gate and provides the means to
+reproduce these tests on your environment if you wish.
+
+This is the current matrix of available tests:
+
+|     -      | scenario001 | scenario002 | scenario003 |
+|:----------:|:-----------:|:------------:|
+| keystone   |   FERNET    |    UUID     |    FERNET   |
+| glance     |    file     |    swift    |     file    |
+| nova       |      X      |      X      |      X      |
+| neutron    |      X      |      X      |      X      |
+| lbaasv2    |             |      X      |             |
+| cinder     |      X      |             |             |
+| ceilometer |             |             |      X      |
+| aodh       |             |             |      X      |
+| gnocchi    |             |             |      X      |
+| panko      |             |             |      X      |
+| heat       |             |             |      X      |
+| swift      |             |      X      |             |
+| sahara     |             |      X      |             |
+| trove      |             |      X      |             |
+| horizon    |      X      |             |             |
+| manila     |      X      |             |             |
+| nagios     |      X      |             |             |
+| SSL        |      X      |             |             |
+
+To run these tests:
+
+    export SCENARIO="scenario001"
+    ./run_tests.sh
+
+run_tests.sh will take care of installing the required dependencies,
+configure packstack to run according to the above matrix and run the complete
+installation process. If the installation is successful, tempest will also
+run smoke tests.
+
+By default, run_tests.sh will set up delorean (RDO Trunk) repositories.
+There are two ways of overriding default repositories:
+
+    export DELOREAN="http://someotherdomain.tld/delorean.repo"
+    export DELOREAN_DEPS="http://someotherdomain.tld/delorean-deps.repo"
+    ./run_tests.sh
+
+You can also choose to disable repository management entirely:
+
+    <setup your own custom repositories here>
+    export MANAGE_REPOS="false"
+    ./run_tests.sh

+ 29 - 0
Rakefile

@@ -0,0 +1,29 @@
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+require 'puppet-syntax/tasks/puppet-syntax'
+
+PuppetLint.configuration.relative = true
+PuppetLint.configuration.log_format = "%{path}:%{linenumber}:%{check}:%{KIND}:%{message}"
+PuppetLint.configuration.fail_on_warnings = true
+PuppetLint.configuration.send('disable_class_parameter_defaults')
+PuppetLint.configuration.send('disable_class_inherits_from_params_class')
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.send('disable_containing_dash')
+PuppetLint.configuration.send('disable_quoted_booleans')
+PuppetLint.configuration.send('disable_documentation')
+
+exclude_paths = [
+"pkg/**/*",
+"vendor/**/*",
+"spec/**/*",
+]
+
+Rake::Task[:lint].clear
+PuppetLint.configuration.ignore_paths = exclude_paths
+PuppetSyntax.exclude_paths = exclude_paths
+
+desc "Run syntax, lint"
+task :test => [
+  :syntax,
+  :lint,
+]

+ 5 - 0
bindep.txt

@@ -0,0 +1,5 @@
+libffi-dev [platform:dpkg]
+libffi-devel [platform:rpm]
+libssl-dev [platform:dpkg]
+openssl-devel [platform:rpm]
+

+ 153 - 0
docs/Makefile

@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/packstack.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/packstack.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/packstack"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/packstack"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."

+ 255 - 0
docs/conf.py

@@ -0,0 +1,255 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# packstack documentation build configuration file, created by
+# sphinx-quickstart on Thu Nov 15 20:34:41 2012.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+
+sys.path.append('..')
+from packstack.version import version_info
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+# sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'packstack'
+
+# General information about the project.
+project = u'packstack'
+copyright = u'2012, Red Hat'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = version_info.version_string()
+# The full version, including alpha/beta/rc tags.
+release = version_info.release_string()
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+# language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'packstackdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    # 'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    # 'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    # 'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+    ('index', 'packstack.tex', u'packstack Documentation', u'Derek Higgins', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('packstack', 'packstack', u'install openstack', [u'Derek Higgins, Martin Magr, Sandro Mathys, Flavio Percoco, Alvaro Lopez Ortega'], 1)
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    ('index', 'packstack', u'packstack Documentation',
+     u'Derek Higgins', 'packstack', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'

File diff suppressed because it is too large
+ 1171 - 0
docs/packstack.rst


+ 0 - 0
packstack/__init__.py


+ 3 - 0
packstack/installer/.gitignore

@@ -0,0 +1,3 @@
+*.pyc
+*.swp
+*.log

+ 202 - 0
packstack/installer/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 0 - 0
packstack/installer/__init__.py


+ 104 - 0
packstack/installer/basedefs.py

@@ -0,0 +1,104 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This module provides all the predefined variables.

NOTE: importing this module has side effects -- it creates (and may
chown) /var/tmp/packstack, creates a per-run temporary directory under
it and maintains a 'latest' symlink pointing at that directory. It may
also call sys.exit(1) when the directory is owned by root and the
current user cannot use it.
"""

import datetime
import os
import pkg_resources
import sys
import tempfile

from .utils import get_current_user


APP_NAME = "Packstack"

FILE_YUM_VERSION_LOCK = "/etc/yum/pluginconf.d/versionlock.list"

# Resolve the packstack documentation file: prefer the copy shipped with
# the installed package, then <sys.prefix>/share, then the system path.
PACKSTACK_SRC_DOC = pkg_resources.resource_filename(
    pkg_resources.Requirement.parse('packstack'), 'docs/packstack.rst'
)
if os.path.exists(PACKSTACK_SRC_DOC):
    PACKSTACK_DOC = PACKSTACK_SRC_DOC
elif os.path.exists(os.path.join(sys.prefix, 'share/packstack/packstack.rst')):
    PACKSTACK_DOC = os.path.join(sys.prefix, 'share/packstack/packstack.rst')
else:
    PACKSTACK_DOC = '/usr/share/packstack/packstack.rst'

# Base working directory shared by all packstack runs.
PACKSTACK_VAR_DIR = '/var/tmp/packstack'
try:
    os.mkdir(PACKSTACK_VAR_DIR, 0o700)
except OSError:
    # directory is already created, check ownership
    # NOTE(review): any OSError lands here (e.g. EACCES), not only
    # EEXIST -- confirm the "already created" assumption is acceptable.
    # Also note this local `stat` holds the os.stat() result, it is not
    # the stat module.
    stat = os.stat(PACKSTACK_VAR_DIR)
    if stat.st_uid == 0 and os.getuid() != stat.st_uid:
        print('%s is already created and owned by root. Please change '
              'ownership and try again.' % PACKSTACK_VAR_DIR)
        sys.exit(1)
finally:
    # The finally clause runs even on the sys.exit(1) path above
    # (SystemExit propagates after it). When running as root on behalf
    # of another user (e.g. via sudo), hand the directory back to that
    # user so later non-root runs keep working.
    uid, gid = get_current_user()

    if uid != 0 and os.getuid() == 0:
        try:
            os.chown(PACKSTACK_VAR_DIR, uid, gid)
        except Exception as ex:
            # NOTE(review): `ex` is unused; the failure reason is not
            # reported to the user.
            print('Unable to change owner of %s. Please fix ownership '
                  'manually and try again.' % PACKSTACK_VAR_DIR)
            sys.exit(1)

# Per-run directory, e.g. /var/tmp/packstack/20170101-120000-XXXXXX.
_tmpdirprefix = datetime.datetime.now().strftime('%Y%m%d-%H%M%S-')
VAR_DIR = tempfile.mkdtemp(prefix=_tmpdirprefix, dir=PACKSTACK_VAR_DIR)
DIR_LOG = VAR_DIR
FILE_LOG = 'openstack-setup.log'
PUPPET_MANIFEST_RELATIVE = "manifests"
PUPPET_MANIFEST_DIR = os.path.join(VAR_DIR, PUPPET_MANIFEST_RELATIVE)
HIERADATA_FILE_RELATIVE = "hieradata"
HIERADATA_DIR = os.path.join(VAR_DIR, HIERADATA_FILE_RELATIVE)

# Refresh the 'latest' symlink so it always points at the newest run.
LATEST_LOG_DIR = '%s/latest' % PACKSTACK_VAR_DIR
if os.path.exists(LATEST_LOG_DIR):
    try:
        os.unlink(LATEST_LOG_DIR)
    except OSError:
        print('Unable to delete symbol link for log dir %s.' % LATEST_LOG_DIR)

try:
    # Extract folder name at /var/tmp/packstack/<VAR_DIR> and do a relative
    # symlink to /var/tmp/packstack/latest
    os.symlink(os.path.basename(VAR_DIR),
               os.path.join(PACKSTACK_VAR_DIR, 'latest'))
except OSError:
    print('Unable to create symbol link for log dir %s.' % LATEST_LOG_DIR)

# Packages required on remote nodes before puppet can be run there.
PUPPET_DEPENDENCIES = ['puppet', 'hiera', 'openssh-clients', 'tar', 'nc']
PUPPET_MODULES_DEPS = ['rubygem-json']

FILE_INSTALLER_LOG = "setup.log"

# Project layout: plugins/ and modules/ live next to this package unless
# overridden via the INSTALLER_PROJECT_DIR environment variable.
DIR_PROJECT_DIR = os.environ.get('INSTALLER_PROJECT_DIR', os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
DIR_PLUGINS = os.path.join(DIR_PROJECT_DIR, "plugins")
DIR_MODULES = os.path.join(DIR_PROJECT_DIR, "modules")

# Names of external executables invoked by the installer.
EXEC_RPM = "rpm"
EXEC_SEMANAGE = "semanage"
EXEC_NSLOOKUP = "nslookup"
EXEC_CHKCONFIG = "chkconfig"
EXEC_SERVICE = "service"
EXEC_IP = "ip"

# space len size for color print
SPACE_LEN = 70

+ 0 - 0
packstack/installer/core/__init__.py


+ 409 - 0
packstack/installer/core/drones.py

@@ -0,0 +1,409 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import logging
import os
import shutil
import stat
import tarfile
import tempfile
import time
import uuid

from .. import utils
+
+
class SshTarballTransferMixin(object):
    """
    Transfers resources and recipes by packing them to tar.gz and
    copying it via ssh.

    Intended to be mixed into a Drone subclass; relies on the host
    class providing node, local_tmpdir, remote_tmpdir, resource_dir,
    recipe_dir, _resources and _recipes.
    """
    def _transfer(self, pack_path, pack_dest, res_dir):
        """
        Copies local tarball pack_path to pack_dest on the node and
        extracts it there into res_dir.

        :raises RuntimeError: when the copy or the extraction fails
        """
        node = self.node
        args = locals()
        # copy and extract tarball
        script = utils.ScriptRunner()
        script.append("scp %(pack_path)s root@%(node)s:%(pack_dest)s"
                      % args)
        script.append("ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null root@%(node)s "
                      "tar -C %(res_dir)s -xpzf %(pack_dest)s" % args)
        try:
            script.execute()
        except utils.ScriptRuntimeError as ex:
            # the exception must be referenced via utils (matching the
            # rest of this module); the bare name ScriptRuntimeError is
            # not imported here and raised a NameError instead of the
            # intended error
            # TO-DO: change to appropriate exception
            raise RuntimeError('Failed to copy resources to node %s. '
                               'Reason: %s' % (node, ex))

    def _pack_resources(self):
        """
        Packs all registered resources into a tarball in local_tmpdir
        and returns the tarball's path. Each resource is stored under
        its destination directory (or its basename when no destination
        was given).
        """
        randpart = uuid.uuid4().hex[:8]
        pack_path = os.path.join(self.local_tmpdir,
                                 'res-%s.tar.gz' % randpart)
        pack = tarfile.open(pack_path, mode='w:gz')
        # keep the archive private -- it may contain sensitive configs
        os.chmod(pack_path, stat.S_IRUSR | stat.S_IWUSR)
        for path, dest in self._resources:
            if not dest:
                dest = os.path.basename(path)
            pack.add(path,
                     arcname=os.path.join(dest, os.path.basename(path)))
        pack.close()
        return pack_path

    def _copy_resources(self):
        """Packs resources and transfers them into resource_dir."""
        pack_path = self._pack_resources()
        pack_dest = os.path.join(self.remote_tmpdir,
                                 os.path.basename(pack_path))
        self._transfer(pack_path, pack_dest, self.resource_dir)

    def _pack_recipes(self):
        """
        Packs all registered recipes into a tarball in local_tmpdir and
        returns the tarball's path.
        """
        randpart = uuid.uuid4().hex[:8]
        pack_path = os.path.join(self.local_tmpdir,
                                 'rec-%s.tar.gz' % randpart)
        pack = tarfile.open(pack_path, mode='w:gz')
        os.chmod(pack_path, stat.S_IRUSR | stat.S_IWUSR)
        # when the recipe dir lives inside the resource dir, store the
        # recipes under the corresponding relative path so a single
        # extraction at resource_dir puts them in the right place
        if self.recipe_dir.startswith(self.resource_dir):
            dest = self.recipe_dir[len(self.resource_dir):].lstrip('/')
        else:
            dest = ''
        # .items() instead of Python 2 only .iteritems(); identical
        # behaviour here and keeps the module importable under Python 3
        for marker, recipes in self._recipes.items():
            for path in recipes:
                _dest = os.path.join(dest, os.path.basename(path))
                pack.add(path, arcname=_dest)
        pack.close()
        return pack_path

    def _copy_recipes(self):
        """Packs recipes and transfers them into recipe_dir."""
        pack_path = self._pack_recipes()
        pack_dest = os.path.join(self.remote_tmpdir,
                                 os.path.basename(pack_path))
        if self.recipe_dir.startswith(self.resource_dir):
            extr_dest = self.resource_dir
        else:
            extr_dest = self.recipe_dir
        self._transfer(pack_path, pack_dest, extr_dest)
+
+
class DroneObserver(object):
    """
    Abstract listener interface for progress messages emitted by
    drones. Subclasses must override all three callbacks.
    """

    def applying(self, drone, recipe):
        """Called by a drone when it starts applying a recipe."""
        raise NotImplementedError()

    def checking(self, drone, recipe):
        """Called by a drone when it starts checking whether a recipe
        has been applied."""
        raise NotImplementedError()

    def finished(self, drone, recipe):
        """Called by a drone once application of a recipe has
        finished."""
        raise NotImplementedError()
+
+
class Drone(object):
    """
    Base class used to apply installation recipes to nodes.

    Recipes are registered in ordered groups identified by a marker;
    groups are applied strictly in registration order, while recipes
    sharing a marker may be applied in parallel. Subclasses implement
    the transport (_copy_resources / _copy_recipes) and the application
    mechanism (_apply / _finished).
    """
    def __init__(self, node, resource_dir=None, recipe_dir=None,
                 local_tmpdir=None, remote_tmpdir=None):
        # marker -> list of local recipe paths, in registration order
        self._recipes = utils.SortedDict()
        # list of (local path, destination) tuples
        self._resources = []
        # remote recipe paths already applied / currently running
        self._applied = set()
        self._running = set()
        self._observer = None

        # remote host IP or hostname
        self.node = node
        # working directories on remote host
        self.resource_dir = (resource_dir or
                             '/tmp/drone%s' % uuid.uuid4().hex[:8])
        self.recipe_dir = (recipe_dir or
                           os.path.join(self.resource_dir, 'recipes'))
        # temporary directories
        self.remote_tmpdir = (remote_tmpdir or
                              '/tmp/drone%s' % uuid.uuid4().hex[:8])
        self.local_tmpdir = (local_tmpdir or
                             tempfile.mkdtemp(prefix='drone'))

    def init_node(self):
        """
        Initializes node for manipulation: creates the resource, recipe
        and temporary directories (mode 0700) on the remote host.
        """
        created = []
        server = utils.ScriptRunner(self.node)
        for dirname in (self.resource_dir, self.recipe_dir,
                        self.remote_tmpdir):
            server.append('mkdir -p %s' % os.path.dirname(dirname))
            server.append('mkdir --mode 0700 %s' % dirname)
            created.append('%s:%s' % (self.node, dirname))
        server.execute()

        # TO-DO: complete logger name when logging will be setup correctly
        logger = logging.getLogger()
        logger.debug('Created directories: %s' % ','.join(created))

    @property
    def recipes(self):
        """Yields local paths of all registered recipes in order."""
        # .values() instead of Python 2 only .itervalues(); identical
        # behaviour and portable to Python 3
        for recipe_group in self._recipes.values():
            for recipe in recipe_group:
                yield recipe

    @property
    def resources(self):
        """Yields local paths of all registered resources."""
        for resource in self._resources:
            yield resource[0]

    def add_recipe(self, path, marker=None):
        """
        Registers recipe for application on node. Recipes will be
        applied in the order they were added to the drone. Multiple
        recipes can be applied in parallel if they have the same
        marker.
        """
        marker = marker or uuid.uuid4().hex[:8]
        self._recipes.setdefault(marker, []).append(path)

    def add_resource(self, path, destination=None):
        """
        Registers resource. Destination will be relative from resource
        directory on node.
        """
        dest = destination or ''
        self._resources.append((path, dest))

    def _copy_resources(self):
        """
        Copies all local files registered in self._resources to their
        appropriate destination on self.node. If tmpdir is given this
        method can operate only in this directory.
        """
        # subclass must implement this method
        raise NotImplementedError()

    def _copy_recipes(self):
        """
        Copies all local files registered in self._recipes to their
        appropriate destination on self.node. If tmpdir is given this
        method can operate only in this directory.
        """
        # subclass must implement this method
        raise NotImplementedError()

    def prepare_node(self):
        """
        Copies all local resources and recipes to self.node.
        """
        # TO-DO: complete logger name when logging will be setup correctly
        logger = logging.getLogger()
        # materialize the generators so the log shows the actual paths;
        # the original logged a generator repr for resources and the
        # first character of each recipe path
        logger.debug('Copying drone resources to node %s: %s'
                     % (self.node, list(self.resources)))
        self._copy_resources()
        logger.debug('Copying drone recipes to node %s: %s'
                     % (self.node, list(self.recipes)))
        self._copy_recipes()

    def _apply(self, recipe):
        """
        Starts application of single recipe given as path to the recipe
        file in self.node. This method should not wait until recipe is
        applied.
        """
        # subclass must implement this method
        raise NotImplementedError()

    def _finished(self, recipe):
        """
        Returns True if given recipe is applied, otherwise returns
        False.
        """
        # subclass must implement this method
        raise NotImplementedError()

    def _wait(self):
        """
        Blocks until all started recipe applications have finished,
        polling every 3 seconds and notifying the observer (if any)
        about each check and each completion.
        """
        while self._running:
            for recipe in list(self._running):
                if self._observer:
                    self._observer.checking(self, recipe)
                if self._finished(recipe):
                    self._applied.add(recipe)
                    self._running.remove(recipe)
                    if self._observer:
                        self._observer.finished(self, recipe)
            if self._running:
                # sleep once per polling pass; the original slept once
                # per still-unfinished recipe, stretching the interval
                # linearly with the number of parallel recipes
                time.sleep(3)

    def set_observer(self, observer):
        """
        Registers an observer. Given object should be subclass of class
        DroneObserver.
        """
        for attr in ('applying', 'checking', 'finished'):
            if not hasattr(observer, attr):
                raise ValueError('Observer object should be a subclass '
                                 'of class DroneObserver.')
        self._observer = observer

    def apply(self, marker=None, name=None, skip=None):
        """
        Applies recipes on node. If marker is specified, only recipes
        with given marker are applied. If name is specified only recipe
        with given name is applied. Skips recipes with names given
        in list parameter skip.
        """
        # TO-DO: complete logger name when logging will be setup correctly
        logger = logging.getLogger()
        skip = skip or []
        lastmarker = None
        for mark, recipelist in self._recipes.items():
            if marker and marker != mark:
                logger.debug('Skipping marker %s for node %s.' %
                             (mark, self.node))
                continue
            for recipe in recipelist:
                base = os.path.basename(recipe)
                if (name and name != base) or base in skip:
                    logger.debug('Skipping recipe %s for node %s.' %
                                 (recipe, self.node))
                    continue

                # if the marker has changed then we don't want to
                # proceed until all of the previous puppet runs have
                # finished
                if lastmarker and lastmarker != mark:
                    self._wait()
                lastmarker = mark

                logger.debug('Applying recipe %s to node %s.' %
                             (base, self.node))
                rpath = os.path.join(self.recipe_dir, base)
                if self._observer:
                    self._observer.applying(self, recipe)
                self._running.add(rpath)
                self._apply(rpath)
        self._wait()

    def cleanup(self, resource_dir=True, recipe_dir=True):
        """
        Removes all directories created by this drone: the local
        temporary directory, the remote temporary directory and,
        depending on the flags, the remote recipe and resource
        directories.
        """
        # shutil must be imported at module level; the original called
        # shutil.rmtree without any import, raising NameError here
        shutil.rmtree(self.local_tmpdir, ignore_errors=True)
        server = utils.ScriptRunner(self.node)
        server.append('rm -fr %s' % self.remote_tmpdir)
        if recipe_dir:
            server.append('rm -fr %s' % self.recipe_dir)
        if resource_dir:
            server.append('rm -fr %s' % self.resource_dir)
        server.execute()
+
+
class PackstackDrone(SshTarballTransferMixin, Drone):
    """
    This drone uses Puppet and its manifests to manipulate a node.
    """
    # XXX: Since this implementation is Packstack specific (_apply
    #      method), it should be moved out of installer when
    #      Controller and plugin system will be refactored and installer
    #      will support projects.
    def __init__(self, *args, **kwargs):
        # enforce the Packstack specific directory layout on the node
        kwargs['resource_dir'] = ('/var/tmp/packstack/drone%s'
                                  % uuid.uuid4().hex[:8])
        kwargs['recipe_dir'] = '%s/manifests' % kwargs['resource_dir']
        kwargs['remote_tmpdir'] = '%s/temp' % kwargs['resource_dir']

        super(PackstackDrone, self).__init__(*args, **kwargs)

        self.module_dir = os.path.join(self.resource_dir, 'modules')
        self.fact_dir = os.path.join(self.resource_dir, 'facts')

    def init_node(self):
        """
        Initializes node for manipulation: creates the working
        directories and installs packages required for puppet runs.
        """
        super(PackstackDrone, self).init_node()
        server = utils.ScriptRunner(self.node)
        for pkg in ("puppet", "openssh-clients", "tar"):
            server.append("rpm -q --whatprovides %(pkg)s || "
                          "yum install -y %(pkg)s" % locals())
        server.execute()

    def add_resource(self, path, resource_type=None):
        """
        Registers a resource. Resource type should be 'module', 'fact'
        or 'resource'; it selects the destination subdirectory
        (modules/, facts/ or resources/) on the node.
        """
        resource_type = resource_type or 'resource'
        dest = '%ss' % resource_type
        super(PackstackDrone, self).add_resource(path, destination=dest)

    def _finished(self, recipe):
        """
        Returns True once the remote puppet run for the given recipe
        has finished, which is signalled by the existence of the
        *.finished file on the node; its content is fetched into the
        local log file for error checking.
        """
        recipe_base = os.path.basename(recipe)
        log = os.path.join(self.recipe_dir,
                           recipe_base.replace(".finished", ".log"))
        local = utils.ScriptRunner()
        local.append('scp -o StrictHostKeyChecking=no '
                     '-o UserKnownHostsFile=/dev/null '
                     'root@%s:%s %s' % (self.node, recipe, log))
        try:
            # once a remote puppet run has finished, we retrieve
            # the log file and check it for errors
            local.execute(log=False)
            # if we got to this point the puppet apply has finished
            return True
        except utils.ScriptRuntimeError:
            # scp fails while the *.finished file does not exist yet,
            # meaning the puppet run is still in progress
            return False

    def _apply(self, recipe):
        """
        Starts a background puppet apply of the given recipe on the
        node. The run writes its output into <recipe>.running, which
        is renamed to <recipe>.finished when the run completes (see
        _finished above for the corresponding poll).
        """
        running = "%s.running" % recipe
        finished = "%s.finished" % recipe

        server = utils.ScriptRunner(self.node)
        server.append("touch %s" % running)
        server.append("chmod 600 %s" % running)

        # XXX: This is terrible hack, but unfortunatelly the apache
        # puppet module doesn't work if we set FACTERLIB
        # https://github.com/puppetlabs/puppetlabs-apache/pull/138
        for bad_word in ('horizon', 'nagios', 'apache'):
            if bad_word in recipe:
                break
        else:
            server.append("export FACTERLIB=$FACTERLIB:%s" %
                          self.fact_dir)
        server.append("export PACKSTACK_VAR_DIR=%s" % self.resource_dir)

        # TO-DO: complete logger name when logging will be setup correctly
        logger = logging.getLogger()
        loglevel = logger.level <= logging.DEBUG and '--debug' or ''
        rdir = self.resource_dir
        # __init__ defines self.module_dir; the original read the
        # nonexistent self._module_dir, raising AttributeError on
        # every apply
        mdir = self.module_dir
        server.append(
            "( flock %(rdir)s/ps.lock "
            "puppet apply %(loglevel)s --modulepath %(mdir)s "
            "%(recipe)s > %(running)s 2>&1 < /dev/null; "
            "mv %(running)s %(finished)s ) "
            "> /dev/null 2>&1 < /dev/null &" % locals())
        server.execute()

+ 60 - 0
packstack/installer/core/parameters.py

@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Container set for groups and parameters
+"""
+
+from ..utils.datastructures import SortedDict
+
+
class Parameter(object):
    """
    Container for a single configuration parameter.

    Every key from allowed_keys becomes an instance attribute,
    defaulting to None and overridden by the given attributes mapping.

    :raises KeyError: when attributes contains a key that is not in
                      allowed_keys
    """
    allowed_keys = ('CONF_NAME', 'CMD_OPTION', 'USAGE', 'PROMPT',
                    'PROCESSORS', 'VALIDATORS', 'LOOSE_VALIDATION',
                    'DEFAULT_VALUE', 'USE_DEFAULT', 'OPTION_LIST',
                    'MASK_INPUT', 'NEED_CONFIRM', 'CONDITION', 'DEPRECATES',
                    'MESSAGE', 'MESSAGE_VALUES')

    def __init__(self, attributes=None):
        attributes = attributes or {}
        # preset every allowed key to None, then overlay the given
        # attributes on top
        defaults = dict.fromkeys(self.allowed_keys)
        defaults.update(attributes)

        # .items() instead of Python 2 only .iteritems(); identical
        # behaviour here and keeps the module importable under Python 3
        for key, value in defaults.items():
            if key not in self.allowed_keys:
                raise KeyError('Given attribute %s is not allowed' % key)
            self.__dict__[key] = value
+
+
class Group(Parameter):
    """
    A named group of Parameter objects, itself carrying group level
    attributes (name, description and pre/post conditions) restricted
    to allowed_keys.
    """
    allowed_keys = ('GROUP_NAME', 'DESCRIPTION', 'PRE_CONDITION',
                    'PRE_CONDITION_MATCH', 'POST_CONDITION',
                    'POST_CONDITION_MATCH')

    def __init__(self, attributes=None, parameters=None):
        super(Group, self).__init__(attributes)
        # parameters keyed by their CONF_NAME, insertion order kept
        self.parameters = SortedDict()
        for param in parameters or []:
            self.parameters[param['CONF_NAME']] = Parameter(attributes=param)

    def search(self, attr, value):
        """
        Returns list of parameters which have given attribute of given
        value.
        """
        # .values() instead of Python 2 only .itervalues(); identical
        # behaviour here and keeps the module importable under Python 3
        return [param for param in self.parameters.values()
                if getattr(param, attr) == value]

+ 109 - 0
packstack/installer/core/sequences.py

@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Base class for steps & sequences
+"""
+
+import sys
+import logging
+import traceback
+
+from .. import utils
+from ..exceptions import SequenceError
+
+
class Step(object):
    """
    Wrapper for function representing single setup step.

    :param name: unique step name
    :param function: callable taking (config, messages); may be None
    :param title: human readable title printed while the step runs
    :raises SequenceError: when function is neither None nor callable
    """
    def __init__(self, name, function, title=None):
        self.name = name
        self.title = title or ('Step: %s' % name)

        # process step function
        if function and not callable(function):
            raise SequenceError("Function object have to be callable. "
                                "Object %s is not callable." % function)
        self.function = function

    def run(self, config=None, messages=None):
        """
        Executes the step function with the given config dict and
        messages list, printing a colored DONE/ERROR state line to
        stdout. An exception raised by the step function is logged
        (full traceback at debug level) and re-raised.
        """
        config = config if config is not None else {}
        messages = messages if messages is not None else []
        # TO-DO: complete logger name when logging will be setup correctly
        logger = logging.getLogger()
        logger.debug('Running step %s.' % self.name)

        # execute and report state
        try:
            self.function(config, messages)
        except Exception:
            # the exception object itself is not needed here (the
            # original bound it to an unused `ex`); the traceback goes
            # to the log and the exception propagates to the caller
            logger.debug(traceback.format_exc())
            state = utils.state_message(self.title, 'ERROR', 'red')
            sys.stdout.write('%s\n' % state)
            sys.stdout.flush()
            raise
        else:
            state = utils.state_message(self.title, 'DONE', 'green')
            sys.stdout.write('%s\n' % state)
            sys.stdout.flush()
+
+
class Sequence(object):
    """
    Wrapper for sequence of setup steps.

    :param name: sequence name used in debug logging
    :param steps: iterable of dicts with keys 'name', 'function' and
                  optionally 'title', each turned into a Step
    :param title: optional title printed before the steps run
    :param condition: config key checked before running the sequence
    :param cond_match: value the condition key must have for the
                       sequence to run
    """
    def __init__(self, name, steps, title=None, condition=None,
                 cond_match=None):
        self.name = name
        self.title = title
        self.condition = condition
        self.cond_match = cond_match

        # process sequence steps; registration order is preserved
        # (step_name instead of reusing `name`, which the original
        # shadowed after assigning self.name)
        self.steps = utils.SortedDict()
        for step in steps:
            step_name = step['name']
            self.steps[step_name] = Step(step_name, step['function'],
                                         title=step.get('title'))

    def validate_condition(self, config):
        """
        Returns True if config option condition has value given
        in cond_match (or if no condition is set). Otherwise returns
        False.
        """
        if not self.condition:
            return True
        return config.get(self.condition) == self.cond_match

    def run(self, config=None, messages=None, step=None):
        """
        Runs sequence of steps. Runs only specific step if step's name
        is given via 'step' parameter.
        """
        config = config if config is not None else {}
        messages = messages if messages is not None else []
        if not self.validate_condition(config):
            return
        if step:
            self.steps[step].run(config=config, messages=messages)
            return

        logger = logging.getLogger()
        logger.debug('Running sequence %s.' % self.name)
        if self.title:
            sys.stdout.write('%s\n' % self.title)
            sys.stdout.flush()
        # .values() instead of Python 2 only .itervalues(); also avoids
        # shadowing the `step` parameter as the original did
        for seq_step in self.steps.values():
            seq_step.run(config=config, messages=messages)

+ 89 - 0
packstack/installer/exceptions.py

@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# Names exported via "from ...exceptions import *". PuppetError,
# ExecuteRuntimeError and SequenceError are defined in this module as
# well; the original tuple omitted them, so star-importers silently
# missed those exception classes.
__all__ = (
    'PackStackError',

    'InstallError',
    'FlagValidationError',
    'MissingRequirements',

    'PluginError',
    'ParamProcessingError',
    'ParamValidationError',

    'NetworkError',
    'ScriptRuntimeError',
    'ExecuteRuntimeError',

    'PuppetError',
    'SequenceError',
)
+
+
class PackStackError(Exception):
    """Default Exception class for packstack installer.

    Optional keyword arguments ``stdout`` and ``stderr`` carry output
    captured from a failed operation; both default to None.
    """
    def __init__(self, *args, **kwargs):
        super(PackStackError, self).__init__(*args)
        # keep captured command output for callers that report it
        self.stdout = kwargs.get('stdout')
        self.stderr = kwargs.get('stderr')
+
+
# NOTE(review): unlike every other exception in this module PuppetError
# inherits from Exception rather than PackStackError, so handlers
# catching PackStackError will not catch it -- confirm this is intended.
class PuppetError(Exception):
    """Raised when Puppet will have some problems."""


class MissingRequirements(PackStackError):
    """Raised when minimum install requirements are not met."""
    pass


class InstallError(PackStackError):
    """Exception for generic errors during setup run."""
    pass


class FlagValidationError(InstallError):
    """Raised when single flag validation fails."""
    pass


class ParamValidationError(InstallError):
    """Raised when parameter value validation fails."""
    pass


class PluginError(PackStackError):
    # Base class for plugin related errors (see ParamProcessingError).
    pass


class ParamProcessingError(PluginError):
    # Raised when processing of a plugin parameter value fails.
    pass


class NetworkError(PackStackError):
    """Should be used for packstack's network failures."""
    pass


class ScriptRuntimeError(PackStackError):
    """
    Raised when utils.ScriptRunner.execute does not end successfully.
    """
    pass


class ExecuteRuntimeError(PackStackError):
    """Raised when utils.execute does not end successfully."""


class SequenceError(PackStackError):
    """Exception for errors during setup sequence run."""
    pass

+ 100 - 0
packstack/installer/output_messages.py

@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
'''
External text file to hold all user visible text.
Info messages begin with INFO_ and error messages with ERR_.

Any text with %s inside it has dynamic parameters inside;
please don't remove the %s from the text.
You can relocate the %s position in the text as long as the context is kept.
\n means new line in the text.
\ at the end of a line lets you continue the text in a new line.

DON'T CHANGE any of the param names (in UPPER-CASE);
they are used in the engine-setup.py.
'''

# NOTE(review): implicit relative import (Python 2 only) -- sibling modules
# use explicit relative imports; confirm before changing.
import basedefs

# --- General installer status messages -------------------------------------
INFO_HEADER = "Welcome to the %s setup utility" % basedefs.APP_NAME
INFO_INSTALL_SUCCESS = "\n **** Installation completed successfully ******\n"
INFO_INSTALL = "Installing:"
INFO_DSPLY_PARAMS = "\n%s will be installed using the following configuration:" % basedefs.APP_NAME
INFO_USE_PARAMS = "Proceed with the configuration listed above"
INFO_DONE = "DONE"
INFO_ERROR = "ERROR"
INFO_LOG_FILE_PATH = "The installation log file is available at: %s"
INFO_MANIFEST_PATH = "The generated manifests are available at: %s"
INFO_ADDTIONAL_MSG = "Additional information:"
INFO_ADDTIONAL_MSG_BULLET = " * %s"
INFO_CONF_PARAMS_PASSWD_CONFIRM_PROMPT = "Confirm password"

# --- Validation messages ---------------------------------------------------
INFO_VAL_PATH_SPACE = "Error: mount point %s contains only %s of available space while a minimum of %s is required"
INFO_VAL_NOT_INTEGER = "Error: value is not an integer"
INFO_VAL_PORT_NOT_RANGE = "Error: port is outside the range of %i - 65535"
INFO_VAL_STRING_EMPTY = "Warning: The %s parameter is empty"
INFO_VAL_NOT_IN_OPTIONS = "Error: response is not part of the following accepted answers: %s"
INFO_VAL_NOT_DOMAIN = "Error: domain is not a valid domain name"
INFO_VAL_NOT_USER = "Error: user name contains illegal characters"
INFO_VAL_PORT_OCCUPIED = "Error: TCP Port %s is already open by %s (pid: %s)"
INFO_VAL_PORT_OCCUPIED_BY_JBOSS = "Error: TCP Port %s is used by JBoss"
INFO_VAL_PASSWORD_DONT_MATCH = "Error: passwords don't match"

INFO_CHANGED_VALUE = ("Packstack changed given value %s to required "
                      "value %s")
WARN_VAL_IS_HOSTNAME = ("Warning: Packstack failed to change given "
                        "hostname %s to IP address. Note that some "
                        "services might not run correctly when hostname"
                        " is used.")

INFO_STRING_LEN_LESS_THAN_MIN = "String length is less than the minimum allowed: %s"
INFO_STRING_EXCEEDS_MAX_LENGTH = "String length exceeds the maximum length allowed: %s"
INFO_STRING_CONTAINS_ILLEGAL_CHARS = "String contains illegal characters"
INFO_CINDER_VOLUMES_EXISTS = "Did not create a cinder volume group, one already existed"
INFO_REMOVE_REMOTE_VAR = "Removing %s on %s (if it is a remote host)"

# --- Warnings --------------------------------------------------------------
WARN_WEAK_PASS = "Warning: Weak Password."
WARN_NM_ENABLED = ("Warning: NetworkManager is active on %s. OpenStack "
                   "networking currently does not work on systems that have "
                   "the Network Manager service enabled.")
WARN_IPV6_OVS = ("Warning: IPv6 and ovs tunneling is not yet supported and "
                 "will fail on host %s see https://bugzilla.redhat.com/show_bug.cgi?id=1100360.")

# --- Errors ----------------------------------------------------------------
ERR_PING = "Error: the provided hostname is unreachable"
ERR_SSH = "Error: could not connect to the ssh server: %s"
ERR_FILE = "Error: the provided file is not present"
ERR_CHECK_LOG_FILE_FOR_MORE_INFO = "Please check log file %s for more information"
ERR_YUM_LOCK = "Internal Error: Can't edit versionlock "
ERR_FAILED_START_SERVICE = "Error: Can't start the %s service"
ERR_FAILED_STOP_SERVICE = "Error: Can't stop the %s service"
ERR_EXP_HANDLE_PARAMS = "Failed handling user parameters input"
ERR_EXP_KEYBOARD_INTERRUPT = "Keyboard interrupt caught."
ERR_READ_RPM_VER = "Error reading version number for package %s"
ERR_EXP_READ_INPUT_PARAM = "Error while trying to read parameter %s from user."
ERR_EXP_VALIDATE_PARAM = "Error validating parameter %s from user."
ERR_EXP_HANDLE_ANSWER_FILE = "Failed handling answer file: %s"
ERR_EXP_GET_CFG_IPS = "Could not get list of available IP addresses on this host"
ERR_EXP_GET_CFG_IPS_CODES = "Failed to get list of IP addresses"
ERR_EXP_CANT_FIND_IP = "Could not find any configured IP address"
ERR_DIDNT_RESOLVED_IP = "%s did not resolve into an IP address"
ERR_IPS_NOT_CONFIGED = "Some or all of the IP addresses: (%s) which were resolved from the FQDN %s are not configured on any interface on this host"
ERR_IPS_NOT_CONFIGED_ON_INT = "The IP (%s) which was resolved from the FQDN %s is not configured on any interface on this host"
ERR_IPS_HAS_NO_PTR = "None of the IP addresses on this host(%s) holds a PTR record for the FQDN: %s"
ERR_IP_HAS_NO_PTR = "The IP %s does not hold a PTR record for the FQDN: %s"
ERR_EXP_FAILED_INIT_LOGGER = "Unexpected error: Failed to initiate logger, please check file system permission"
ERR_FAILURE = "General failure"
ERR_NO_ANSWER_FILE = "Error: Could not find file %s"
ERR_ONLY_1_FLAG = "Error: The %s flag is mutually exclusive to all other command line options"
ERR_REMOVE_REMOTE_VAR = "Error: Failed to remove directory %s on %s, it contains sensitive data and should be removed"
ERR_REMOVE_TMP_FILE = "Error: Failed to remove temporary file %s, it contains sensitive data and should be removed"
#

+ 159 - 0
packstack/installer/processors.py

@@ -0,0 +1,159 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import netaddr
+import os
+import uuid
+
+from .utils import force_ip
+from .utils import ScriptRunner
+from .exceptions import NetworkError
+from .exceptions import ParamProcessingError
+
+
# Public processor functions; process_heat is defined below and was missing
# from the original export list.
__all__ = ('ParamProcessingError', 'process_cidr', 'process_host',
           'process_ssh_key', 'process_add_quotes_around_values',
           'process_password', 'process_heat', 'process_string_nofloat',
           'process_bool')
+
+
def process_cidr(param, param_name, config=None):
    """Normalize a CIDR parameter; a bare IP address passes through as-is."""
    if '/' in param:
        try:
            return str(netaddr.IPNetwork(param).cidr)
        except Exception as ex:
            raise ParamProcessingError(str(ex))
    # No prefix length given -- a single IP address needs no normalization.
    return param
+
+
def process_host(param, param_name, config=None):
    """Convert ``param`` to an IP address when it is given as a hostname."""
    try:
        resolved = force_ip(param, allow_localhost=True)
    except NetworkError as err:
        raise ParamProcessingError(str(err))
    return resolved
+
+
def process_ssh_key(param, param_name, config=None):
    """
    Generates SSH key if given key in param doesn't exist. In case param
    is an empty string it generates default SSH key ($HOME/.ssh/id_rsa).

    :returns: path to the public key file.
    """
    def create_key(path):
        # Generate a key pair at the given (normalized) private key path.
        path = os.path.abspath(os.path.expanduser(path))
        local = ScriptRunner()
        local.append('ssh-keygen -f "%s" -N ""' % path)
        local.execute()

    if not param:
        # No key given: fall back to the user's default RSA key.
        key_file = '%s/.ssh/id_rsa' % os.environ["HOME"]
        param = '%s.pub' % key_file
        if not os.path.isfile(param):
            create_key(key_file)
    elif not os.path.isfile(param):
        # Accept either the private or public key path; derive the other.
        # (Conditional expressions replace the old `and/or` idiom, which
        # miscomputed key_file when the stripped name was an empty string.)
        key_file = param[:-4] if param.endswith('.pub') else param
        param = param if param.endswith('.pub') else '%s.pub' % param
        create_key(key_file)
    return param
+
+
def process_add_quotes_around_values(param, param_name, config=None):
    """Wrap each element of a comma separated list in single quotes
    (elements already carrying a quote on either side keep it)."""
    quoted = []
    for item in param.split(','):
        if not item.startswith("'"):
            item = "'%s" % item
        if not item.endswith("'"):
            item = "%s'" % item
        quoted.append(item)
    return ','.join(quoted)
+
+
def process_password(param, param_name, config=None):
    """
    Process passwords, checking the following:
    1- If there is a user-entered password, use it
    2- Otherwise, check for a global default password, and use it if available
    3- As a last resort, generate a random password

    Generated passwords are memoized on the function itself (pw_dict) so a
    *_CONFIRMED parameter receives the same random value as its base param.
    """
    # Function attribute acts as a per-process cache of generated passwords.
    if not hasattr(process_password, "pw_dict"):
        process_password.pw_dict = {}

    if param == "PW_PLACEHOLDER":
        if config["CONFIG_DEFAULT_PASSWORD"] != "":
            param = config["CONFIG_DEFAULT_PASSWORD"]
        else:
            # We need to make sure we store the random password we provide
            # and return it once we are asked for it again
            if param_name.endswith("_CONFIRMED"):
                # Strip the "_CONFIRMED" suffix (10 chars) so the confirm
                # parameter shares the value generated for the base one.
                unconfirmed_param = param_name[:-10]
                if unconfirmed_param in process_password.pw_dict:
                    param = process_password.pw_dict[unconfirmed_param]
                else:
                    param = uuid.uuid4().hex[:16]
                    process_password.pw_dict[unconfirmed_param] = param
            elif param_name not in process_password.pw_dict:
                param = uuid.uuid4().hex[:16]
                process_password.pw_dict[param_name] = param
            else:
                param = process_password.pw_dict[param_name]
    return param
+
+
def process_heat(param, param_name, config=None):
    """Force the parameter to 'y' whenever Sahara installation is selected
    (presumably Sahara depends on Heat -- confirm with the Sahara plugin)."""
    return 'y' if config["CONFIG_SAHARA_INSTALL"] == 'y' else param
+
+
def process_string_nofloat(param, param_name, config=None):
    """
    Process a string, making sure it is *not* convertible into a float
    If it is, change it into a random 16 char string, and check again
    """
    candidate = param
    while True:
        try:
            float(candidate)
        except ValueError:
            # Not float-convertible -- safe to return.
            return candidate
        # Replace and re-check (a random hex string can itself parse as a
        # float, e.g. all-digit or exponent-like sequences).
        candidate = uuid.uuid4().hex[:16]
+
+
def process_bool(param, param_name, config=None):
    """Converts param to appropriate boolean representation.

    Returns True if answer == y|yes|true, False if answer == n|no|false
    (case-insensitive).  Any other value falls through and returns None
    implicitly -- NOTE(review): confirm callers expect that rather than
    an error.
    """
    if param.lower() in ('y', 'yes', 'true'):
        return True
    elif param.lower() in ('n', 'no', 'false'):
        return False
+
# Define silent processors.
# The `silent` flag marks processors whose value changes are not reported
# back to the user -- NOTE(review): confirm against the parameter-processing
# machinery in run_setup.
for proc_func in (process_bool, process_add_quotes_around_values):
    proc_func.silent = True

File diff suppressed because it is too large
+ 1045 - 0
packstack/installer/run_setup.py


+ 150 - 0
packstack/installer/setup_controller.py

@@ -0,0 +1,150 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Controller class is a SINGLETON which handles all groups, params, sequences,
+steps and replaces the CONF dictionary.
+"""
+from .core.parameters import Group
+from .core.sequences import Sequence
+
+
def steps_new_format(steplist):
    """Convert legacy step dicts to the new step format.

    The title is duplicated into 'name', and only a single function is
    allowed per step (the first entry of 'functions' is kept).
    """
    converted = []
    for step in steplist:
        converted.append({'name': step['title'],
                          'title': step['title'],
                          'function': step['functions'][0]})
    return converted
+
+
class Controller(object):
    """
    SINGLETON registry of plugin groups, parameters, sequences and steps;
    also holds the global CONF dictionary and the MESSAGES list.
    """

    __GROUPS = []
    __SEQUENCES = []
    __PLUGINS = []
    MESSAGES = []
    CONF = {}

    __single = None  # the one, true Singleton instance

    def __new__(cls, *args, **kwargs):
        """
        Singleton implementation.
        Returns __single when cls is exactly the class of __single, so a
        class inheriting Controller gets its own singleton rather than
        sharing this one.  isinstance is deliberately not used because
        inheritance makes it behave erratically here.
        """
        if cls != type(cls.__single):
            # object.__new__ accepts no extra arguments; forwarding
            # *args/**kwargs (as the original code did) is an error on
            # Python 3 and deprecated on Python 2.6+.
            cls.__single = object.__new__(cls)
        return cls.__single

    # Plugins
    def addPlugin(self, plugObj):
        """Register a plugin module/object."""
        self.__PLUGINS.append(plugObj)

    def getPluginByName(self, pluginName):
        """Return the registered plugin whose __name__ matches, else None."""
        for plugin in self.__PLUGINS:
            if plugin.__name__ == pluginName:
                return plugin
        return None

    def getAllPlugins(self):
        """Return the shared list of registered plugins."""
        return self.__PLUGINS

    # Sequences and steps
    def addSequence(self, desc, cond, cond_match, steps):
        """Append a Sequence built from legacy-format steps."""
        self.__SEQUENCES.append(Sequence(desc, steps_new_format(steps),
                                         condition=cond,
                                         cond_match=cond_match))

    def insertSequence(self, desc, cond, cond_match, steps, index=0):
        """Insert a Sequence at the given index (front by default)."""
        self.__SEQUENCES.insert(index, Sequence(desc,
                                                steps_new_format(steps),
                                                condition=cond,
                                                cond_match=cond_match))

    def getAllSequences(self):
        """Return the shared list of registered sequences."""
        return self.__SEQUENCES

    def runAllSequences(self):
        """Run every registered sequence with the shared CONF/MESSAGES."""
        for sequence in self.__SEQUENCES:
            sequence.run(config=self.CONF, messages=self.MESSAGES)

    def getSequenceByDesc(self, desc):
        """Return the sequence whose name equals desc, else None."""
        for sequence in self.getAllSequences():
            if sequence.name == desc:
                return sequence
        return None

    def __getSequenceIndexByDesc(self, desc):
        # Internal: position of the named sequence, or None when absent.
        for sequence in self.getAllSequences():
            if sequence.name == desc:
                return self.__SEQUENCES.index(sequence)
        return None

    def insertSequenceBeforeSequence(self, sequenceName, desc, cond, cond_match, steps):
        """
        Insert a sequence before a named sequence.
        i.e. if the specified sequence name is "update x", the new
        sequence will be inserted BEFORE "update x".
        Appends at the end when the named sequence does not exist.
        """
        index = self.__getSequenceIndexByDesc(sequenceName)
        if index is None:
            index = len(self.getAllSequences())
        self.__SEQUENCES.insert(index, Sequence(desc,
                                                steps_new_format(steps),
                                                condition=cond,
                                                cond_match=cond_match))

    # Groups and params
    def addGroup(self, group, params):
        """Register a new parameter Group."""
        self.__GROUPS.append(Group(group, params))

    def getGroupByName(self, groupName):
        """Return the group with the given GROUP_NAME, else None."""
        for group in self.getAllGroups():
            if group.GROUP_NAME == groupName:
                return group
        return None

    def getAllGroups(self):
        """Return the shared list of registered groups."""
        return self.__GROUPS

    def __getGroupIndexByDesc(self, name):
        # Internal: position of the named group, or None when absent.
        for group in self.getAllGroups():
            if group.GROUP_NAME == name:
                return self.__GROUPS.index(group)
        return None

    def insertGroupBeforeGroup(self, groupName, group, params):
        """
        Insert a group before a named group.
        i.e. if the specified group name is "update x", the new
        group will be inserted BEFORE "update x".
        Appends at the end when the named group does not exist.
        """
        index = self.__getGroupIndexByDesc(groupName)
        if index is None:
            index = len(self.getAllGroups())
        self.__GROUPS.insert(index, Group(group, params))

    def getParamByName(self, paramName):
        """Return the parameter object with the given name, else None."""
        for group in self.getAllGroups():
            if paramName in group.parameters:
                return group.parameters[paramName]
        return None

    def getParamKeyValue(self, paramName, keyName):
        """Return attribute keyName of the named parameter, else None."""
        param = self.getParamByName(paramName)
        if param:
            return getattr(param, keyName)
        return None

+ 41 - 0
packstack/installer/utils/__init__.py

@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .datastructures import SortedDict
+from .decorators import retry
+from .network import get_localhost_ip
+from .network import host2ip
+from .network import force_ip
+from .network import device_from_ip
+from .shell import execute
+from .shell import ScriptRunner
+from .shortcuts import host_iter
+from .shortcuts import hosts
+from .shortcuts import get_current_user
+from .shortcuts import get_current_username
+from .shortcuts import split_hosts
+from .strings import COLORS
+from .strings import color_text
+from .strings import mask_string
+from .strings import state_format
+from .strings import state_message
+
+
# Public re-exports of the utils subpackage; keep in sync with the
# imports above.
__all__ = ('SortedDict',
           'retry',
           'get_localhost_ip', 'host2ip', 'force_ip', 'device_from_ip',
           'ScriptRunner', 'execute',
           'host_iter', 'hosts', 'get_current_user', 'get_current_username',
           'split_hosts', 'COLORS', 'color_text', 'mask_string',
           'state_format', 'state_message')

+ 137 - 0
packstack/installer/utils/datastructures.py

@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+from types import GeneratorType
+
+
# taken from Django.utils.datastructures
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.

    NOTE(review): Python 2 era code -- iteritems()/iterkeys()/itervalues()
    and the list-returning keys()/values()/items() follow the Python 2 dict
    protocol; confirm behavior before use on Python 3.
    """
    def __new__(cls, *args, **kwargs):
        # keyOrder is created in __new__ so it exists even when an instance
        # is built without __init__ running (e.g. copy/unpickling paths).
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None:
            data = {}
        elif isinstance(data, GeneratorType):
            # Unfortunately we need to be able to read a generator twice.  Once
            # to get the data into self with our super().__init__ call and a
            # second time to setup keyOrder correctly
            data = list(data)
        super(SortedDict, self).__init__(data)
        if isinstance(data, dict):
            # NOTE(review): on Python 2 dict.keys() returns a fresh list
            # (arbitrary order); on Python 3 this would be a view -- confirm.
            self.keyOrder = data.keys()
        else:
            self.keyOrder = []
            seen = set()
            for key, value in data:
                # First occurrence of a key determines its position.
                if key not in seen:
                    self.keyOrder.append(key)
                    seen.add(key)

    def __deepcopy__(self, memo):
        # Deep-copy values while preserving insertion order via iteritems().
        return self.__class__([(key, copy.deepcopy(value, memo))
                               for key, value in self.iteritems()])

    def __setitem__(self, key, value):
        # New keys go to the end; overwriting keeps the original position.
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        return iter(self.keyOrder)

    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(SortedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result

    def items(self):
        # (key, value) pairs in insertion order.
        return zip(self.keyOrder, self.values())

    def iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]

    def keys(self):
        # Return a copy so callers cannot mutate the ordering list.
        return self.keyOrder[:]

    def iterkeys(self):
        return iter(self.keyOrder)

    def values(self):
        # Values in insertion order (a list on Python 2, where map is eager).
        return map(self.__getitem__, self.keyOrder)

    def itervalues(self):
        for key in self.keyOrder:
            yield self[key]

    def update(self, dict_):
        for k, v in dict_.iteritems():
            self[k] = v

    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            # Re-inserting an existing key moves it; adjust the target index
            # when removing it shifts positions to its left.
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(SortedDict, self).__setitem__(key, value)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        obj.keyOrder = self.keyOrder[:]
        return obj

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])

    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []

+ 41 - 0
packstack/installer/utils/decorators.py

@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+
def retry(count=1, delay=0, retry_on=Exception):
    """
    Decorator which retries the wrapped function when a run raises the
    given exception type.

    :param count: number of retries after the initial attempt
    :param delay: seconds to sleep between attempts
    :param retry_on: exception class (or tuple of classes) triggering retry
    :raises ValueError: when count or delay is negative
    """
    if count < 0 or delay < 0:
        # Zero is allowed (no retries / no sleep); only negatives are invalid.
        raise ValueError('Count and delay have to be non-negative numbers.')

    def decorator(func):
        # functools.wraps preserves __name__/__doc__ (and func_name on
        # Python 2) instead of the manual func_name copy.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            tried = 0
            while tried <= count:
                try:
                    return func(*args, **kwargs)
                except retry_on:
                    # Out of retries: let the last exception propagate.
                    if tried >= count:
                        raise
                    if delay:
                        time.sleep(delay)
                    tried += 1
        return wrapper
    return decorator

+ 137 - 0
packstack/installer/utils/network.py

@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import netifaces
+import socket
+import logging
+from ..exceptions import NetworkError
+from .shell import ScriptRunner
+
+netaddr_available = True
+try:
+    import netaddr
+except ImportError:
+    netaddr_available = False
+
+
def get_localhost_ip():
    """
    Returns IP address of localhost.

    :raises NetworkError: when no default gateway is configured for either
        IPv4 or IPv6.
    """
    # Try to get the IPv4 or IPv6 default gateway, then open a socket
    # to discover our local IP
    gw = None
    for protocol in (socket.AF_INET, socket.AF_INET6):
        try:
            gw = netifaces.gateways()['default'][protocol][0]
            if protocol == socket.AF_INET6:
                # Append the interface name as a scope id (addr%iface) so
                # the connect below uses the right link for link-local gws.
                gw = gw + '%' + netifaces.gateways()['default'][protocol][1]
            discovered_protocol = protocol
            break
        except KeyError:    # No default gw for this protocol
            continue
    else:
        raise NetworkError('Local IP address discovery failed. Please set '
                           'a default gateway for your system.')

    # Connecting a UDP socket just selects the local source address the
    # kernel would use to reach the gateway; no packets are sent.
    address = socket.getaddrinfo(gw, 0, discovered_protocol,
                                 socket.SOCK_DGRAM)[0]
    s = socket.socket(discovered_protocol, socket.SOCK_DGRAM)
    s.connect(address[4])
    # Remove chars after %. Does nothing on IPv4, removes scope id in IPv6
    loc_ip = s.getsockname()[0].split('%')[0]

    return loc_ip
+
+
# Memoized resolutions, keyed by '<hostname>:<allow_localhost>'.
_host_cache = {}


def host2ip(hostname, allow_localhost=False):
    """
    Converts given hostname to IP address. Raises NetworkError
    if conversion failed.

    With allow_localhost=False, loopback-only hostnames fall back to the
    IP of the default-route interface (via get_localhost_ip).  Successful
    lookups are cached in _host_cache; the loopback fallback is not cached.
    """
    key = '{}:{}'.format(hostname, allow_localhost)
    if key in _host_cache:
        return _host_cache[key]

    try:
        ip_list = [addrinfo[4][0] for addrinfo in
                   socket.getaddrinfo(hostname, 22, 0, 0,
                                      socket.IPPROTO_TCP)]
    except socket.error:
        raise NetworkError('Unknown hostname %s.' % hostname)
    except Exception as ex:
        raise NetworkError('Unknown error appeared: %s' % repr(ex))

    if allow_localhost:
        ip = ip_list[0]
    else:
        routable = [ip for ip in ip_list if ip not in ('127.0.0.1', '::1')]
        if not routable:
            # Hostname resolves only to loopback; use the local IP instead.
            # (The original raised NameError referencing an undefined
            # variable here and then caught it to reach this fallback.)
            logging.warning('Host %s resolves only to a loopback address; '
                            'falling back to the local IP. Please check '
                            'your /etc/hosts.', hostname)
            return get_localhost_ip()
        if len(routable) > 1:
            logging.warning("Multiple IPs for host detected!")
        ip = routable[0]

    _host_cache[key] = ip
    return ip
+
+
def is_ipv6(host):
    """Return True when host is a literal IPv6 address (optionally wrapped
    in []); False for anything else, hostnames included."""
    if not netaddr_available:
        raise ImportError(
            "netaddr module unavailable, install with pip install netaddr"
        )
    candidate = host.strip().strip('[]')
    try:
        version = netaddr.IPAddress(candidate).version
    except netaddr.core.AddrFormatError:
        # Not parseable as an address at all -- most probably a hostname.
        return False
    return version == 6
+
+
def is_ipv4(host):
    """Return True when host is a literal IPv4 address; False for anything
    else, hostnames included."""
    if not netaddr_available:
        raise ImportError(
            "netaddr module unavailable, install with pip install netaddr"
        )
    candidate = host.strip()
    try:
        version = netaddr.IPAddress(candidate).version
    except netaddr.core.AddrFormatError:
        # Not parseable as an address at all -- most probably a hostname.
        return False
    return version == 4
+
+
def force_ip(host, allow_localhost=False):
    """Return host unchanged when it is already an IP literal; otherwise
    resolve it with host2ip()."""
    if is_ipv6(host) or is_ipv4(host):
        return host
    return host2ip(host, allow_localhost=allow_localhost)
+
+
def device_from_ip(ip):
    """Return the name of the single network device carrying the given IP.

    Fails (via ScriptRunner.execute -- presumably raising on a non-zero
    exit, confirm in utils.shell) when the IP is unassigned or assigned
    to more than one interface.
    """
    server = ScriptRunner()
    server.append("DEVICE=($(ip -o address show to %s | cut -f 2 -d ' '))"
                  % ip)
    # Ensure that the IP is only assigned to one interface.  The original
    # check inspected the unrelated $DISPLAY variable; the array populated
    # above is $DEVICE, so a second element means multiple interfaces.
    server.append("if [ ! -z ${DEVICE[1]} ]; then false; fi")
    # Test device, raises an exception if it doesn't exist
    server.append("ip link show \"$DEVICE\" > /dev/null")
    server.append("echo $DEVICE")
    rv, stdout = server.execute()
    return stdout.strip()

+ 152 - 0
packstack/installer/utils/shell.py

@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import os
+import types
+import logging
+import subprocess
+
+from ..exceptions import ExecuteRuntimeError
+from ..exceptions import NetworkError
+from ..exceptions import ScriptRuntimeError
+from .strings import mask_string
+
+
# Template used to frame captured STDOUT/STDERR content in debug logs.
block_fmt = ("\n============= %(title)s ==========\n%(content)s\n"
             "======== END OF %(title)s ========")
+
+
def execute(cmd, workdir=None, can_fail=True, mask_list=None,
            use_shell=False, log=True):
    """Run shell command cmd and return (returncode, stdout).

    :param cmd: command as a string, or an argument list when use_shell
                is False
    :param workdir: working directory for the child process
    :param can_fail: when True an ExecuteRuntimeError is raised if the
                     command exits non-zero; when False the non-zero
                     return code is simply returned
    :param mask_list: secret substrings masked out of all logged output
    :param use_shell: passed through to subprocess.Popen
    :param log: when False nothing is written to the log
    """
    mask_list = mask_list or []
    repl_list = [("'", "'\\''")]

    # isinstance(cmd, str) is equivalent to the former
    # types.StringType check on Python 2 and also works on Python 3
    if not isinstance(cmd, str):
        import pipes
        masked = ' '.join((pipes.quote(i) for i in cmd))
    else:
        masked = cmd
    masked = mask_string(masked, mask_list, repl_list)
    if log:
        logging.info("Executing command:\n%s" % masked)
    # copy the environment so that forcing LANG for the child does not
    # leak into the packstack process itself
    environ = os.environ.copy()
    environ['LANG'] = 'en_US.UTF8'
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd=workdir,
                            shell=use_shell, close_fds=True,
                            env=environ)
    out, err = proc.communicate()
    masked_out = mask_string(out, mask_list, repl_list)
    masked_err = mask_string(err, mask_list, repl_list)
    if log:
        logging.debug(block_fmt % {'title': 'STDOUT', 'content': masked_out})

    if proc.returncode:
        if log:
            logging.debug(block_fmt % {'title': 'STDERR',
                                       'content': masked_err})
        if can_fail:
            msg = ('Failed to execute command, '
                   'stdout: %s\nstderr: %s' %
                   (masked_out, masked_err))
            raise ExecuteRuntimeError(msg, stdout=out, stderr=err)
    return proc.returncode, out
+
+
class ScriptRunner(object):
    """Accumulates bash statements and runs them as a single script,
    either locally or via ssh (as root) on a remote host.

    All helper methods only append statements; nothing runs until
    execute() is called.
    """

    # shell command prefix used to test whether an rpm package is installed
    _pkg_search = 'rpm -q --whatprovides'

    def __init__(self, ip=None):
        # ip: remote host the script will run on; None means localhost
        self.script = []
        self.ip = ip

    def append(self, s):
        """Append one bash statement to the pending script."""
        self.script.append(s)

    def clear(self):
        """Discard all pending statements."""
        self.script = []

    def execute(self, can_fail=True, mask_list=None, log=True):
        """Run the accumulated script and return (returncode, stdout).

        Words from mask_list are masked in all logged output. When
        can_fail is True, a NetworkError is raised if the ssh transport
        failed and a ScriptRuntimeError if the script itself failed;
        otherwise the non-zero return code is simply returned.
        """
        mask_list = mask_list or []
        repl_list = [("'", "'\\''")]
        script = "\n".join(self.script)

        masked = mask_string(script, mask_list, repl_list)
        if log:
            logging.info("[%s] Executing script:\n%s" %
                         (self.ip or 'localhost', masked))

        _PIPE = subprocess.PIPE  # pylint: disable=E1101
        if self.ip:
            # remote run as root; host key checking is disabled on purpose
            # since target hosts are freshly provisioned
            cmd = ["ssh", "-o", "StrictHostKeyChecking=no",
                          "-o", "UserKnownHostsFile=/dev/null",
                          "root@%s" % self.ip, "bash -x"]
        else:
            cmd = ["bash", "-x"]
        environ = os.environ
        environ['LANG'] = 'en_US.UTF8'
        obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
                               close_fds=True, shell=False, env=environ)

        # trap on ERR makes the script exit at the first failing statement
        script = "function t(){ exit $? ; } \n trap t ERR \n" + script
        out, err = obj.communicate(script)
        masked_out = mask_string(out, mask_list, repl_list)
        masked_err = mask_string(err, mask_list, repl_list)
        if log:
            logging.debug(block_fmt % {'title': 'STDOUT',
                                       'content': masked_out})

        if obj.returncode:
            if log:
                logging.debug(block_fmt % {'title': 'STDERR',
                                           'content': masked_err})
            if can_fail:
                # stderr starting with "ssh:" means the transport failed,
                # not the script being run
                pattern = (r'^ssh\:')
                if re.search(pattern, err):
                    raise NetworkError(masked_err, stdout=out, stderr=err)
                else:
                    msg = ('Failed to run remote script, '
                           'stdout: %s\nstderr: %s' %
                           (masked_out, masked_err))
                    raise ScriptRuntimeError(msg, stdout=out, stderr=err)
        return obj.returncode, out

    def template(self, src, dst, varsdict):
        """Render template file src with %-interpolation from varsdict and
        append a heredoc statement writing the result to dst."""
        with open(src) as fp:
            content = fp.read() % varsdict
            self.append("cat > %s <<- EOF\n%s\nEOF\n" % (dst, content))

    def if_not_exists(self, path, command):
        """Append: run command only when path does not exist."""
        self.append("[ -e %s ] || %s" % (path, command))

    def if_exists(self, path, command):
        """Append: run command only when path exists."""
        self.append("[ -e %s ] && %s" % (path, command))

    def if_installed(self, pkg, command):
        """Append: run command only when rpm package pkg is installed."""
        self.append("%s %s && %s" % (self._pkg_search, pkg, command))

    def if_not_installed(self, pkg, command):
        """Append: run command only when rpm package pkg is missing."""
        self.append("%s %s || %s" % (self._pkg_search, pkg, command))

    def chown(self, target, uid, gid):
        """Append a chown of target to uid:gid."""
        self.append("chown %s:%s %s" % (uid, gid, target))

    def chmod(self, target, mode):
        """Append a chmod of target to mode."""
        self.append("chmod %s %s" % (mode, target))

+ 63 - 0
packstack/installer/utils/shortcuts.py

@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grp
+import os
+import pwd
+
+
def host_iter(config):
    """Yield (key, host) pairs for every host referenced in config.

    Keys ending in _HOST hold a single host, keys ending in _HOSTS a
    comma separated list; an optional '/suffix' (e.g. CIDR prefix) is
    stripped from each value and empty entries are skipped.
    """
    # .items() iterates the same pairs as the former Python 2 only
    # .iteritems() and is compatible with Python 3
    for key, value in config.items():
        if key.endswith("_HOST"):
            host = value.split('/')[0]
            if host:
                yield key, host
        if key.endswith("_HOSTS"):
            for chunk in value.split(","):
                host = chunk.strip().split('/')[0]
                if host:
                    yield key, host
+
+
def hosts(config):
    """Return the set of distinct hosts referenced by the config."""
    return set(host for _key, host in host_iter(config))
+
+
def get_current_user():
    """Return (uid, gid) of the user running packstack.

    Prefers the login user reported by the controlling terminal; falls
    back to the effective uid/gid when that is unavailable.
    """
    try:
        user = pwd.getpwnam(os.getlogin())
        uid, gid = user.pw_uid, user.pw_gid
    except (OSError, KeyError):
        # OSError: no controlling terminal (program run by a script);
        # KeyError: login name has no passwd entry
        uid, gid = os.getuid(), os.getgid()
    return uid, gid
+
+
def get_current_username():
    """Return (username, groupname) of the user running packstack."""
    uid, gid = get_current_user()
    return pwd.getpwuid(uid).pw_name, grp.getgrgid(gid).gr_name
+
+
def split_hosts(hosts_string):
    """Split a comma separated host list into a set of host names,
    dropping surrounding whitespace and empty entries."""
    return set(chunk.strip() for chunk in hosts_string.split(',')
               if chunk.strip())

+ 69 - 0
packstack/installer/utils/strings.py

@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+
# Mask written in place of secret values in log output.
STR_MASK = '*' * 8
# ANSI escape sequences used for colored terminal output.
COLORS = {'nocolor': "\033[0m", 'red': "\033[0;31m",
          'green': "\033[32m", 'blue': "\033[34m",
          'yellow': "\033[33m"}


def color_text(text, color):
    """Wrap text in the ANSI escape sequence for the given color.

    Allowed color values are 'red', 'blue', 'green' and 'yellow'; the
    terminating 'nocolor' sequence is appended automatically.
    """
    return COLORS[color] + text + COLORS['nocolor']
+
+
def mask_string(unmasked, mask_list=None, replace_list=None):
    """
    Replaces words from mask_list with STR_MASK in unmasked string.

    If words need to be transformed before masking, the transformation
    is described in replace_list. For example [("'","'\\''")] replaces
    all ' characters with '\\''. Longer words are masked first so one
    secret being a substring of another does not break the masking.
    """
    mask_list = mask_list or []
    replace_list = replace_list or []

    masked = unmasked
    # key=len, reverse=True gives the same longest-first, stable order as
    # the former Python 2 only cmp-function argument
    for word in sorted(mask_list, key=len, reverse=True):
        if not word:
            continue
        for before, after in replace_list:
            word = word.replace(before, after)
        masked = masked.replace(word, STR_MASK)
    return masked
+
+
def state_format(msg, state, color):
    """
    Formats state with offset according to given message.

    The offset is computed from the message length with all ANSI color
    sequences stripped, so colored messages still align in one column.
    """
    _msg = msg.strip()
    # BUGFIX: the original re-ran each substitution against the original
    # msg, so only the last color in COLORS was ever stripped (and the
    # strip() was lost); accumulate the removals instead
    for clr in COLORS.values():
        _msg = _msg.replace(clr, '')

    space = 70 - len(_msg)
    state = '[ %s ]' % color_text(state, color)
    return state.rjust(space)
+
+
def state_message(msg, state, color):
    """Return msg followed by right-aligned, colored state information."""
    return msg + state_format(msg, state, color)

+ 398 - 0
packstack/installer/validators.py

@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Contains all core validation functions.
+"""
+
+import os
+import re
+import socket
+import logging
+
+from . import utils
+
+from .exceptions import ParamValidationError
+
+
# Names exported on `from packstack.installer.validators import *`.
# validate_multi_regexp and validate_neutron were defined below but
# missing from the original export list.
__all__ = ('ParamValidationError', 'validate_integer', 'validate_float',
           'validate_regexp', 'validate_multi_regexp', 'validate_port',
           'validate_not_empty', 'validate_options',
           'validate_multi_options', 'validate_ip', 'validate_multi_ip',
           'validate_file', 'validate_ping', 'validate_multi_ping',
           'validate_ssh', 'validate_multi_ssh', 'validate_sshkey',
           'validate_ldap_url', 'validate_ldap_dn', 'validate_export',
           'validate_multi_export', 'validate_writeable_directory',
           'validate_neutron')
+
+
def validate_integer(param, options=None):
    """Raise ParamValidationError when a non-empty param cannot be parsed
    as an integer. Empty values are accepted without validation."""
    if not param:
        return
    options = options or []
    try:
        int(param)
        return
    except ValueError:
        pass
    logging.debug('validate_integer(%s, options=%s) failed.' %
                  (param, options))
    raise ParamValidationError('Given value is not an integer: %s' % param)
+
+
def validate_float(param, options=None):
    """Raise ParamValidationError when a non-empty param cannot be parsed
    as a float. Empty values are accepted without validation."""
    if not param:
        return
    options = options or []
    try:
        float(param)
        return
    except ValueError:
        pass
    logging.debug('validate_float(%s, options=%s) failed.' %
                  (param, options))
    raise ParamValidationError('Given value is not a float: %s' % param)
+
+
def validate_regexp(param, options=None):
    """Raise ParamValidationError unless param matches at least one of
    the regular expressions given in options. Empty params pass."""
    if not param:
        return
    options = options or []
    if any(re.search(regex, param) for regex in options):
        return
    logging.debug('validate_regexp(%s, options=%s) failed.' %
                  (param, options))
    msg = 'Given value does not match required regular expression: %s'
    raise ParamValidationError(msg % param)
+
+
def validate_multi_regexp(param, options=None):
    """Validate each comma separated value in param with
    validate_regexp against the expressions in options."""
    options = options or []
    for chunk in param.split(','):
        validate_regexp(chunk.strip(), options=options)
+
+
def validate_port(param, options=None):
    """
    Raises ParamValidationError if given param is not a decimal number
    in the valid port range 0..65535.

    BUGFIX: the original upper bound was exclusive, wrongly rejecting
    65535, which is a valid port number.
    """
    if not param:
        return
    options = options or []
    validate_integer(param, options)
    port = int(param)
    if not 0 <= port <= 65535:
        logging.debug('validate_port(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value is outside the range of (0, 65535): %s'
        raise ParamValidationError(msg % param)
+
+
def validate_not_empty(param, options=None):
    """Raise ParamValidationError when param is empty.

    False is explicitly accepted as a value even though it is falsy.
    """
    options = options or []
    if param or param is False:
        return
    logging.debug('validate_not_empty(%s, options=%s) failed.' %
                  (param, options))
    raise ParamValidationError('Given value is not allowed: %s' % param)
+
+
def validate_options(param, options=None):
    """Raise ParamValidationError unless param is a member of options.
    Empty params pass without validation."""
    if not param:
        return

    options = options or []
    validate_not_empty(param, options)
    if param in options:
        return
    logging.debug('validate_options(%s, options=%s) failed.' %
                  (param, options))
    msg = 'Given value is not member of allowed values %s: %s'
    raise ParamValidationError(msg % (options, param))
+
+
def validate_multi_options(param, options=None):
    """Validate that each comma separated value in param is a member of
    options. Empty params pass without validation."""
    if not param:
        return
    options = options or []
    for chunk in param.split(','):
        validate_options(chunk.strip(), options=options)
+
+
def validate_ip(param, options=None):
    """Raise ParamValidationError unless param is an IPv4 or IPv6
    address; on success the matching address family is returned."""
    if not param:
        return
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, param)
        except socket.error:
            continue
        return family
    logging.debug('validate_ip(%s, options=%s) failed.' %
                  (param, options))
    msg = 'Given host is not in IP address format: %s'
    raise ParamValidationError(msg % param)
+
+
def validate_multi_ip(param, options=None):
    """Validate each comma separated address in param with validate_ip.

    An optional '/suffix' (e.g. a CIDR prefix) is stripped from each
    entry before validation.
    """
    for entry in param.split(','):
        validate_ip(entry.split('/', 1)[0].strip(), options)
+
+
def validate_file(param, options=None):
    """Raise ParamValidationError unless the file named by param exists.
    Empty params pass without validation."""
    if not param:
        return

    options = options or []
    if os.path.isfile(param):
        return
    logging.debug('validate_file(%s, options=%s) failed.' %
                  (param, options))
    raise ParamValidationError('Given file does not exist: %s' % param)
+
+
def validate_writeable_directory(param, options=None):
    """Raise ParamValidationError unless param names an existing
    writeable directory, or a path whose parent directory is writeable
    (so the directory could be created). Empty params pass."""
    if not param:
        return

    options = options or []
    path = os.path.expanduser(param)
    parent = os.path.normpath(os.path.join(path, os.pardir))
    is_writeable_dir = os.path.isdir(path) and os.access(path, os.W_OK)
    is_creatable = os.access(parent, os.W_OK)
    if is_writeable_dir or is_creatable:
        return
    logging.debug('validate_writeable_directory(%s, options=%s) failed.' %
                  (param, options))
    msg = 'Given directory does not exist or is not writeable: %s'
    raise ParamValidationError(msg % param)
+
+
def validate_ping(param, options=None):
    """Raise ParamValidationError when the host given in param does not
    answer a single ICMP echo request. Empty params pass."""
    if not param:
        return
    options = options or []
    rc = utils.execute(['/bin/ping', '-c', '1', str(param)],
                       can_fail=False)[0]
    if rc == 0:
        return
    logging.debug('validate_ping(%s, options=%s) failed.' %
                  (param, options))
    raise ParamValidationError('Given host is unreachable: %s' % param)
+
+
def validate_multi_ping(param, options=None):
    """Validate each comma separated host in param with validate_ping."""
    options = options or []
    for host in param.split(","):
        validate_ping(host.strip())
+
+
# Cache of "host:port" endpoints that already passed the check, so each
# endpoint is probed at most once per run.
_tested_ports = []


def touch_port(host, port):
    """
    Check that provided host is listening on provided port.

    Raises socket.error when the connection cannot be established;
    successful checks are cached and never repeated.
    """
    key = "%s:%d" % (host, port)
    if key in _tested_ports:
        return
    sock = socket.create_connection((host, port))
    try:
        sock.shutdown(socket.SHUT_RDWR)
    finally:
        # close the descriptor even if shutdown fails, avoiding a leak
        sock.close()
    _tested_ports.append(key)
+
+
def validate_ssh(param, options=None):
    """Raise ParamValidationError when the host given in param does not
    listen on the SSH port (22). Empty params pass."""
    if not param:
        return
    options = options or []
    try:
        touch_port(param.strip(), 22)
        return
    except socket.error:
        pass
    logging.debug('validate_ssh(%s, options=%s) failed.' %
                  (param, options))
    raise ParamValidationError('Given host does not listen on port 22: %s'
                               % param)
+
+
def validate_multi_ssh(param, options=None):
    """Validate each comma separated host in param with validate_ssh."""
    options = options or []
    for host in param.split(","):
        validate_ssh(host)
+
+
def validate_sshkey(param, options=None):
    """
    Raises ParamValidationError if provided sshkey file is not public key.

    Only the first line of the file is inspected. Empty params pass.
    """
    if not param:
        return
    with open(param) as sshkey:
        line = sshkey.readline()
    # broadened from 'BEGIN [RD]SA PRIVATE KEY' so that EC and OpenSSH
    # format private keys are also reported as private keys instead of
    # as a generic invalid header
    if re.search('BEGIN [A-Z ]*PRIVATE KEY', line):
        raise ParamValidationError(
            'Public SSH key is required. You passed private key.'
        )
    if not re.search('ssh-|ecdsa', line):
        raise ParamValidationError(
            'Invalid content header in %s, Public SSH key is required.'
            % param
        )
+
+
def validate_ldap_url(param, options=None):
    """Raise ParamValidationError when param is not a valid LDAP URL or
    the python ldap support is missing. Empty params pass."""
    if not param:
        return
    try:
        import ldapurl
    except ImportError:
        raise ParamValidationError(
            'The python ldap package is required to use this functionality.'
        )

    try:
        ldapurl.LDAPUrl(param)
    except ValueError as ve:
        raise ParamValidationError(
            'The given string [%s] is not a valid LDAP URL: %s' % (param, ve)
        )
+
+
def validate_ldap_dn(param, options=None):
    """Raise ParamValidationError when param is not a valid LDAP DN or
    the python ldap support is missing. Empty params pass."""
    if not param:
        return
    try:
        import ldap
        import ldap.dn
    except ImportError:
        raise ParamValidationError(
            'The python ldap package is required to use this functionality.'
        )

    try:
        ldap.dn.str2dn(param)
    except ldap.DECODING_ERROR as de:
        raise ParamValidationError(
            'The given string [%s] is not a valid LDAP DN: %s' % (param, de)
        )
+
+
def validate_export(param, options=None):
    """Raise ParamValidationError unless param is a valid NFS export of
    the form IP:/path. IPv6 addresses must be wrapped in square
    brackets; unbracketed addresses must be IPv4."""
    msg = ('The nfs export [%s] is not a valid export - use squares around ipv6 addresses -.' %
           param)
    parts = param.split(':/')
    if len(parts) != 2:
        raise ParamValidationError(msg)
    ip, export = parts
    bracketed = re.search(r'\[([^]]+)\]', ip)
    if bracketed:
        # the bracketed content must be a valid IPv6 address
        ip_to_test = bracketed.group(1)
        if not utils.network.is_ipv6(ip_to_test):
            raise ParamValidationError(msg)
    else:
        # without brackets only IPv4 is acceptable; an unbracketed IPv6
        # address would make the mount fail
        ip_to_test = ip
        if not utils.network.is_ipv4(ip):
            raise ParamValidationError(msg)
    validate_ip(ip_to_test, options)
    if not export:
        raise ParamValidationError(msg)
+
+
def validate_multi_export(param, options=None):
    """Validate each comma separated NFS export in param with
    validate_export."""
    for export in param.split(","):
        validate_export(export)
+
+
def validate_neutron(param, options=None):
    """Raise ParamValidationError unless Neutron is enabled ('y').

    Intended to make the user aware that nova-network was removed in the
    Ocata cycle, so Neutron is mandatory.
    """
    validate_options(param, options=options)
    if param == 'y':
        return
    raise ParamValidationError(
        'Nova network support has been removed in Ocata. Neutron service must be enabled'
    )

+ 0 - 0
packstack/modules/__init__.py


+ 99 - 0
packstack/modules/common.py

@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import netaddr
+
+from ..installer import utils
+
+
def filtered_hosts(config, exclude=True, dbhost=True):
    """
    Returns the set of hosts that require installation.

    When dbhost is True the MariaDB host is skipped unless
    CONFIG_MARIADB_INSTALL is 'y'; the vCenter host is skipped for
    VMware backends; when exclude is True, hosts listed in
    EXCLUDE_SERVERS are removed from the result.
    """
    excluded = set(
        server.strip()
        for server in config.get('EXCLUDE_SERVERS', '').split(',')
        if server.strip()
    )
    skip_db = dbhost and config.get('CONFIG_MARIADB_INSTALL') != 'y'
    skip_vcenter = config.get('CONFIG_VMWARE_BACKEND') == 'y'

    result = set()
    for hosttype, hostname in utils.host_iter(config):
        # omit the MariaDB host when MariaDB is not being installed
        if skip_db and hosttype == 'CONFIG_MARIADB_HOST':
            continue
        if skip_vcenter and hosttype == 'CONFIG_VCENTER_HOST':
            continue
        result.add(hostname)
    return result - excluded if exclude else result
+
+
def is_all_in_one(config):
    """Return True when packstack targets a single host (all-in-one).

    Hosts excluded from installation still count here; the MariaDB host
    is only counted when MariaDB is actually being installed.
    """
    return 1 == len(filtered_hosts(config, exclude=False, dbhost=True))
+
+
def cidr_to_ifname(cidr, host, config):
    """
    Translate CIDR subnets to the host's interface names.

    cidr may hold comma separated items, each itself a colon separated
    sequence. Every fragment that parses as a CIDR subnet is replaced by
    the name of the host interface living in that subnet; other
    fragments are kept verbatim. The config dict must carry discovered
    host details in HOST_DETAILS.
    """
    if not config or not config['HOST_DETAILS'] or '/' not in cidr:
        raise ValueError(
            'Cannot translate CIDR to interface, invalid parameters '
            'were given.'
        )
    info = config['HOST_DETAILS'][host]

    result = []
    for item in cidr.split(','):
        translated = []
        for fragment in item.split(':'):
            try:
                wanted = netaddr.IPNetwork(fragment)
            except netaddr.AddrFormatError:
                # not a CIDR at all; keep the fragment untouched
                translated.append(fragment)
                continue
            for interface in info['interfaces'].split(','):
                interface = interface.strip()
                ipaddr = info.get('ipaddress_{}'.format(interface))
                netmask = info.get('netmask_{}'.format(interface))
                if not (ipaddr and netmask):
                    continue
                owned = netaddr.IPNetwork(
                    '{}/{}'.format(ipaddr, netmask)
                )
                if wanted == owned:
                    translated.append(interface)
                    break
            # NOTE(review): a CIDR fragment matching no interface is
            # silently dropped from the output -- confirm this is intended
        result.append(':'.join(translated))
    return ','.join(result)
+
+
def find_pair_with(pairs_list, element, index):
    """Return the first "key:value" pair from pairs_list whose key
    (index 0) or value (index 1) equals element, whitespace-stripped.

    Raises KeyError when no pair matches.
    """
    for pair in pairs_list:
        if pair.split(':')[index].strip() == element:
            return pair
    raise KeyError('Couldn\'t find element %s in %s.' % (element, pairs_list))

+ 83 - 0
packstack/modules/documentation.py

@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# Author: Martin Magr <mmagr@redhat.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from docutils import core
+
+
+# ------------------ helpers to locate option list ------------------ #
def _iter_by_titles(tree):
    """Yield (title, dom_node) for each top-level doctree child that has
    a non-empty <title> element."""
    for section in tree.children:
        dom = section.asdom()
        for child in dom.childNodes:
            if child.nodeName != 'title':
                continue
            if child.childNodes and child.childNodes[0].nodeValue:
                yield child.childNodes[0].nodeValue, dom
+
+
def _get_options(tree, section):
    """Return the DOM node of the section titled `section`, or None."""
    for title, node in _iter_by_titles(tree):
        if title == section:
            return node
    return None
+
+
+# --------------------- helper to locate options -------------------- #
def _iter_options(section):
    """Yield (option_name, usage_text) pairs parsed from the definition
    lists found inside the given section node."""
    for subsection in section.childNodes:
        for candidate in subsection.childNodes:
            if candidate.nodeName != 'definition_list':
                # TO-DO: log parsing warning
                continue
            for defitem in candidate.childNodes:
                keys = defitem.getElementsByTagName('strong')
                vals = defitem.getElementsByTagName('paragraph')
                if not keys or not vals:
                    # TO-DO: log parsing warning
                    continue
                yield (keys[0].childNodes[0].nodeValue,
                       vals[0].childNodes[0].nodeValue)
+
+
+# ----------------------------- interface --------------------------- #
# Cache of CONF_NAME -> usage text parsed from the rst file; filled once
# on the first call to update_params_usage().
_rst_cache = {}


def update_params_usage(path, params, opt_title='OPTIONS', sectioned=True):
    """Updates params dict with USAGE texts parsed from given rst file.

    :param path: path of the rst documentation file
    :param params: dict of sections (lists of param dicts) when
                   sectioned is True, otherwise a flat list of params
    :param opt_title: title of the rst section holding the option list
    """
    def _update(section, rst):
        for param in section:
            if param['CONF_NAME'] not in rst:
                # TO-DO: log warning
                continue
            param['USAGE'] = rst[param['CONF_NAME']]

    if not _rst_cache:
        # close the file deterministically instead of leaking the handle
        with open(path) as rst_file:
            source = rst_file.read().decode('utf-8')
        tree = core.publish_doctree(source=source, source_path=path)
        for key, value in _iter_options(_get_options(tree, opt_title)):
            _rst_cache.setdefault(key, value)

    if sectioned:
        for section in params.values():
            _update(section, _rst_cache)
    else:
        _update(params, _rst_cache)

+ 172 - 0
packstack/modules/ospluginutils.py

@@ -0,0 +1,172 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import yaml
+
+from OpenSSL import crypto
+from time import time
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer.setup_controller import Controller
+
+controller = Controller()
+
+PUPPET_DIR = os.path.join(basedefs.DIR_PROJECT_DIR, "puppet")
+PUPPET_TEMPLATE_DIR = os.path.join(PUPPET_DIR, "templates")
+HIERA_COMMON_YAML = os.path.join(basedefs.HIERADATA_DIR, "common.yaml")
+# For compatibility with hiera < 3.0
+HIERA_DEFAULTS_YAML = os.path.join(basedefs.HIERADATA_DIR, "defaults.yaml")
+
+
class ManifestFiles(object):
    """Accumulates puppet manifest content before it is written to disk."""

    def __init__(self):
        # (filename, marker) pairs in order of first registration
        self.filelist = []
        # filename -> accumulated manifest text
        self.data = {}

    # continuous manifest file that have the same marker can be
    # installed in parallel, if on different servers
    def addFile(self, filename, marker, data=''):
        """Append `data` to the named manifest, registering it if new."""
        existing = self.data.get(filename, '')
        self.data[filename] = existing + '\n' + data
        self._register(filename, marker)

    def prependFile(self, filename, marker, data=''):
        """Prepend `data` to the named manifest, registering it if new."""
        existing = self.data.get(filename, '')
        self.data[filename] = data + '\n' + existing
        self._register(filename, marker)

    def _register(self, filename, marker):
        # Record the pair only on the first time a filename is seen; the
        # marker of later additions is deliberately ignored.
        if all(name != filename for name, _ in self.filelist):
            self.filelist.append((filename, marker))

    def getFiles(self):
        """Return a copy of the registered (filename, marker) pairs."""
        return list(self.filelist)

    def writeManifests(self):
        """
        Write out the manifest data to disk, this should only be called once
        write before the puppet manifests are copied to the various servers
        """
        os.mkdir(basedefs.PUPPET_MANIFEST_DIR, 0o700)
        for fname, data in self.data.items():
            path = os.path.join(basedefs.PUPPET_MANIFEST_DIR, fname)
            # O_EXCL guarantees we never silently overwrite a manifest
            fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600)
            with os.fdopen(fd, 'w') as fp:
                fp.write(data)


manifestfiles = ManifestFiles()
+
+
def getManifestTemplate(template_name):
    """Load a puppet template and interpolate controller.CONF into it.

    A '.pp' suffix is appended to `template_name` when missing.
    """
    if not template_name.endswith(".pp"):
        template_name = "%s.pp" % template_name
    template_path = os.path.join(PUPPET_TEMPLATE_DIR, template_name)
    with open(template_path) as fp:
        return fp.read() % controller.CONF
+
+
def appendManifestFile(manifest_name, data, marker=''):
    # Convenience wrapper around the module-level ManifestFiles singleton:
    # appends `data` to the named manifest file under the given marker.
    manifestfiles.addFile(manifest_name, marker, data)
+
+
def generateHieraDataFile():
    """Dump controller.CONF as YAML into the hiera common data file.

    A defaults.yaml symlink is created as well, for compatibility with
    hiera < 3.0.
    """
    os.mkdir(basedefs.HIERADATA_DIR, 0o700)
    content = yaml.dump(controller.CONF,
                        explicit_start=True,
                        default_flow_style=False)
    with open(HIERA_COMMON_YAML, 'w') as outfile:
        outfile.write(content)
    os.symlink(os.path.basename(HIERA_COMMON_YAML), HIERA_DEFAULTS_YAML)
+
+
def generate_ssl_cert(config, host, service, ssl_key_file, ssl_cert_file):
    """
    Wrapper on top of openssl

    Generates a CA-signed certificate/key pair for `service` on `host`,
    delivers the CA certificate, the new certificate and its key to the
    remote host, and caches the certificate locally. When the locally
    cached certificate already exists the whole step is skipped.
    """
    # We have to check whether the certificate already exists
    cert_dir = os.path.join(config['CONFIG_SSL_CERT_DIR'], 'certs')
    local_cert_name = host + os.path.basename(ssl_cert_file)
    local_cert_path = os.path.join(cert_dir, local_cert_name)
    if os.path.exists(local_cert_path):
        return

    # Fix: read the CA material via context managers so the file
    # descriptors are closed deterministically instead of leaking.
    with open(config['CONFIG_SSL_CACERT_FILE'], 'rt') as f:
        ca_file = f.read()
    with open(config['CONFIG_SSL_CACERT_KEY_FILE'], 'rt') as f:
        ca_key_file = f.read()
    ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, ca_key_file)
    ca = crypto.load_certificate(crypto.FILETYPE_PEM, ca_file)

    k = crypto.PKey()
    k.generate_key(crypto.TYPE_RSA, 4096)
    mail = config['CONFIG_SSL_CERT_SUBJECT_MAIL']
    hostinfo = config['HOST_DETAILS'][host]
    fqdn = hostinfo['fqdn']
    cert = crypto.X509()
    subject = cert.get_subject()
    subject.C = config['CONFIG_SSL_CERT_SUBJECT_C']
    subject.ST = config['CONFIG_SSL_CERT_SUBJECT_ST']
    subject.L = config['CONFIG_SSL_CERT_SUBJECT_L']
    subject.O = config['CONFIG_SSL_CERT_SUBJECT_O']
    subject.OU = config['CONFIG_SSL_CERT_SUBJECT_OU']
    subject.CN = "%s/%s" % (service, fqdn)
    subject.emailAddress = mail

    cert.add_extensions([
        crypto.X509Extension(
            "keyUsage".encode('ascii'),
            False,
            "nonRepudiation,digitalSignature,keyEncipherment".encode('ascii')),
        crypto.X509Extension(
            "extendedKeyUsage".encode('ascii'),
            False,
            "clientAuth,serverAuth".encode('ascii')),
    ])

    cert.gmtime_adj_notBefore(0)
    # Certificate is valid for ten years (315360000 seconds).
    cert.gmtime_adj_notAfter(315360000)
    cert.set_issuer(ca.get_subject())
    cert.set_pubkey(k)
    # The current UNIX timestamp doubles as the serial number.
    serial = int(time())
    cert.set_serial_number(serial)
    # NOTE(review): sha1 is a weak signing digest; kept to preserve
    # existing behavior -- consider migrating to sha256.
    cert.sign(ca_key, 'sha1')

    final_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
    final_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
    deliver_ssl_file(ca_file, config['CONFIG_SSL_CACERT'], host)
    deliver_ssl_file(final_cert, ssl_cert_file, host)
    deliver_ssl_file(final_key, ssl_key_file, host)

    with open(local_cert_path, 'w') as f:
        f.write(final_cert)
+
+
def deliver_ssl_file(content, path, host):
    # Idempotently place `content` at `path` on the remote `host`: the file
    # is only (re)written when it does not already contain the content.
    # NOTE(review): content is embedded into a shell command inside single
    # quotes -- this assumes the content (PEM data here) never contains a
    # single quote; verify before delivering other kinds of content.
    server = utils.ScriptRunner(host)
    server.append("grep -- '{content}' {path} || "
                  "echo '{content}' > {path} ".format(
                      content=content,
                      path=path))
    server.execute()
+
+
def gethostlist(CONF):
    """Collect unique host addresses from *_HOST and *_HOSTS config values.

    Any '/suffix' on a value is stripped, empty entries are ignored, and
    the order of first appearance is preserved.
    """
    hosts = []

    def _collect(candidate):
        # Drop anything after a '/' and skip empties and duplicates.
        candidate = candidate.split('/')[0]
        if candidate and candidate not in hosts:
            hosts.append(candidate)

    for key, value in CONF.items():
        if key.endswith("_HOST"):
            _collect(value)
        if key.endswith("_HOSTS"):
            for entry in value.split(","):
                _collect(entry.strip())
    return hosts

+ 121 - 0
packstack/modules/puppet.py

@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import re
+
+from packstack.installer.exceptions import PuppetError
+
+
+# TODO: Fill logger name when logging system will be refactored
+logger = logging.getLogger()
+
# Matches ANSI color escape sequences so they can be stripped from log lines.
re_color = re.compile('\x1b.*?\d\dm')
# Matches log lines that indicate a Puppet/ruby failure.
re_error = re.compile(
    'err:|Syntax error at|^Duplicate definition:|^Invalid tag|'
    '^No matching value for selector param|^Parameter name failed:|Error:|'
    '^Invalid parameter|^Duplicate declaration:|^Could not find resource|'
    '^Could not parse for|^/usr/bin/puppet:\d+: .+|.+\(LoadError\)|'
    '^Could not autoload|'
    '^\/usr\/bin\/env\: jruby\: No such file or directory|'
    'failed to execute puppet'
)
# Known benign error patterns that must not abort the installation.
re_ignore = re.compile(
    # Puppet preloads a provider using the mysql command before it is installed
    'Command mysql is missing|'
    # Puppet preloads a database_grant provider which fails if /root/.my.cnf
    # is missing, this is ok because it will be retried later if needed
    'Could not prefetch database_grant provider.*?\\.my\\.cnf|'
    # Swift Puppet module tries to install swift-plugin-s3, there is no such
    # package on RHEL, fixed in the upstream puppet module
    'yum.*?install swift-plugin-s3|'
    # facter gives a weird NM error when it's disabled, due to
    # https://tickets.puppetlabs.com/browse/FACT-697
    'NetworkManager is not running'
)
# Extracts the message payload from packstack_info Notify resources.
re_notice = re.compile(r"notice: .*Notify\[packstack_info\]"
                       "\/message: defined \'message\' as "
                       "\'(?P<message>.*)\'")

# (pattern, replacement) pairs translating cryptic Puppet errors into
# user-friendly messages; '%(argN)s' placeholders in the replacement are
# filled from the pattern's positional capture groups.
surrogates = [
    # Value in /etc/sysctl.conf cannot be changed
    ('Sysctl::Value\[.*\]\/Sysctl\[(?P<arg1>.*)\].*Field \'val\' is required',
        'Cannot change value of %(arg1)s in /etc/sysctl.conf'),
    # Package is not found in yum repos
    ('Package\[.*\]\/ensure.*yum.*install (?P<arg1>.*)\'.*Nothing to do',
        'Package %(arg1)s has not been found in enabled Yum repos.'),
    ('Execution of \'.*yum.*install (?P<arg1>.*)\'.*Nothing to do',
        'Package %(arg1)s has not been found in enabled Yum repos.'),
    # Packstack does not cooperate with jruby
    ('jruby', 'Your Puppet installation uses jruby instead of ruby. Package '
              'jruby does not cooperate with Packstack well. You will have to '
              'fix this manually.'),
]
+
+
def validate_logfile(logpath):
    """
    Check given Puppet log file for errors and raise PuppetError if there is
    any error
    """
    manifestpath = os.path.splitext(logpath)[0]
    manifestfile = os.path.basename(manifestpath)
    with open(logpath) as logfile:
        for raw_line in logfile:
            stripped = raw_line.strip()

            if not re_error.search(stripped):
                continue

            # Strip ANSI color codes before reporting.
            error = re_color.sub('', stripped)
            if re_ignore.search(stripped):
                logger.debug(
                    'Ignoring expected error during Puppet run %s: %s'
                    % (manifestfile, error))
                continue

            for regex, surrogate in surrogates:
                match = re.search(regex, error)
                if match is None:
                    continue
                # Map positional capture groups onto %(argN)s placeholders.
                args = dict(('arg%d' % idx, grp)
                            for idx, grp in enumerate(match.groups(), 1))
                error = surrogate % args

            raise PuppetError(
                'Error appeared during Puppet run: %s\n%s\n'
                'You will find full trace in log %s'
                % (manifestfile, error, logpath))
+
+
def scan_logfile(logpath):
    """
    Returns list of packstack_info/packstack_warn notices parsed from
    given puppet log file.
    """
    with open(logpath) as logfile:
        matches = (re_notice.search(line) for line in logfile)
        return [m.group('message') for m in matches if m]

+ 0 - 0
packstack/plugins/__init__.py


+ 203 - 0
packstack/plugins/amqp_002.py

@@ -0,0 +1,203 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures AMQP
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer import utils
+
+from packstack.modules.common import filtered_hosts
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- AMQP Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "AMQP"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Register the AMQP parameter groups with the installer controller.

    Three groups are added: base AMQP settings (always asked), SSL
    settings (asked only when CONFIG_AMQP_ENABLE_SSL is 'y') and
    authentication settings (asked only when CONFIG_AMQP_ENABLE_AUTH
    is 'y').
    """
    # Base AMQP parameters, prompted unconditionally.
    params = [
        {"CMD_OPTION": "amqp-backend",
         "PROMPT": "Set the AMQP service backend",
         "OPTION_LIST": ["rabbitmq"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "rabbitmq",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_BACKEND",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False,
         "DEPRECATES": ['CONFIG_AMQP_SERVER']},

        {"CMD_OPTION": "amqp-host",
         "PROMPT": "Enter the host for the AMQP service",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_ssh],
         "DEFAULT_VALUE": utils.get_localhost_ip(),
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_HOST",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-enable-ssl",
         "PROMPT": "Enable SSL for the AMQP service?",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_ENABLE_SSL",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-enable-auth",
         "PROMPT": "Enable Authentication for the AMQP service?",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "n",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_AMQP_ENABLE_AUTH",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
    group = {"GROUP_NAME": "AMQP",
             "DESCRIPTION": "AMQP Config parameters",
             "PRE_CONDITION": False,
             "PRE_CONDITION_MATCH": True,
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)

    # SSL-only parameters; group PRE_CONDITION gates the prompt.
    params = [
        {"CMD_OPTION": "amqp-nss-certdb-pw",
         "PROMPT": "Enter the password for NSS certificate database",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_NSS_CERTDB_PW",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]
    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
    group = {"GROUP_NAME": "AMQPSSL",
             "DESCRIPTION": "AMQP Config SSL parameters",
             "PRE_CONDITION": "CONFIG_AMQP_ENABLE_SSL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)

    # Authentication-only parameters; gated on CONFIG_AMQP_ENABLE_AUTH.
    params = [
        {"CMD_OPTION": "amqp-auth-user",
         "PROMPT": "Enter the user for amqp authentication",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "amqp_user",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_AUTH_USER",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "amqp-auth-password",
         "PROMPT": "Enter the password for user authentication",
         "OPTION_LIST": ["y", "n"],
         "VALIDATORS": [validators.validate_not_empty],
         "PROCESSORS": [processors.process_password],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_AMQP_AUTH_PASSWORD",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]
    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
    group = {"GROUP_NAME": "AMQPAUTH",
             "DESCRIPTION": "AMQP Config Athentication parameters",
             "PRE_CONDITION": "CONFIG_AMQP_ENABLE_AUTH",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
+
+
def initSequences(controller):
    """Register the AMQP installation sequence with the controller."""
    steps = [
        {'title': 'Preparing AMQP entries',
         'functions': [create_manifest]},
    ]
    controller.addSequence("Installing AMQP", [], [], steps)
+
+
+# ------------------------ step functions -------------------------
+
def create_manifest(config, messages):
    """Derive AMQP connection settings and firewall rules from config.

    Mutates `config` in place: sets SSL/auth defaults, client port,
    protocol, bracketed host URL for IPv6 and FIREWALL_AMQP_RULES.
    """
    # Fix: dropped an unused `utils.ScriptRunner(...)` instance that was
    # created here but never executed (dead code).
    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        config['CONFIG_AMQP_SSL_ENABLED'] = True
        config['CONFIG_AMQP_PROTOCOL'] = 'ssl'
        config['CONFIG_AMQP_CLIENTS_PORT'] = "5671"
        amqp_host = config['CONFIG_AMQP_HOST']
        service = 'AMQP'
        ssl_key_file = '/etc/pki/tls/private/ssl_amqp.key'
        ssl_cert_file = '/etc/pki/tls/certs/ssl_amqp.crt'
        # Propagate the CA cert path for AMQP clients (the extra local
        # `cacert` binding was unused and has been removed).
        config['CONFIG_AMQP_SSL_CACERT_FILE'] = config['CONFIG_SSL_CACERT']
        generate_ssl_cert(config, amqp_host, service, ssl_key_file,
                          ssl_cert_file)
    else:
        # Set default values
        config['CONFIG_AMQP_CLIENTS_PORT'] = "5672"
        config['CONFIG_AMQP_SSL_ENABLED'] = False
        config['CONFIG_AMQP_PROTOCOL'] = 'tcp'

    if config['CONFIG_AMQP_ENABLE_AUTH'] == 'n':
        config['CONFIG_AMQP_AUTH_PASSWORD'] = 'guest'
        config['CONFIG_AMQP_AUTH_USER'] = 'guest'

    # IPv6 addresses must be bracketed when used inside URLs.
    if config['CONFIG_IP_VERSION'] == 'ipv6':
        config['CONFIG_AMQP_HOST_URL'] = "[%s]" % config['CONFIG_AMQP_HOST']
    else:
        config['CONFIG_AMQP_HOST_URL'] = config['CONFIG_AMQP_HOST']

    fw_details = dict()
    # All hosts should be able to talk to amqp
    for host in filtered_hosts(config, exclude=False):
        key = "amqp_%s" % host
        fw_details.setdefault(key, {})
        fw_details[key]['host'] = "%s" % host
        fw_details[key]['service_name'] = "amqp"
        fw_details[key]['chain'] = "INPUT"
        fw_details[key]['ports'] = ['5671', '5672']
        fw_details[key]['proto'] = "tcp"
    config['FIREWALL_AMQP_RULES'] = fw_details

+ 117 - 0
packstack/plugins/aodh_810.py

@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Aodh
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Aodh Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Aodh"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Register the Aodh parameter group with the installer controller.

    The group is only activated when both Ceilometer and Aodh are
    selected for installation (see `use_aodh` below).
    """
    aodh_params = {
        "AODH": [
            {"CONF_NAME": "CONFIG_AODH_KS_PW",
             "CMD_OPTION": "aodh-ks-passwd",
             "PROMPT": "Enter the password for the Aodh Keystone access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "aodh-db-passwd",
             "PROMPT": "Enter the password for the aodh DB access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_AODH_DB_PW",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},
        ]
    }

    update_params_usage(basedefs.PACKSTACK_DOC, aodh_params)

    def use_aodh(config):
        # Aodh is only installed when Ceilometer is being installed too.
        return (config['CONFIG_CEILOMETER_INSTALL'] == 'y' and
                config['CONFIG_AODH_INSTALL'] == 'y')

    aodh_groups = [
        {"GROUP_NAME": "AODH",
         "DESCRIPTION": "Aodh Config parameters",
         "PRE_CONDITION": use_aodh,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in aodh_groups:
        paramList = aodh_params[group["GROUP_NAME"]]
        controller.addGroup(group, paramList)
+
+
def initSequences(controller):
    """Register Aodh steps; no-op unless Aodh and Ceilometer are selected."""
    conf = controller.CONF
    if (conf['CONFIG_AODH_INSTALL'] != 'y' or
            conf['CONFIG_CEILOMETER_INSTALL'] != 'y'):
        return

    steps = [{'title': 'Preparing Aodh entries',
              'functions': [create_manifest]}]
    controller.addSequence("Installing OpenStack Aodh", [], [], steps)
+
+
+# -------------------------- step functions --------------------------
+
def create_manifest(config, messages):
    """Prepare Aodh SSL certs (when AMQP SSL is on) and firewall rules."""
    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        ssl_cert_file = config['CONFIG_AODH_SSL_CERT'] = (
            '/etc/pki/tls/certs/ssl_amqp_aodh.crt'
        )
        ssl_key_file = config['CONFIG_AODH_SSL_KEY'] = (
            '/etc/pki/tls/private/ssl_amqp_aodh.key'
        )
        generate_ssl_cert(config, config['CONFIG_CONTROLLER_HOST'], 'aodh',
                          ssl_key_file, ssl_cert_file)

    # The Aodh API port is opened for all hosts.
    config['FIREWALL_AODH_RULES'] = {
        'aodh_api': {'host': "ALL",
                     'service_name': "aodh-api",
                     'chain': "INPUT",
                     'ports': ['8042'],
                     'proto': "tcp"},
    }

+ 261 - 0
packstack/plugins/ceilometer_800.py

@@ -0,0 +1,261 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Ceilometer
+"""
+
+import uuid
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Ceilometer Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Ceilometer"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Register Ceilometer, MongoDB and Redis parameter groups.

    All three groups are gated on CONFIG_CEILOMETER_INSTALL; the Redis
    group is additionally gated on the redis coordination backend.
    """
    ceilometer_params = {
        "CEILOMETER": [
            {"CONF_NAME": "CONFIG_CEILOMETER_SECRET",
             "CMD_OPTION": "ceilometer-secret",
             "PROMPT": "Enter the Ceilometer secret key",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": uuid.uuid4().hex[:16],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "USE_DEFAULT": True,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CONF_NAME": "CONFIG_CEILOMETER_KS_PW",
             "CMD_OPTION": "ceilometer-ks-passwd",
             "PROMPT": "Enter the password for the Ceilometer Keystone access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "ceilometer-service-name",
             "PROMPT": "Enter the Ceilometer service name.",
             "OPTION_LIST": ['ceilometer', 'httpd'],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "httpd",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_CEILOMETER_SERVICE_NAME',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CONF_NAME": "CONFIG_CEILOMETER_COORDINATION_BACKEND",
             "CMD_OPTION": "ceilometer-coordination-backend",
             "PROMPT": "Enter the coordination driver",
             "OPTION_LIST": ['redis', 'none'],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": 'redis',
             "MASK_INPUT": False,
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CONF_NAME": "CONFIG_CEILOMETER_METERING_BACKEND",
             "CMD_OPTION": "ceilometer-metering-backend",
             "PROMPT": "Enter the metering backend to use",
             "OPTION_LIST": ['database', 'gnocchi'],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": 'database',
             "MASK_INPUT": False,
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CONF_NAME": "CONFIG_CEILOMETER_EVENTS_BACKEND",
             "CMD_OPTION": "ceilometer-events-backend",
             "PROMPT": "Enter the events backend to use",
             "OPTION_LIST": ['database', 'panko'],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": 'database',
             "MASK_INPUT": False,
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "MONGODB": [
            {"CMD_OPTION": "mongodb-host",
             "PROMPT": "Enter the host for the MongoDB server",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_MONGODB_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],
        "REDIS": [
            {"CMD_OPTION": "redis-host",
             "PROMPT": "Enter the host for the Redis server",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_REDIS_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ["CONFIG_REDIS_MASTER_HOST"]},
            {"CMD_OPTION": "redis-port",
             "PROMPT": "Enter the port of the redis server(s)",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_port],
             "DEFAULT_VALUE": 6379,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_REDIS_PORT",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],
    }
    update_params_usage(basedefs.PACKSTACK_DOC, ceilometer_params)

    ceilometer_groups = [
        {"GROUP_NAME": "CEILOMETER",
         "DESCRIPTION": "Ceilometer Config parameters",
         "PRE_CONDITION": "CONFIG_CEILOMETER_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "MONGODB",
         "DESCRIPTION": "MONGODB Config parameters",
         "PRE_CONDITION": "CONFIG_CEILOMETER_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "REDIS",
         "DESCRIPTION": "Redis Config parameters",
         "PRE_CONDITION": "CONFIG_CEILOMETER_COORDINATION_BACKEND",
         "PRE_CONDITION_MATCH": "redis",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in ceilometer_groups:
        paramList = ceilometer_params[group["GROUP_NAME"]]
        controller.addGroup(group, paramList)
+
+
def initSequences(controller):
    """Register Ceilometer steps; no-op unless Ceilometer is selected."""
    if controller.CONF['CONFIG_CEILOMETER_INSTALL'] != 'y':
        return

    steps = [
        {'title': 'Preparing MongoDB entries',
         'functions': [create_mongodb_manifest]},
        {'title': 'Preparing Redis entries',
         'functions': [create_redis_manifest]},
        {'title': 'Preparing Ceilometer entries',
         'functions': [create_manifest]},
    ]
    controller.addSequence("Installing OpenStack Ceilometer", [], [], steps)
+
+
+# -------------------------- step functions --------------------------
+
def create_manifest(config, messages):
    """Prepare Ceilometer SSL certs (when AMQP SSL is on) and firewall rules."""
    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        ssl_cert_file = config['CONFIG_CEILOMETER_SSL_CERT'] = (
            '/etc/pki/tls/certs/ssl_amqp_ceilometer.crt'
        )
        ssl_key_file = config['CONFIG_CEILOMETER_SSL_KEY'] = (
            '/etc/pki/tls/private/ssl_amqp_ceilometer.key'
        )
        generate_ssl_cert(config, config['CONFIG_CONTROLLER_HOST'],
                          'ceilometer', ssl_key_file, ssl_cert_file)

    # The Ceilometer API port is opened for all hosts.
    config['FIREWALL_CEILOMETER_RULES'] = {
        'ceilometer_api': {'host': "ALL",
                           'service_name': "ceilometer-api",
                           'chain': "INPUT",
                           'ports': ['8777'],
                           'proto': "tcp"},
    }
+
+
def create_mongodb_manifest(config, messages):
    """Compute the MongoDB host URL and controller firewall rule."""
    host = config['CONFIG_MONGODB_HOST']
    # IPv6 addresses must be bracketed when used inside URLs.
    if config['CONFIG_IP_VERSION'] == 'ipv6':
        config['CONFIG_MONGODB_HOST_URL'] = "[%s]" % host
    else:
        config['CONFIG_MONGODB_HOST_URL'] = host

    # Only the controller host needs access to the MongoDB port.
    config['FIREWALL_MONGODB_RULES'] = {
        'mongodb_server': {
            'host': "%s" % config['CONFIG_CONTROLLER_HOST'],
            'service_name': "mongodb-server",
            'chain': "INPUT",
            'ports': ['27017'],
            'proto': "tcp",
        },
    }
+
+
def create_redis_manifest(config, messages):
    """When redis coordination is enabled, set its URL and firewall rules."""
    if config['CONFIG_CEILOMETER_COORDINATION_BACKEND'] != 'redis':
        return

    redis_host = config['CONFIG_REDIS_HOST']
    # IPv6 addresses must be bracketed when used inside URLs.
    if config['CONFIG_IP_VERSION'] == 'ipv6':
        config['CONFIG_REDIS_HOST_URL'] = "[%s]" % redis_host
    else:
        config['CONFIG_REDIS_HOST_URL'] = redis_host

    # master
    master_clients = set([config['CONFIG_CONTROLLER_HOST']])
    config['FIREWALL_REDIS_RULES'] = _create_redis_firewall_rules(
        master_clients, config['CONFIG_REDIS_PORT'])
+
+
+# ------------------------- helper functions -------------------------
+
+def _create_redis_firewall_rules(hosts, port):
+    fw_details = dict()
+    for host in hosts:
+        key = "redis service from %s" % host
+        fw_details.setdefault(key, {})
+        fw_details[key]['host'] = "%s" % host
+        fw_details[key]['service_name'] = "redis service"
+        fw_details[key]['chain'] = "INPUT"
+        fw_details[key]['ports'] = port
+        fw_details[key]['proto'] = "tcp"
+    return fw_details

+ 756 - 0
packstack/plugins/cinder_250.py

@@ -0,0 +1,756 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Cinder
+"""
+
+import re
+
+from packstack.installer import basedefs
+from packstack.installer import exceptions
+from packstack.installer import processors
+from packstack.installer import validators
+from packstack.installer.utils import split_hosts
+
+from packstack.installer import utils
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------------ Cinder Packstack Plugin initialization ------------------
+
+PLUGIN_NAME = "OS-Cinder"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+NETAPP_DEFAULT_STORAGE_FAMILY = "ontap_cluster"
+NETAPP_DEFAULT_STORAGE_PROTOCOL = "nfs"
+
+
def initConfig(controller):
    """Register all Cinder-related parameter groups with the controller.

    Each group carries a PRE_CONDITION callback so that backend-specific
    questions (LVM sizing, Gluster/NFS mounts, NetApp variants,
    SolidFire) are only asked when the relevant backend is selected.
    """
    conf_params = {
        "CINDER": [
            {"CMD_OPTION": "cinder-db-passwd",
             "PROMPT": "Enter the password for the Cinder DB access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_DB_PW",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": 'cinder-db-purge-enable',
             "PROMPT": (
                 "Enter y if cron job for removing soft deleted DB rows "
                 "should be created"
             ),
             "OPTION_LIST": ['y', 'n'],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [processors.process_bool],
             "DEFAULT_VALUE": 'y',
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_CINDER_DB_PURGE_ENABLE',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "cinder-ks-passwd",
             "PROMPT": "Enter the password for the Cinder Keystone access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_KS_PW",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "cinder-backend",
             "PROMPT": "Enter the Cinder backend to be configured",
             "OPTION_LIST": ["lvm", "gluster", "nfs", "vmdk", "netapp",
                             "solidfire"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "lvm",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_BACKEND",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERVOLUMECREATE": [
            {"CMD_OPTION": "cinder-volumes-create",
             "PROMPT": ("Should Cinder's volumes group be created (for "
                        "proof-of-concept installation)?"),
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "y",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_VOLUMES_CREATE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERVOLUMESIZE": [
            {"CMD_OPTION": "cinder-volumes-size",
             "PROMPT": "Enter Cinder's volumes group usable size",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "20G",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_VOLUMES_SIZE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERGLUSTERMOUNTS": [
            {"CMD_OPTION": "cinder-gluster-mounts",
             "PROMPT": ("Enter a single or comma separated list of gluster "
                        "volume shares to use with Cinder"),
             # Raw strings: these patterns contain regex escapes (\d, \-)
             # that are invalid escape sequences in ordinary py3 strings.
             "OPTION_LIST": [r"^([\d]{1,3}\.){3}[\d]{1,3}:/.*",
                             r"^[a-zA-Z0-9][\-\.\w]*:/.*"],
             "VALIDATORS": [validators.validate_multi_regexp],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_GLUSTER_MOUNTS",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNFSMOUNTS": [
            {"CMD_OPTION": "cinder-nfs-mounts",
             "PROMPT": ("Enter a single or comma separated list of NFS exports "
                        "to use with Cinder"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_multi_export],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NFS_MOUNTS",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPPMAIN": [
            {"CMD_OPTION": "cinder-netapp-login",
             "PROMPT": ("Enter a NetApp login"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_LOGIN",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-password",
             "PROMPT": ("Enter a NetApp password"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_PASSWORD",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-hostname",
             "PROMPT": ("Enter a NetApp hostname"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [processors.process_add_quotes_around_values],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_HOSTNAME",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-server-port",
             "PROMPT": ("Enter a NetApp server port"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_port],
             "PROCESSORS": [],
             "DEFAULT_VALUE": 80,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_SERVER_PORT",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-storage-family",
             "PROMPT": ("Enter a NetApp storage family"),
             "OPTION_LIST": ["ontap_7mode", "ontap_cluster", "eseries"],
             "VALIDATORS": [validators.validate_options],
             "PROCESSORS": [],
             "DEFAULT_VALUE": NETAPP_DEFAULT_STORAGE_FAMILY,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_STORAGE_FAMILY",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-transport-type",
             "PROMPT": ("Enter a NetApp transport type"),
             "OPTION_LIST": ["http", "https"],
             "VALIDATORS": [validators.validate_options],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "http",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_TRANSPORT_TYPE",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-storage-protocol",
             "PROMPT": ("Enter a NetApp storage protocol"),
             "OPTION_LIST": ["iscsi", "fc", "nfs"],
             "VALIDATORS": [validators.validate_options],
             "PROCESSORS": [],
             "DEFAULT_VALUE": NETAPP_DEFAULT_STORAGE_PROTOCOL,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPPONTAPISCSI": [
            {"CMD_OPTION": "cinder-netapp-size-multiplier",
             "PROMPT": ("Enter a NetApp size multiplier"),
             "OPTION_LIST": [""],
             "VALIDATORS": [],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "1.0",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPPNFS": [
            {"CMD_OPTION": "cinder-netapp-expiry-thres-minutes",
             "PROMPT": ("Enter a threshold"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_integer],
             "PROCESSORS": [],
             "DEFAULT_VALUE": 720,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-thres-avl-size-perc-start",
             "PROMPT": ("Enter a value"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_integer],
             "PROCESSORS": [],
             "DEFAULT_VALUE": 20,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-thres-avl-size-perc-stop",
             "PROMPT": ("Enter a value"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_integer],
             "PROCESSORS": [],
             "DEFAULT_VALUE": 60,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-nfs-shares",
             "PROMPT": ("Enter a single or comma-separated list of NetApp NFS shares"),
             "OPTION_LIST": [""],
             "VALIDATORS": [],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_NFS_SHARES",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-nfs-shares-config",
             "PROMPT": ("Enter a NetApp NFS share config file"),
             "OPTION_LIST": [""],
             "VALIDATORS": [],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "/etc/cinder/shares.conf",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPPISCSI7MODE": [
            {"CMD_OPTION": "cinder-netapp-volume-list",
             "PROMPT": ("Enter a NetApp volume list"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_VOLUME_LIST",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-vfiler",
             "PROMPT": ("Enter a NetApp vFiler"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_VFILER",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPP7MODEFC": [
            {"CMD_OPTION": "cinder-netapp-partner-backend-name",
             "PROMPT": ("Enter a NetApp partner backend name"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPPVSERVER": [
            {"CMD_OPTION": "cinder-netapp-vserver",
             "PROMPT": ("Enter a NetApp Vserver"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_VSERVER",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERNETAPPESERIES": [
            {"CMD_OPTION": "cinder-netapp-controller-ips",
             "PROMPT": ("Enter a value"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_multi_ping],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_CONTROLLER_IPS",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-sa-password",
             "PROMPT": ("Enter a password"),
             "OPTION_LIST": [""],
             "VALIDATORS": [],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_SA_PASSWORD",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-eseries-host-type",
             "PROMPT": ("Enter a host type"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "linux_dm_mp",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-webservice-path",
             "PROMPT": ("Enter a path"),
             "OPTION_LIST": ["^[/].*$"],
             "VALIDATORS": [validators.validate_regexp],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "/devmgr/v2",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_WEBSERVICE_PATH",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-netapp-storage-pools",
             "PROMPT": ("Enter a value"),
             "OPTION_LIST": [""],
             "VALIDATORS": [],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_CINDER_NETAPP_STORAGE_POOLS",
             "USE_DEFAULT": True,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        "CINDERSOLIDFIRE": [
            {"CMD_OPTION": "cinder-solidfire-login",
             "PROMPT": ("Enter the cluster admin login"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_SOLIDFIRE_LOGIN",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-solidfire-password",
             "PROMPT": ("Enter cluster admin password"),
             "OPTION_LIST": [""],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_SOLIDFIRE_PASSWORD",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},
            {"CMD_OPTION": "cinder-solidfire-hostname",
             "PROMPT": ("Enter a SolidFire hostname or IP"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "PROCESSORS": [processors.process_add_quotes_around_values],
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_CINDER_SOLIDFIRE_HOSTNAME",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ]
    }
    # Merge parameter usage text from the packstack documentation.
    update_params_usage(basedefs.PACKSTACK_DOC, conf_params)

    conf_groups = [
        {"GROUP_NAME": "CINDER",
         "DESCRIPTION": "Cinder Config parameters",
         "PRE_CONDITION": "CONFIG_CINDER_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERVOLUMECREATE",
         "DESCRIPTION": "Cinder volume create Config parameters",
         "PRE_CONDITION": check_lvm_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERVOLUMESIZE",
         "DESCRIPTION": "Cinder volume size Config parameters",
         "PRE_CONDITION": check_lvm_vg_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERGLUSTERMOUNTS",
         "DESCRIPTION": "Cinder gluster Config parameters",
         "PRE_CONDITION": check_gluster_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNFSMOUNTS",
         "DESCRIPTION": "Cinder NFS Config parameters",
         "PRE_CONDITION": check_nfs_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPPMAIN",
         "DESCRIPTION": "Cinder NetApp main configuration",
         "PRE_CONDITION": check_netapp_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPPONTAPISCSI",
         "DESCRIPTION": "Cinder NetApp ONTAP-iSCSI configuration",
         "PRE_CONDITION": check_netapp_ontap_iscsi_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPPNFS",
         "DESCRIPTION": "Cinder NetApp NFS configuration",
         "PRE_CONDITION": check_netapp_nfs_settings,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPPISCSI7MODE",
         "DESCRIPTION": "Cinder NetApp iSCSI & 7-mode configuration",
         "PRE_CONDITION": check_netapp_7modeiscsi_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPP7MODEFC",
         "DESCRIPTION": "Cinder NetApp 7-mode Fibre Channel configuration",
         "PRE_CONDITION": check_netapp_7mode_fc_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPPVSERVER",
         "DESCRIPTION": "Cinder NetApp Vserver configuration",
         "PRE_CONDITION": check_netapp_vserver_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERNETAPPESERIES",
         "DESCRIPTION": "Cinder NetApp E-Series configuration",
         "PRE_CONDITION": check_netapp_eseries_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "CINDERSOLIDFIRE",
         "DESCRIPTION": "Cinder SolidFire configuration",
         "PRE_CONDITION": check_solidfire_options,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True}
    ]
    for group in conf_groups:
        params = conf_params[group["GROUP_NAME"]]
        controller.addGroup(group, params)
+
+
def initSequences(controller):
    """Register the Cinder installation sequence with the controller.

    No-op unless Cinder install was requested.  Normalizes the
    comma-separated multi-value options into lists, then schedules the
    volume-group check (LVM backend only) and manifest preparation.
    """
    config = controller.CONF
    if config['CONFIG_CINDER_INSTALL'] != 'y':
        return

    # The backend option may name several backends, comma separated.
    backends = config['CONFIG_CINDER_BACKEND'].split(',')
    config['CONFIG_CINDER_BACKEND'] = [b.strip() for b in backends if b]

    multi_value_keys = ('CONFIG_CINDER_NETAPP_VOLUME_LIST',
                        'CONFIG_CINDER_GLUSTER_MOUNTS',
                        'CONFIG_CINDER_NFS_MOUNTS')
    for key in multi_value_keys:
        if key in config:
            config[key] = [v.strip() for v in config[key].split(',') if v]

    cinder_steps = []
    if 'lvm' in config['CONFIG_CINDER_BACKEND']:
        cinder_steps.append(
            {'title': 'Checking if the Cinder server has a cinder-volumes vg',
             'functions': [check_cinder_vg]})
    cinder_steps.append({'title': 'Preparing Cinder entries',
                         'functions': [create_manifest]})
    controller.addSequence("Installing OpenStack Cinder", [], [], cinder_steps)
+
+
+# ------------------------- helper functions -------------------------
+
def check_lvm_options(config):
    # Ask about volume-group creation only for LVM-backed installs.
    if config['CONFIG_CINDER_INSTALL'] != 'y':
        return False
    return 'lvm' in config['CONFIG_CINDER_BACKEND']
+
+
def check_lvm_vg_options(config):
    # Same condition as check_lvm_options; kept as a separate callable so
    # the CINDERVOLUMESIZE group has its own pre-condition hook.
    return bool(config['CONFIG_CINDER_INSTALL'] == 'y'
                and 'lvm' in config['CONFIG_CINDER_BACKEND'])
+
+
def check_gluster_options(config):
    # Gluster share parameters apply only when the gluster backend is chosen.
    installed = config['CONFIG_CINDER_INSTALL'] == 'y'
    return installed and 'gluster' in config['CONFIG_CINDER_BACKEND']
+
+
def check_nfs_options(config):
    # NFS export parameters apply only when the nfs backend is chosen.
    if config['CONFIG_CINDER_INSTALL'] != 'y':
        return False
    return 'nfs' in config['CONFIG_CINDER_BACKEND']
+
+
def check_netapp_options(config):
    # NetApp parameters apply only when the netapp backend is chosen.
    return bool(config['CONFIG_CINDER_INSTALL'] == 'y'
                and 'netapp' in config['CONFIG_CINDER_BACKEND'])
+
+
def check_netapp_ontap_iscsi_options(config):
    # ONTAP-over-iSCSI questions apply to either ONTAP family, iscsi only.
    if not check_netapp_options(config):
        return False
    family = config['CONFIG_CINDER_NETAPP_STORAGE_FAMILY']
    protocol = config['CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL']
    return family in ('ontap_cluster', 'ontap_7mode') and protocol == "iscsi"
+
+
def check_netapp_nfs_settings(config):
    # NFS-specific NetApp questions apply whenever the protocol is nfs.
    if not check_netapp_options(config):
        return False
    return config['CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'] == "nfs"
+
+
def check_netapp_7modeiscsi_options(config):
    # Volume list / vFiler questions apply only to 7-mode over iSCSI.
    if not check_netapp_options(config):
        return False
    return (config['CONFIG_CINDER_NETAPP_STORAGE_FAMILY'] == 'ontap_7mode'
            and config['CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'] == 'iscsi')
+
+
def check_netapp_7mode_fc_options(config):
    # Partner-backend question applies only to 7-mode over Fibre Channel.
    if not check_netapp_options(config):
        return False
    family = config['CONFIG_CINDER_NETAPP_STORAGE_FAMILY']
    protocol = config['CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL']
    return family == "ontap_7mode" and protocol == "fc"
+
+
def check_netapp_vserver_options(config):
    # Vserver question applies to clustered ONTAP over nfs or iscsi.
    if not check_netapp_options(config):
        return False
    family = config['CONFIG_CINDER_NETAPP_STORAGE_FAMILY']
    protocol = config['CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL']
    return family == "ontap_cluster" and protocol in ('nfs', 'iscsi')
+
+
def check_netapp_eseries_options(config):
    # E-Series questions apply only to the eseries storage family.
    if not check_netapp_options(config):
        return False
    return config['CONFIG_CINDER_NETAPP_STORAGE_FAMILY'] == "eseries"
+
+
def check_solidfire_options(config):
    # SolidFire parameters apply only when the solidfire backend is chosen.
    if config['CONFIG_CINDER_INSTALL'] != 'y':
        return False
    return 'solidfire' in config['CONFIG_CINDER_BACKEND']
+
+
+# -------------------------- step functions --------------------------
+
def check_cinder_vg(config, messages):
    """Verify the cinder-volumes VG and normalize the requested size.

    Runs `vgdisplay cinder-volumes` on the storage host; when Packstack
    was told not to create the VG itself, its absence is fatal.  The
    user-supplied size of the form "<N>G" is converted to megabytes
    with an extra 3% added (presumably headroom for LVM overhead —
    TODO confirm).

    Raises MissingRequirements when the VG is required but missing, and
    ParamValidationError when CONFIG_CINDER_VOLUMES_SIZE is malformed.
    """
    cinders_volume = 'cinder-volumes'

    # Do we have a cinder-volumes vg?  A failing vgdisplay simply means
    # the volume group is not present on the storage host.
    have_cinders_volume = False
    server = utils.ScriptRunner(config['CONFIG_STORAGE_HOST'])
    server.append('vgdisplay %s' % cinders_volume)
    try:
        server.execute()
        have_cinders_volume = True
    except exceptions.ScriptRuntimeError:
        pass

    if config["CONFIG_CINDER_VOLUMES_CREATE"] == "n":
        if not have_cinders_volume:
            raise exceptions.MissingRequirements("The cinder server should "
                                                 "contain a cinder-volumes "
                                                 "volume group")
    # Raw string avoids the invalid '\d' escape sequence warning that a
    # plain string literal produces on Python 3.
    match = re.match(r'^(?P<size>\d+)G$',
                     config['CONFIG_CINDER_VOLUMES_SIZE'].strip())
    if not match:
        msg = 'Invalid Cinder volumes VG size.'
        raise exceptions.ParamValidationError(msg)

    cinders_volume_size = int(match.group('size')) * 1024
    cinders_reserve = int(cinders_volume_size * 0.03)

    cinders_volume_size = cinders_volume_size + cinders_reserve
    config['CONFIG_CINDER_VOLUMES_SIZE'] = '%sM' % cinders_volume_size
+
+
def create_manifest(config, messages):
    """Prepare Cinder firewall rules and (optionally) AMQP SSL material.

    When AMQP SSL is enabled, generates a certificate/key pair for the
    cinder service on the storage host.  Opens the iSCSI port (3260)
    either per compute host or for ALL hosts, and opens the Cinder API
    port (8776) for everyone.
    """
    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        ssl_host = config['CONFIG_STORAGE_HOST']
        ssl_cert_file = config['CONFIG_CINDER_SSL_CERT'] = (
            '/etc/pki/tls/certs/ssl_amqp_cinder.crt'
        )
        ssl_key_file = config['CONFIG_CINDER_SSL_KEY'] = (
            '/etc/pki/tls/private/ssl_amqp_cinder.key'
        )
        generate_ssl_cert(config, ssl_host, 'cinder', ssl_key_file,
                          ssl_cert_file)

    # iSCSI access is restricted to individual compute hosts when nova
    # is locally installed without the VMware backend; otherwise a
    # single rule opens the port to ALL.
    per_host_rules = (config['CONFIG_NOVA_INSTALL'] == 'y' and
                      config['CONFIG_VMWARE_BACKEND'] == 'n')
    iscsi_rules = {}
    for host in split_hosts(config['CONFIG_COMPUTE_HOSTS']):
        if per_host_rules:
            key = "cinder_%s" % host
            rule_host = "%s" % host
        else:
            key = "cinder_all"
            rule_host = "ALL"
        iscsi_rules[key] = {'host': rule_host,
                            'service_name': "cinder",
                            'chain': "INPUT",
                            'ports': ['3260'],
                            'proto': "tcp"}
    config['FIREWALL_CINDER_RULES'] = iscsi_rules

    # cinder API should be open for everyone
    config['FIREWALL_CINDER_API_RULES'] = {
        "cinder_api": {'host': "ALL",
                       'service_name': "cinder-api",
                       'chain': "INPUT",
                       'ports': ['8776'],
                       'proto': "tcp"}
    }

+ 211 - 0
packstack/plugins/dashboard_500.py

@@ -0,0 +1,211 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures OpenStack Horizon
+"""
+
+import os
+import uuid
+
+from packstack.installer import basedefs
+from packstack.installer import exceptions
+from packstack.installer import utils
+from packstack.installer import validators
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+from packstack.modules.ospluginutils import deliver_ssl_file
+
+# ------------- Horizon Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Horizon"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register Horizon parameter groups with the installer controller.
+
+    Adds the OSHORIZON group (https toggle, secret encryption key) and the
+    OSSSL group (certificate/key/CA-chain paths); each group is only
+    prompted for when its PRE_CONDITION config value matches 'y'.
+    """
+    params = [
+        {"CMD_OPTION": "os-horizon-ssl",
+         "PROMPT": "Would you like to set up Horizon communication over https",
+         "OPTION_LIST": ["y", "n"],
+         "VALIDATORS": [validators.validate_options],
+         "DEFAULT_VALUE": "n",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_HORIZON_SSL",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-horizon-secretkey",
+         "PROMPT": "Horizon Secret Encryption Key",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         # Default is a fresh random hex string generated each time
+         # initConfig runs.
+         "DEFAULT_VALUE": uuid.uuid4().hex,
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HORIZON_SECRET_KEY",
+         "USE_DEFAULT": True,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+    ]
+    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
+    group = {"GROUP_NAME": "OSHORIZON",
+             "DESCRIPTION": "OpenStack Horizon Config parameters",
+             "PRE_CONDITION": "CONFIG_HORIZON_INSTALL",
+             "PRE_CONDITION_MATCH": "y",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, params)
+
+    # SSL file parameters; only asked for when CONFIG_HORIZON_SSL == 'y'.
+    params = [
+        {"CMD_OPTION": "os-ssl-cert",
+         "PROMPT": ("Enter the path to a PEM encoded certificate to be used "
+                    "on the https server, leave blank if one should be "
+                    "generated, this certificate should not require "
+                    "a passphrase"),
+         "OPTION_LIST": [],
+         "VALIDATORS": [],
+         "DEFAULT_VALUE": '',
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_HORIZON_SSL_CERT",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False,
+         "DEPRECATES": ['CONFIG_SSL_CERT']},
+
+        {"CMD_OPTION": "os-ssl-key",
+         "PROMPT": ("Enter the SSL keyfile corresponding to the certificate "
+                    "if one was entered"),
+         "OPTION_LIST": [],
+         "VALIDATORS": [],
+         "DEFAULT_VALUE": "",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_HORIZON_SSL_KEY",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False,
+         "DEPRECATES": ['CONFIG_SSL_KEY']},
+
+        {"CMD_OPTION": "os-ssl-cachain",
+         "PROMPT": ("Enter the CA chain file corresponding to the certificate "
+                    "if one was entered"),
+         "OPTION_LIST": [],
+         "VALIDATORS": [],
+         "DEFAULT_VALUE": "",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_HORIZON_SSL_CACERT",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False,
+         "DEPRECATES": ['CONFIG_SSL_CACHAIN']},
+    ]
+    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
+    group = {"GROUP_NAME": "OSSSL",
+             "DESCRIPTION": "SSL Config parameters",
+             "PRE_CONDITION": "CONFIG_HORIZON_SSL",
+             "PRE_CONDITION_MATCH": "y",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    """Queue the Horizon manifest step if Horizon install was requested."""
+    if controller.CONF['CONFIG_HORIZON_INSTALL'] != 'y':
+        return
+
+    steps = [
+        {'title': 'Preparing Horizon entries',
+         'functions': [create_manifest]}
+    ]
+    controller.addSequence("Installing OpenStack Horizon", [], [], steps)
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    horizon_host = config['CONFIG_CONTROLLER_HOST']
+
+    proto = "http"
+    config["CONFIG_HORIZON_PORT"] = 80
+    sslmanifestdata = ''
+    if config["CONFIG_HORIZON_SSL"] == 'y':
+        config["CONFIG_HORIZON_PORT"] = 443
+        proto = "https"
+
+        # Are we using the users cert/key files
+        if config["CONFIG_HORIZON_SSL_CERT"]:
+            ssl_cert_file = config["CONFIG_HORIZON_SSL_CERT"]
+            ssl_key_file = config["CONFIG_HORIZON_SSL_KEY"]
+            ssl_chain_file = config["CONFIG_HORIZON_SSL_CACERT"]
+
+            if not os.path.exists(ssl_cert_file):
+                raise exceptions.ParamValidationError(
+                    "The file %s doesn't exist" % ssl_cert_file)
+
+            if not os.path.exists(ssl_key_file):
+                raise exceptions.ParamValidationError(
+                    "The file %s doesn't exist" % ssl_key_file)
+
+            if not os.path.exists(ssl_chain_file):
+                raise exceptions.ParamValidationError(
+                    "The file %s doesn't exist" % ssl_chain_file)
+
+            final_cert = open(ssl_cert_file, 'rt').read()
+            final_key = open(ssl_key_file, 'rt').read()
+            final_cacert = open(ssl_chain_file, 'rt').read()
+            host = config['CONFIG_CONTROLLER_HOST']
+            deliver_ssl_file(final_cacert, ssl_chain_file, host)
+            deliver_ssl_file(final_cert, ssl_cert_file, host)
+            deliver_ssl_file(final_key, ssl_key_file, host)
+
+        else:
+            ssl_cert_file = config["CONFIG_HORIZON_SSL_CERT"] = (
+                '/etc/pki/tls/certs/ssl_dashboard.crt'
+            )
+            ssl_key_file = config["CONFIG_HORIZON_SSL_KEY"] = (
+                '/etc/pki/tls/private/ssl_dashboard.key'
+            )
+            cacert = config['CONFIG_SSL_CACERT']
+            config["CONFIG_HORIZON_SSL_CACERT"] = cacert
+            ssl_host = config['CONFIG_CONTROLLER_HOST']
+            service = 'dashboard'
+            generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                              ssl_cert_file)
+            messages.append(
+                "%sNOTE%s : A certificate was generated to be used for ssl, "
+                "You should change the ssl certificate configured in "
+                "/etc/httpd/conf.d/ssl.conf on %s to use a CA signed cert."
+                % (utils.COLORS['red'], utils.COLORS['nocolor'], horizon_host))
+
+    config["CONFIG_HORIZON_NEUTRON_LB"] = False
+    config["CONFIG_HORIZON_NEUTRON_FW"] = False
+    config["CONFIG_HORIZON_NEUTRON_VPN"] = False
+
+    if config['CONFIG_NEUTRON_INSTALL'] == 'y':
+        if config["CONFIG_LBAAS_INSTALL"] == 'y':
+            config["CONFIG_HORIZON_NEUTRON_LB"] = True
+        if config["CONFIG_NEUTRON_FWAAS"] == 'y':
+            config["CONFIG_HORIZON_NEUTRON_FW"] = True
+        if config["CONFIG_NEUTRON_VPNAAS"] == 'y':
+            config["CONFIG_HORIZON_NEUTRON_VPN"] = True
+
+    msg = ("To access the OpenStack Dashboard browse to %s://%s/dashboard .\n"
+           "Please, find your login credentials stored in the keystonerc_admin"
+           " in your home directory."
+           % (proto, config['CONFIG_CONTROLLER_HOST']))
+    messages.append(msg)

+ 130 - 0
packstack/plugins/glance_200.py

@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Glance
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer import utils
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Glance Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Glance"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register Glance parameters (DB/Keystone passwords, storage backend)
+    with the installer controller; gated on CONFIG_GLANCE_INSTALL == 'y'.
+    """
+    params = [
+        {"CMD_OPTION": "glance-db-passwd",
+         "PROMPT": "Enter the password for the Glance DB access",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "PROCESSORS": [processors.process_password],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_GLANCE_DB_PW",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "glance-ks-passwd",
+         "PROMPT": "Enter the password for the Glance Keystone access",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "PROCESSORS": [processors.process_password],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_GLANCE_KS_PW",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "glance-backend",
+         "PROMPT": "Glance storage backend",
+         "OPTION_LIST": ["file", "swift"],
+         "VALIDATORS": [validators.validate_options],
+         # process_backend falls back to 'file' when Swift isn't installed.
+         "PROCESSORS": [process_backend],
+         "DEFAULT_VALUE": "file",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_GLANCE_BACKEND",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+    ]
+    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
+    group = {"GROUP_NAME": "GLANCE",
+             "DESCRIPTION": "Glance Config parameters",
+             "PRE_CONDITION": "CONFIG_GLANCE_INSTALL",
+             "PRE_CONDITION_MATCH": "y",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    """Queue the Glance manifest step.
+
+    Raises RuntimeError when Glance is disabled but Nova is enabled,
+    since Nova requires Glance.
+    """
+    conf = controller.CONF
+    if conf['CONFIG_GLANCE_INSTALL'] != 'y':
+        if conf['CONFIG_NOVA_INSTALL'] == 'y':
+            raise RuntimeError('Glance is required to install Nova properly. '
+                               'Please set CONFIG_GLANCE_INSTALL=y')
+        return
+
+    glancesteps = [
+        {'title': 'Preparing Glance entries',
+         'functions': [create_manifest]}
+    ]
+    controller.addSequence("Installing OpenStack Glance", [], [], glancesteps)
+
+
+# ------------------------- helper functions -------------------------
+
+def process_backend(value, param_name, config):
+    """Force the 'file' backend when Swift is not being installed."""
+    if value == 'swift' and config['CONFIG_SWIFT_INSTALL'] != 'y':
+        return 'file'
+    return value
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    """Prepare Glance config values: optional AMQP SSL cert/key paths and
+    the firewall rule opening the Glance API port (9292) to all hosts.
+    """
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        ssl_host = config['CONFIG_STORAGE_HOST']
+        ssl_cert_file = config['CONFIG_GLANCE_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_glance.crt'
+        )
+        ssl_key_file = config['CONFIG_GLANCE_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_glance.key'
+        )
+        service = 'glance'
+        generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                          ssl_cert_file)
+
+    # Glance API is open to everyone.
+    fw_details = dict()
+    key = "glance_api"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "glance"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['9292']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_GLANCE_RULES'] = fw_details

+ 103 - 0
packstack/plugins/gnocchi_790.py

@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Gnocchi
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+
+# ------------- Gnocchi Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Gnocchi"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register Gnocchi parameters (DB/Keystone passwords).
+
+    The group is gated on a callable PRE_CONDITION that requires both
+    Ceilometer and Gnocchi installs to be enabled.
+    """
+    gnocchi_params = {
+        "GNOCCHI": [
+            {"CONF_NAME": "CONFIG_GNOCCHI_DB_PW",
+             "CMD_OPTION": "gnocchi-db-passwd",
+             "PROMPT": "Enter the password for Gnocchi DB access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+            {"CONF_NAME": "CONFIG_GNOCCHI_KS_PW",
+             "CMD_OPTION": "gnocchi-ks-passwd",
+             "PROMPT": "Enter the password for the Gnocchi Keystone access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False}
+        ]
+    }
+
+    update_params_usage(basedefs.PACKSTACK_DOC, gnocchi_params)
+
+    def use_gnocchi(config):
+        # Callable precondition: Gnocchi params only apply when both
+        # Ceilometer and Gnocchi are selected for install.
+        return (config['CONFIG_CEILOMETER_INSTALL'] == 'y' and
+                config['CONFIG_GNOCCHI_INSTALL'] == 'y')
+
+    gnocchi_groups = [
+        {"GROUP_NAME": "GNOCCHI",
+         "DESCRIPTION": "Gnocchi Config parameters",
+         "PRE_CONDITION": use_gnocchi,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+    ]
+    for group in gnocchi_groups:
+        paramList = gnocchi_params[group["GROUP_NAME"]]
+        controller.addGroup(group, paramList)
+
+
+def initSequences(controller):
+    """Queue the Gnocchi manifest step when both Gnocchi and Ceilometer
+    installs are enabled.
+    """
+    if (controller.CONF['CONFIG_GNOCCHI_INSTALL'] != 'y' or
+       controller.CONF['CONFIG_CEILOMETER_INSTALL'] != 'y'):
+        return
+
+    steps = [{'title': 'Preparing Gnocchi entries',
+              'functions': [create_manifest]}]
+    controller.addSequence("Installing OpenStack Gnocchi", [], [],
+                           steps)
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    """Build the firewall rule opening the Gnocchi API port (8041)."""
+    fw_details = dict()
+    key = "gnocchi_api"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "gnocchi-api"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['8041']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_GNOCCHI_RULES'] = fw_details

+ 216 - 0
packstack/plugins/heat_650.py

@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Heat
+"""
+
+import uuid
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Heat Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Heat"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register Heat parameters with the installer controller.
+
+    Covers DB/Keystone passwords, the DB auth encryption key, the
+    CloudWatch/CloudFormation API toggles and the Keystone domain
+    settings; gated on CONFIG_HEAT_INSTALL == 'y'.
+    """
+    parameters = [
+        {"CMD_OPTION": "os-heat-mysql-password",
+         "PROMPT": "Enter the password for the Heat DB user",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "PROCESSORS": [processors.process_password],
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_DB_PW",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "heat-auth-encryption-key",
+         "PROMPT": ("Enter the authentication key for Heat to use for "
+                    "authenticate info in database (16, 24, or 32 chars)"),
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         # Default is a random 16-char hex string generated when
+         # initConfig runs.
+         "DEFAULT_VALUE": uuid.uuid4().hex[:16],
+         "PROCESSORS": [processors.process_string_nofloat],
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_AUTH_ENC_KEY",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-heat-ks-passwd",
+         "PROMPT": "Enter the password for the Heat Keystone access",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "PROCESSORS": [processors.process_password],
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_KS_PW",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-heat-cloudwatch-install",
+         "PROMPT": "Should Packstack install Heat CloudWatch API",
+         "OPTION_LIST": ["y", "n"],
+         "VALIDATORS": [validators.validate_options],
+         "DEFAULT_VALUE": "n",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_CLOUDWATCH_INSTALL",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-heat-cfn-install",
+         "PROMPT": "Should Packstack install Heat CloudFormation API",
+         "OPTION_LIST": ["y", "n"],
+         "VALIDATORS": [validators.validate_options],
+         "DEFAULT_VALUE": "y",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_CFN_INSTALL",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-heat-domain",
+         "PROMPT": "Enter name of Keystone domain for Heat",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "heat",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_DOMAIN",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-heat-domain-admin",
+         "PROMPT": "Enter name of Keystone domain admin user for Heat",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "heat_admin",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_DOMAIN_ADMIN",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+
+        {"CMD_OPTION": "os-heat-domain-password",
+         "PROMPT": "Enter password for Keystone domain admin user for Heat",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "PROCESSORS": [processors.process_password],
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_HEAT_DOMAIN_PASSWORD",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+    ]
+    update_params_usage(basedefs.PACKSTACK_DOC, parameters, sectioned=False)
+
+    group = {"GROUP_NAME": "Heat",
+             "DESCRIPTION": "Heat Config parameters",
+             "PRE_CONDITION": "CONFIG_HEAT_INSTALL",
+             "PRE_CONDITION_MATCH": "y",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, parameters)
+
+
+def initSequences(controller):
+    """Queue Heat manifest steps, adding the CloudWatch and
+    CloudFormation API steps only when their install flags are 'y'.
+    """
+    config = controller.CONF
+    if config['CONFIG_HEAT_INSTALL'] != 'y':
+        return
+    steps = [
+        {'title': 'Preparing Heat entries',
+         'functions': [create_manifest]},
+    ]
+
+    if config.get('CONFIG_HEAT_CLOUDWATCH_INSTALL', 'n') == 'y':
+        steps.append(
+            {'title': 'Preparing Heat CloudWatch API entries',
+             'functions': [create_cloudwatch_manifest]})
+    if config.get('CONFIG_HEAT_CFN_INSTALL', 'n') == 'y':
+        steps.append(
+            {'title': 'Preparing Heat CloudFormation API entries',
+             'functions': [create_cfn_manifest]})
+    controller.addSequence("Installing Heat", [], [], steps)
+
+
+# ------------------------ step functions -------------------------
+
+def create_manifest(config, messages):
+    """Prepare Heat config values: optional AMQP SSL cert/key paths and
+    the firewall rule opening the Heat API port (8004) to all hosts.
+    """
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        ssl_host = config['CONFIG_CONTROLLER_HOST']
+        ssl_cert_file = config['CONFIG_HEAT_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_heat.crt'
+        )
+        ssl_key_file = config['CONFIG_HEAT_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_heat.key'
+        )
+        service = 'heat'
+        generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                          ssl_cert_file)
+
+    fw_details = dict()
+    key = "heat"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "heat"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['8004']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_HEAT_RULES'] = fw_details
+
+
+def create_cloudwatch_manifest(config, messages):
+    """Build the firewall rule opening the Heat CloudWatch API port (8003)."""
+    fw_details = dict()
+    key = "heat_api_cloudwatch"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "heat api cloudwatch"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['8003']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_HEAT_CLOUDWATCH_RULES'] = fw_details
+
+
+def create_cfn_manifest(config, messages):
+    """Build the firewall rule opening the Heat CloudFormation port (8000)."""
+    fw_details = dict()
+    key = "heat_cfn"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "heat cfn"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['8000']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_HEAT_CFN_RULES'] = fw_details

+ 108 - 0
packstack/plugins/ironic_275.py

@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Ironic
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------------ Ironic Packstack Plugin initialization ------------------
+
+PLUGIN_NAME = "OS-Ironic"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register Ironic parameters (DB/Keystone passwords) with the
+    installer controller; gated on CONFIG_IRONIC_INSTALL == 'y'.
+    """
+    ironic_params = [
+        {"CONF_NAME": "CONFIG_IRONIC_DB_PW",
+         "CMD_OPTION": "os-ironic-db-passwd",
+         "PROMPT": "Enter the password for the Ironic DB user",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "PROCESSORS": [processors.process_password],
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+
+        {"CONF_NAME": "CONFIG_IRONIC_KS_PW",
+         "CMD_OPTION": "os-ironic-ks-passwd",
+         "PROMPT": "Enter the password for Ironic Keystone access",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "PROCESSORS": [processors.process_password],
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": False,
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False},
+    ]
+    update_params_usage(basedefs.PACKSTACK_DOC, ironic_params, sectioned=False)
+    ironic_group = {"GROUP_NAME": "IRONIC",
+                    "DESCRIPTION": "Ironic Options",
+                    "PRE_CONDITION": "CONFIG_IRONIC_INSTALL",
+                    "PRE_CONDITION_MATCH": "y",
+                    "POST_CONDITION": False,
+                    "POST_CONDITION_MATCH": True}
+
+    controller.addGroup(ironic_group, ironic_params)
+
+
+def initSequences(controller):
+    """Queue the Ironic manifest step if Ironic install was requested."""
+    if controller.CONF['CONFIG_IRONIC_INSTALL'] != 'y':
+        return
+
+    steps = [
+        {'title': 'Preparing Ironic entries',
+         'functions': [create_manifest]},
+    ]
+
+    controller.addSequence("Installing OpenStack Ironic", [], [],
+                           steps)
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    """Prepare Ironic config values: optional AMQP SSL cert/key paths and
+    the firewall rule opening the Ironic API port (6385) to all hosts.
+    """
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        ssl_host = config['CONFIG_CONTROLLER_HOST']
+        ssl_cert_file = config['CONFIG_IRONIC_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_ironic.crt'
+        )
+        ssl_key_file = config['CONFIG_IRONIC_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_ironic.key'
+        )
+        service = 'ironic'
+        generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                          ssl_cert_file)
+
+    # NOTE(review): key uses a hyphen ("ironic-api") while other plugins
+    # use underscores; kept as-is since the rule key may be referenced
+    # elsewhere.
+    fw_details = dict()
+    key = "ironic-api"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "ironic-api"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['6385']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_IRONIC_API_RULES'] = fw_details

+ 791 - 0
packstack/plugins/keystone_100.py

@@ -0,0 +1,791 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Keystone
+"""
+
+import uuid
+
+from packstack.installer import basedefs
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer import utils
+
+from packstack.modules.documentation import update_params_usage
+
+# ------------- Keystone Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Keystone"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register Keystone configuration parameters with the controller.
+
+    Defines two parameter groups: KEYSTONE (base options, always asked —
+    its PRE_CONDITION lambda always matches) and KEYSTONE_LDAP (asked
+    only when CONFIG_KEYSTONE_IDENTITY_BACKEND is 'ldap').  Parameter
+    usage text is filled in from the packstack documentation via
+    update_params_usage before the groups are added.
+    """
+    keystone_params = {
+        "KEYSTONE": [  # base keystone options
+            {"CMD_OPTION": "keystone-db-passwd",
+             "PROMPT": "Enter the password for the Keystone DB access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "PROCESSORS": [processors.process_password],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_DB_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": 'keystone-db-purge-enable',
+             "PROMPT": (
+                 "Enter y if cron job for removing soft deleted DB rows "
+                 "should be created"
+             ),
+             "OPTION_LIST": ['y', 'n'],
+             "VALIDATORS": [validators.validate_not_empty],
+             "PROCESSORS": [processors.process_bool],
+             "DEFAULT_VALUE": 'y',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_DB_PURGE_ENABLE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-region",
+             "PROMPT": "Region name",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "RegionOne",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_REGION",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            # NOTE(review): this default is evaluated once at module import
+            # time, so every run of the same process shares one token value.
+            {"CMD_OPTION": "keystone-admin-token",
+             "PROMPT": "The token to use for the Keystone service api",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": uuid.uuid4().hex,
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_ADMIN_TOKEN",
+             "USE_DEFAULT": True,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-admin-email",
+             "PROMPT": "Enter the email address for the Keystone admin user",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "root@localhost",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_ADMIN_EMAIL",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-admin-username",
+             "PROMPT": "Enter the username for the Keystone admin user",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "admin",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_ADMIN_USERNAME",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-admin-passwd",
+             "PROMPT": "Enter the password for the Keystone admin user",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_ADMIN_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-demo-passwd",
+             "PROMPT": "Enter the password for the Keystone demo user",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_KEYSTONE_DEMO_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-api-version",
+             "PROMPT": "Enter the Keystone API version string.",
+             "OPTION_LIST": ['v2.0', 'v3'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'v3',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_API_VERSION',
+             "USE_DEFAULT": True,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-token-format",
+             "PROMPT": "Enter the Keystone token format.",
+             "OPTION_LIST": ['UUID', 'PKI', 'FERNET'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'FERNET',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_TOKEN_FORMAT',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-identity-backend",
+             "PROMPT": "Enter the Keystone identity backend type.",
+             "OPTION_LIST": ['sql', 'ldap'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "sql",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_IDENTITY_BACKEND',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False}
+        ],
+
+        "KEYSTONE_LDAP": [  # keystone ldap identity backend options
+            # NOTE(review): this default is computed once at module import
+            # time from the local host IP — confirm that is intended.
+            {"CMD_OPTION": "keystone-ldap-url",
+             "PROMPT": "Enter the Keystone LDAP backend URL.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_ldap_url],
+             "DEFAULT_VALUE": host_to_ldap_url(utils.get_localhost_ip()),
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_URL',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-dn",
+             "PROMPT": "Enter the Keystone LDAP user DN.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_ldap_dn],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_DN',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-password",
+             "PROMPT": "Enter the Keystone LDAP user password.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_PASSWORD',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-suffix",
+             "PROMPT": "Enter the Keystone LDAP suffix.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty,
+                            validators.validate_ldap_dn],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_SUFFIX',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-query-scope",
+             "PROMPT": "Enter the Keystone LDAP query scope.",
+             "OPTION_LIST": ['base', 'one', 'sub'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "one",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_QUERY_SCOPE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            # "-1" is a sentinel meaning "unset"; see
+            # munge_ldap_config_params which maps it to None later.
+            {"CMD_OPTION": "keystone-ldap-page-size",
+             "PROMPT": "Enter the Keystone LDAP query page size.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_integer],
+             "DEFAULT_VALUE": "-1",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_PAGE_SIZE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-subtree",
+             "PROMPT": "Enter the Keystone LDAP user subtree.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty,
+                            validators.validate_ldap_dn],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_SUBTREE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-filter",
+             "PROMPT": "Enter the Keystone LDAP user query filter.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_FILTER',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-objectclass",
+             "PROMPT": "Enter the Keystone LDAP user objectclass.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-id-attribute",
+             "PROMPT": "Enter the Keystone LDAP user ID attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-name-attribute",
+             "PROMPT": "Enter the Keystone LDAP user name attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-mail-attribute",
+             "PROMPT": "Enter the Keystone LDAP user email address attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-enabled-attribute",
+             "PROMPT": "Enter the Keystone LDAP user enabled attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            # "-1" sentinel, mapped to None in munge_ldap_config_params.
+            {"CMD_OPTION": "keystone-ldap-user-enabled-mask",
+             "PROMPT": "Enter the Keystone LDAP user enabled mask.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_integer],
+             "DEFAULT_VALUE": "-1",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-enabled-default",
+             "PROMPT": "Enter the Keystone LDAP user enabled default.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "TRUE",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-enabled-invert",
+             "PROMPT": "Enter the Keystone LDAP user enabled invert (n or y).",
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-attribute-ignore",
+             "PROMPT": (
+                 "Enter the comma separated Keystone LDAP user "
+                 "attributes to ignore."
+             ),
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-default-project-id-attribute",
+             "PROMPT": (
+                 "Enter the Keystone LDAP user default_project_id attribute."
+             ),
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME":
+             'CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-allow-create",
+             "PROMPT": (
+                 "Do you want to allow user create through Keystone (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-allow-update",
+             "PROMPT": (
+                 "Do you want to allow user update through Keystone (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-allow-delete",
+             "PROMPT": (
+                 "Do you want to allow user delete through Keystone (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-pass-attribute",
+             "PROMPT": "Enter the Keystone LDAP user password attribute.",
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-enabled-emulation-dn",
+             "PROMPT": "Enter the Keystone LDAP enabled emulation DN.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_ldap_dn],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-user-additional-attribute-mapping",
+             "PROMPT": (
+                 "Enter the comma separated Keystone LDAP user additional "
+                 "attribute mappings in the form "
+                 "ldap_attr:user_attr[,ldap_attr:user_attr]...."
+             ),
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME":
+             'CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-subtree",
+             "PROMPT": "Enter the Keystone LDAP group subtree.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty,
+                            validators.validate_ldap_dn],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-filter",
+             "PROMPT": "Enter the Keystone LDAP group query filter.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_FILTER',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-objectclass",
+             "PROMPT": "Enter the Keystone LDAP group objectclass.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-id-attribute",
+             "PROMPT": "Enter the Keystone LDAP group ID attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-name-attribute",
+             "PROMPT": "Enter the Keystone LDAP group name attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-member-attribute",
+             "PROMPT": "Enter the Keystone LDAP group member attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-desc-attribute",
+             "PROMPT": "Enter the Keystone LDAP group description attribute.",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-attribute-ignore",
+             "PROMPT": (
+                 "Enter the comma separated Keystone LDAP group "
+                 "attributes to ignore."
+             ),
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-allow-create",
+             "PROMPT": (
+                 "Do you want to allow group create through Keystone (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-allow-update",
+             "PROMPT": (
+                 "Do you want to allow group update through Keystone (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-allow-delete",
+             "PROMPT": (
+                 "Do you want to allow group delete through Keystone (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-group-additional-attribute-mapping",
+             "PROMPT": (
+                 "Enter the comma separated Keystone LDAP group additional "
+                 "attribute mappings in the form "
+                 "ldap_attr:group_attr[,ldap_attr:group_attr]...."
+             ),
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME":
+             'CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-use-tls",
+             "PROMPT": (
+                 "Enable TLS for Keystone communicating with "
+                 "LDAP servers (n or y)."
+             ),
+             "OPTION_LIST": ['n', 'y'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'n',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_USE_TLS',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-tls-cacertdir",
+             "PROMPT": "CA Certificate directory for Keystone LDAP.",
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-tls-cacertfile",
+             "PROMPT": "CA Certificate file for Keystone LDAP.",
+             "OPTION_LIST": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "keystone-ldap-tls-req-cert",
+             "PROMPT": (
+                 "Keystone LDAP certificate checking strictness "
+                 "(never, allow, demand)"
+             ),
+             "OPTION_LIST": ["never", "allow", "demand"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "demand",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False}
+        ]
+    }
+    update_params_usage(basedefs.PACKSTACK_DOC, keystone_params)
+    keystone_groups = [
+        {"GROUP_NAME": "KEYSTONE",
+         "DESCRIPTION": "Keystone Config parameters",
+         "PRE_CONDITION": lambda x: 'yes',
+         "PRE_CONDITION_MATCH": "yes",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "KEYSTONE_LDAP",
+         "DESCRIPTION": "Keystone LDAP Identity Backend Config parameters",
+         "PRE_CONDITION": 'CONFIG_KEYSTONE_IDENTITY_BACKEND',
+         "PRE_CONDITION_MATCH": "ldap",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True}
+    ]
+    for group in keystone_groups:
+        params = keystone_params[group["GROUP_NAME"]]
+        controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    """Register the Keystone installation sequence with the controller.
+
+    Keystone is always installed, so no precondition is checked here.
+    LDAP parameter munging runs before manifest creation.
+    """
+    keystonesteps = [
+        {'title':
+         'Fixing Keystone LDAP config parameters to be undef if empty',
+         'functions': [munge_ldap_config_params]},
+        {'title': 'Preparing Keystone entries',
+         'functions': [create_manifest]},
+    ]
+    controller.addSequence("Installing OpenStack Keystone", [], [],
+                           keystonesteps)
+
+
+# ------------------------- helper functions -------------------------
+
+def host_to_ldap_url(hostfqdn):
+    """Converts a host fqdn into an appropriate default
+    LDAP URL.
+
+    :param hostfqdn: host name or IP address string (no bracketing is
+        applied here; presumably IPv4 or a hostname -- TODO confirm)
+    :returns: the string "ldap://<hostfqdn>"
+    """
+    return "ldap://%s" % hostfqdn
+
+
+# -------------------------- step functions --------------------------
+
+def munge_ldap_config_params(config, messages):
+    """Normalize CONFIG_KEYSTONE_LDAP_* values for manifest generation.
+
+    In-place rewrites of the config dict:
+    - the '-1' sentinel for page size / enabled mask becomes None,
+    - y/n flag values become real booleans,
+    - any other empty-string value becomes None.
+    """
+    def is_bool(keyname):
+        # Keys whose 'y'/'n' answers must become booleans.
+        return keyname in (
+            'CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT',
+            'CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE',
+            'CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE',
+            'CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE',
+            'CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE',
+            'CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE',
+            'CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE',
+            'CONFIG_KEYSTONE_LDAP_USE_TLS'
+        )
+
+    def yn_to_bool(val):
+        # Any value other than 'y' (including unexpected ones) -> False.
+        return {'n': False, 'y': True}.get(val, False)
+
+    # Only values are mutated, so iterating over the dict keys is safe.
+    for key in config:
+        if not key.startswith('CONFIG_KEYSTONE_LDAP_'):
+            continue
+        if key in ('CONFIG_KEYSTONE_LDAP_PAGE_SIZE',
+                   'CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK'):
+            if config[key] == '-1':
+                config[key] = None
+        elif is_bool(key):
+            config[key] = yn_to_bool(config[key])
+        elif config[key] == '':
+            config[key] = None
+
+
+def create_manifest(config, messages):
+    """Compute Keystone endpoint URLs and firewall rules into config.
+
+    An IPv6 controller address is wrapped in brackets so it can be
+    embedded in URLs.  Sets the public endpoint (port 5000, with and
+    without the API version suffix), the admin endpoint (port 35357),
+    and the firewall rule opening both ports.
+    """
+    if config['CONFIG_IP_VERSION'] == 'ipv6':
+        host = config['CONFIG_CONTROLLER_HOST']
+        config['CONFIG_KEYSTONE_HOST_URL'] = "[%s]" % host
+    else:
+        config['CONFIG_KEYSTONE_HOST_URL'] = config['CONFIG_CONTROLLER_HOST']
+
+    config['CONFIG_KEYSTONE_PUBLIC_URL'] = "http://%s:5000/%s" % (
+        config['CONFIG_KEYSTONE_HOST_URL'],
+        config['CONFIG_KEYSTONE_API_VERSION']
+    )
+    config['CONFIG_KEYSTONE_PUBLIC_URL_VERSIONLESS'] = "http://%s:5000/" % (
+        config['CONFIG_KEYSTONE_HOST_URL']
+    )
+    config['CONFIG_KEYSTONE_ADMIN_URL'] = "http://%s:35357" % (
+        config['CONFIG_KEYSTONE_HOST_URL']
+    )
+
+    # Firewall rule: allow keystone public/admin API traffic from any host.
+    fw_details = dict()
+    key = "keystone"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "keystone"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['5000', '35357']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_KEYSTONE_RULES'] = fw_details

+ 114 - 0
packstack/plugins/magnum_920.py

@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Magnum
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import processors
+from packstack.installer import utils
+from packstack.installer import validators
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Magnum Packstack Plugin Initialization --------------
+PLUGIN_NAME = "OS-Magnum"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register the Magnum configuration group with the controller.
+
+    Declares the Magnum DB and Keystone service passwords; the group is
+    gated on CONFIG_MAGNUM_INSTALL == 'y', so users are only prompted
+    when Magnum installation was requested.
+    """
+    magnum_params = {
+        "MAGNUM": [
+            # Password for Magnum's database account; auto-generated when
+            # left at PW_PLACEHOLDER (processors.process_password).
+            {"CMD_OPTION": "magnum-db-passwd",
+             "PROMPT": "Enter the password for the Magnum DB access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MAGNUM_DB_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            # Password for Magnum's Keystone service user.
+            {"CMD_OPTION": "magnum-ks-passwd",
+             "PROMPT": "Enter the password for the Magnum Keystone access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MAGNUM_KS_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+        ]
+
+    }
+    # Pull parameter USAGE texts from the shared documentation source.
+    update_params_usage(basedefs.PACKSTACK_DOC, magnum_params)
+
+    magnum_groups = [
+        {"GROUP_NAME": "MAGNUM",
+         "DESCRIPTION": "Magnum Options",
+         "PRE_CONDITION": "CONFIG_MAGNUM_INSTALL",
+         "PRE_CONDITION_MATCH": "y",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+    ]
+    for group in magnum_groups:
+        params = magnum_params[group["GROUP_NAME"]]
+        controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    if controller.CONF['CONFIG_MAGNUM_INSTALL'] != 'y':
+        return
+
+    magnum_steps = [
+        {'title': 'Adding Magnum manifest entries',
+         'functions': [create_all_manifest]},
+    ]
+
+    controller.addSequence("Installing OpenStack Magnum", [], [],
+                           magnum_steps)
+
+
+# ------------------------- helper functions -------------------------
+
+# ------------------------ Step Functions ----------------------------
+def create_all_manifest(config, messages):
+    """Prepare Magnum deployment data: AMQP SSL material and firewall rules.
+
+    When AMQP SSL is enabled, records the certificate/key paths in the
+    config and generates the certificate for the controller host; always
+    registers an iptables rule opening the Magnum API port to all hosts.
+    """
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        # Assignments double as config updates: the same paths are both
+        # stored in config and passed to generate_ssl_cert below.
+        ssl_cert_file = config['CONFIG_MAGNUM_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_magnum.crt'
+        )
+        ssl_key_file = config['CONFIG_MAGNUM_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_magnum.key'
+        )
+        ssl_host = config['CONFIG_CONTROLLER_HOST']
+        service = 'magnum'
+        # generate_ssl_cert is provided by packstack.modules.ospluginutils.
+        generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                          ssl_cert_file)
+
+    # Magnum API (port 9511) must be reachable from everywhere.
+    fw_details = dict()
+    key = "magnum"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "magnum api"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['9511']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_MAGNUM_API_RULES'] = fw_details

+ 636 - 0
packstack/plugins/manila_355.py

@@ -0,0 +1,636 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Manila
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import processors
+from packstack.installer import validators
+from packstack.installer import utils
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Manila Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Manila"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    conf_params = {
+        "MANILA": [
+            {"CMD_OPTION": "manila-db-passwd",
+             "PROMPT": "Enter the password for the Manila DB access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_DB_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-ks-passwd",
+             "PROMPT": "Enter the password for the Manila Keystone access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_KS_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-backend",
+             "PROMPT": "Enter the Manila backend to be configured",
+             "OPTION_LIST": ["generic", "netapp", "glusternative",
+                             "glusternfs"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "generic",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_BACKEND",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILANETAPP": [
+            {"CMD_OPTION": "manila-netapp-driver-handles-share-servers",
+             "PROMPT": ("Enter whether the driver handles share servers"),
+             "OPTION_LIST": ["true", "false"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "false",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-transport-type",
+             "PROMPT": ("Enter a NetApp transport type"),
+             "OPTION_LIST": ["http", "https"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "https",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_TRANSPORT_TYPE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-login",
+             "PROMPT": ("Enter a NetApp login"),
+             "OPTION_LIST": [""],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "admin",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_LOGIN",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-password",
+             "PROMPT": ("Enter a NetApp password"),
+             "OPTION_LIST": [""],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_PASSWORD",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-server-hostname",
+             "PROMPT": ("Enter a NetApp hostname"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "PROCESSORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_SERVER_HOSTNAME",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-storage-family",
+             "PROMPT": ("Enter a NetApp storage family"),
+             "OPTION_LIST": ['ontap_cluster'],
+             "VALIDATORS": [validators.validate_options],
+             "PROCESSORS": [],
+             "DEFAULT_VALUE": "ontap_cluster",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_STORAGE_FAMILY",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-server-port",
+             "PROMPT": ("Enter a NetApp server port"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "PROCESSORS": [],
+             "DEFAULT_VALUE": "443",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_SERVER_PORT",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-aggregate-name-search-pattern",
+             "PROMPT": ("Enter a NetApp aggregate name search pattern"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "(.*)",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_AGGREGATE_NAME_SEARCH_PATTERN",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILANETAPPMULTISVM": [
+            {"CMD_OPTION": "manila-netapp-root-volume-aggregate",
+             "PROMPT": ("Enter a NetApp root volume aggregate"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_ROOT_VOLUME_AGGREGATE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-netapp-root-volume-name",
+             "PROMPT": ("Enter a NetApp root volume name."),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "root",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_ROOT_VOLUME_NAME",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILANETAPPSINGLESVM": [
+            {"CMD_OPTION": "manila-netapp-vserver",
+             "PROMPT": ("Enter a NetApp Vserver"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "PROCESSORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETAPP_VSERVER",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILAGENERIC": [
+            {"CMD_OPTION": "manila-generic-driver-handles-share-servers",
+             "USAGE": ("Denotes whether the driver should handle the "
+                       "responsibility of managing share servers. This must be "
+                       "set to false if the driver is to operate without "
+                       "managing share servers."),
+             "PROMPT": ("Enter whether the driver handles share servers"),
+             "OPTION_LIST": ["true", "false"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "true",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-generic-volume-name-template",
+             "PROMPT": ("Enter a volume name template"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "manila-share-%s",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GENERIC_VOLUME_NAME_TEMPLATE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-generic-share-mount-path",
+             "PROMPT": ("Enter a share mount path"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "/shares",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GENERIC_SHARE_MOUNT_PATH",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-service-image-location",
+             "PROMPT": ("Enter a service image location"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": 'https://www.dropbox.com/s/vi5oeh10q1qkckh/'
+                              'ubuntu_1204_nfs_cifs.qcow2',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_SERVICE_IMAGE_LOCATION",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-service-instance-user",
+             "PROMPT": ("Enter a service instance user"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": 'ubuntu',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_SERVICE_INSTANCE_USER",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "manila-service-instance-password",
+             "PROMPT": ("Enter a service instance password"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": 'ubuntu',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_SERVICE_INSTANCE_PASSWORD",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILANETWORK": [
+            {"CMD_OPTION": "manila-network-type",
+             "PROMPT": ("Enter a network type"),
+             "OPTION_LIST": ['neutron', 'nova-network', 'standalone'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "neutron",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETWORK_TYPE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILANETWORKSTANDALONE": [
+            {"CMD_OPTION": "standalone_network_plugin_gateway",
+             "PROMPT": ("Enter a plugin gateway"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETWORK_STANDALONE_GATEWAY",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "standalone_network_plugin_mask",
+             "PROMPT": ("Enter a network mask"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETWORK_STANDALONE_NETMASK",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "standalone_network_plugin_segmentation_id",
+             "PROMPT": ("Enter a segmentation ID"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETWORK_STANDALONE_SEG_ID",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "standalone_network_plugin_ip_range",
+             "PROMPT": ("Enter a network mask"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETWORK_STANDALONE_IP_RANGE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "standalone_network_plugin_ip_version",
+             "PROMPT": ("Enter an IP version"),
+             "OPTION_LIST": ['4', '6'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "4",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_NETWORK_STANDALONE_IP_VERSION",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILAGLUSTERNATIVE": [
+            {"CMD_OPTION": "glusterfs-servers",
+             "PROMPT": ("Enter GlusterFS servers"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_SERVERS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "glusterfs-native-path-to-private_key",
+             "PROMPT": ("Enter path to the GlusterFS private key"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_NATIVE_PATH_TO_PRIVATE_KEY",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "glusterfs-volume-pattern",
+             "PROMPT": ("Enter volume pattern for GlusterFS"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_VOLUME_PATTERN",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "MANILAGLUSTERNFS": [
+            {"CMD_OPTION": "glusterfs-target",
+             "PROMPT": ("Enter GlusterFS target"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_TARGET",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "glusterfs-mount-point-base",
+             "PROMPT": ("Enter a mount point for GlusterFS mount"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_MOUNT_POINT_BASE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "glusterfs-nfs-server-type",
+             "PROMPT": ("Enter NFS server type (gluster/ganesha)"),
+             "OPTION_LIST": ['gluster', 'ganesha'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "gluster",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_NFS_SERVER_TYPE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "glusterfs-path-to-private-key",
+             "PROMPT": ("Enter path to GlusterFS server private key"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_PATH_TO_PRIVATE_KEY",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "glusterfs-ganesha-server-ip",
+             "PROMPT": ("Enter ip address of GlusterFS ganesha server"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_MANILA_GLUSTERFS_GANESHA_SERVER_IP",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+    }
+    update_params_usage(basedefs.PACKSTACK_DOC, conf_params)
+    conf_groups = [
+        {"GROUP_NAME": "MANILA",
+         "DESCRIPTION": "Manila Config parameters",
+         "PRE_CONDITION": "CONFIG_MANILA_INSTALL",
+         "PRE_CONDITION_MATCH": "y",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILANETAPP",
+         "DESCRIPTION": "Manila NetApp configuration",
+         "PRE_CONDITION": check_netapp_options,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILANETAPPMULTISVM",
+         "DESCRIPTION": "Manila NetApp multi-SVM configuration",
+         "PRE_CONDITION": check_netapp_options_multi_svm,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILANETAPPSINGLESVM",
+         "DESCRIPTION": "Manila NetApp single-SVM configuration",
+         "PRE_CONDITION": check_netapp_options_single_svm,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILAGENERIC",
+         "DESCRIPTION": "Manila generic driver configuration",
+         "PRE_CONDITION": check_generic_options,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILANETWORK",
+         "DESCRIPTION": "Manila general network configuration",
+         "PRE_CONDITION": "CONFIG_MANILA_INSTALL",
+         "PRE_CONDITION_MATCH": "y",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILANETWORKSTANDALONE",
+         "DESCRIPTION": "Manila standalone network configuration",
+         "PRE_CONDITION": check_network_standalone_options,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILAGLUSTERNATIVE",
+         "DESCRIPTION": "Manila GlusterFS native configuration",
+         "PRE_CONDITION": check_glusternative_options,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "MANILAGLUSTERNFS",
+         "DESCRIPTION": "Manila GlusterNFS configuration",
+         "PRE_CONDITION": check_glusternfs_options,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+    ]
+    for group in conf_groups:
+        params = conf_params[group["GROUP_NAME"]]
+        controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    config = controller.CONF
+    if config['CONFIG_MANILA_INSTALL'] != 'y':
+        return
+
+    config['CONFIG_MANILA_BACKEND'] = (
+        [i.strip() for i in config['CONFIG_MANILA_BACKEND'].split(',') if i]
+    )
+
+    manila_steps = [
+        {'title': 'Preparing Manila entries',
+         'functions': [create_manifest]}
+    ]
+
+    controller.addSequence("Installing OpenStack Manila", [], [], manila_steps)
+
+
+# ------------------------- helper functions -------------------------
+
+def check_netapp_options(config):
+    return (config['CONFIG_MANILA_INSTALL'] == 'y' and
+            'netapp' in config['CONFIG_MANILA_BACKEND'])
+
+
+def check_netapp_options_multi_svm(config):
+    key_name = 'CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS'
+    return (check_netapp_options(config) and
+            config[key_name] == "true")
+
+
+def check_netapp_options_single_svm(config):
+    key_name = 'CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS'
+    return (check_netapp_options(config) and
+            config[key_name] == "false")
+
+
+def check_generic_options(config):
+    return (config['CONFIG_MANILA_INSTALL'] == 'y' and
+            'generic' in config['CONFIG_MANILA_BACKEND'])
+
+
+def check_network_standalone_options(config):
+    return (config['CONFIG_MANILA_INSTALL'] == 'y' and
+            config['CONFIG_MANILA_NETWORK_TYPE'] == 'standalone')
+
+
+def check_glusternative_options(config):
+    return (config['CONFIG_MANILA_INSTALL'] == 'y' and
+            'glusternative' in config['CONFIG_MANILA_BACKEND'])
+
+
+def check_glusternfs_options(config):
+    return (config['CONFIG_MANILA_INSTALL'] == 'y' and
+            'glusternfs' in config['CONFIG_MANILA_BACKEND'])
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    """Prepare Manila deployment data: host, SSL material, bools, firewall.
+
+    Pins the storage host, generates the AMQP SSL certificate when
+    enabled, coerces yes/no driver flags to real booleans, and registers
+    an iptables rule opening the Manila API port to all hosts.
+    """
+    # Outside unsupported mode the storage host is always the controller.
+    if config['CONFIG_UNSUPPORTED'] != 'y':
+        config['CONFIG_STORAGE_HOST'] = config['CONFIG_CONTROLLER_HOST']
+
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        ssl_host = config['CONFIG_STORAGE_HOST']
+        # Assignments double as config updates: the same paths are both
+        # stored in config and passed to generate_ssl_cert below.
+        ssl_cert_file = config['CONFIG_MANILA_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_manila.crt'
+        )
+        ssl_key_file = config['CONFIG_MANILA_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_manila.key'
+        )
+        service = 'manila'
+        generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                          ssl_cert_file)
+
+    # Change these from text to Boolean values
+    # (values other than "true"/"false" are left untouched as strings).
+    boolean_keys = ['CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS',
+                    'CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS']
+    for key in [k for k in boolean_keys if k in config]:
+        if config[key].lower() == "true":
+            config[key] = True
+
+        elif config[key].lower() == "false":
+            config[key] = False
+
+    # manila API should be open for everyone
+    fw_details = dict()
+    key = "manila_api"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "manila-api"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['8786']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_MANILA_API_RULES'] = fw_details

+ 117 - 0
packstack/plugins/mariadb_003.py

@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures MariaDB
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer import utils
+
+from packstack.modules.common import filtered_hosts
+from packstack.modules.documentation import update_params_usage
+
+# ------------- MariaDB Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "MariaDB"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register the MariaDB configuration group with the controller.
+
+    Declares the MariaDB host, admin user and admin password options;
+    each DEPRECATES its old CONFIG_MYSQL_* counterpart.
+    """
+    params = [
+        {"CMD_OPTION": "mariadb-host",
+         "PROMPT": "Enter the IP address of the MariaDB server",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_ssh],
+         "DEFAULT_VALUE": utils.get_localhost_ip(),
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_MARIADB_HOST",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False,
+         "DEPRECATES": ['CONFIG_MYSQL_HOST']},
+
+        # USE_DEFAULT is True: the admin username is fixed to "root" and
+        # never prompted for.
+        {"CMD_OPTION": "mariadb-user",
+         "USAGE": "Username for the MariaDB admin user",
+         "PROMPT": "Enter the username for the MariaDB admin user",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "DEFAULT_VALUE": "root",
+         "MASK_INPUT": False,
+         "LOOSE_VALIDATION": False,
+         "CONF_NAME": "CONFIG_MARIADB_USER",
+         "USE_DEFAULT": True,
+         "NEED_CONFIRM": False,
+         "CONDITION": False,
+         "DEPRECATES": ['CONFIG_MYSQL_USER']},
+
+        {"CMD_OPTION": "mariadb-pw",
+         "USAGE": "Password for the MariaDB admin user",
+         "PROMPT": "Enter the password for the MariaDB admin user",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "PROCESSORS": [processors.process_password],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_MARIADB_PW",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": True,
+         "CONDITION": False,
+         "DEPRECATES": ['CONFIG_MYSQL_PW']},
+    ]
+    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
+    # The lambda PRE_CONDITION always yields 'yes', so this group is
+    # unconditionally active.
+    group = {"GROUP_NAME": "MARIADB",
+             "DESCRIPTION": "MariaDB Config parameters",
+             "PRE_CONDITION": lambda x: 'yes',
+             "PRE_CONDITION_MATCH": "yes",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    mariadbsteps = [
+        {'title': 'Preparing MariaDB entries',
+         'functions': [create_manifest]}
+    ]
+    controller.addSequence("Installing MariaDB", [], [], mariadbsteps)
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    """Compute the MariaDB host URL and per-host firewall rules.
+
+    Picks the configured MariaDB host when Packstack installs MariaDB,
+    otherwise the controller host; brackets it for IPv6; then opens port
+    3306 individually for every relevant host.
+    """
+    if config['CONFIG_MARIADB_INSTALL'] == 'y':
+        host = config['CONFIG_MARIADB_HOST']
+    else:
+        host = config['CONFIG_CONTROLLER_HOST']
+
+    # IPv6 literals must be bracketed to be usable inside a URL.
+    if config['CONFIG_IP_VERSION'] == 'ipv6':
+        config['CONFIG_MARIADB_HOST_URL'] = "[%s]" % host
+    else:
+        config['CONFIG_MARIADB_HOST_URL'] = host
+
+    # One rule per host returned by filtered_hosts; the loop deliberately
+    # reuses `host` -- its earlier value is already consumed above.
+    fw_details = dict()
+    for host in filtered_hosts(config, exclude=False, dbhost=True):
+        key = "mariadb_%s" % host
+        fw_details.setdefault(key, {})
+        fw_details[key]['host'] = "%s" % host
+        fw_details[key]['service_name'] = "mariadb"
+        fw_details[key]['chain'] = "INPUT"
+        fw_details[key]['ports'] = ['3306']
+        fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_MARIADB_RULES'] = fw_details

+ 111 - 0
packstack/plugins/nagios_910.py

@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Nagios
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer import utils
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.common import filtered_hosts
+
+# ------------- Nagios Packstack Plugin Initialization --------------
+
+# Plugin display name, plus a colorized variant for console output.
+PLUGIN_NAME = "OS-Nagios"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register the Nagios parameter group with the installer controller.
+
+    Defines the single CONFIG_NAGIOS_PW parameter (masked password for the
+    nagiosadmin user); the group is only prompted when
+    CONFIG_NAGIOS_INSTALL == 'y'.
+    """
+    params = [
+        {"CMD_OPTION": "nagios-passwd",
+         "PROMPT": "Enter the password for the nagiosadmin user",
+         "OPTION_LIST": [],
+         "VALIDATORS": [validators.validate_not_empty],
+         "PROCESSORS": [processors.process_password],
+         "DEFAULT_VALUE": "PW_PLACEHOLDER",
+         "MASK_INPUT": True,
+         "LOOSE_VALIDATION": True,
+         "CONF_NAME": "CONFIG_NAGIOS_PW",
+         "USE_DEFAULT": False,
+         "NEED_CONFIRM": False,
+         "CONDITION": False},
+    ]
+    # Attach usage/help text for the parameter from the packstack docs.
+    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
+    group = {"GROUP_NAME": "NAGIOS",
+             "DESCRIPTION": "Nagios Config parameters",
+             "PRE_CONDITION": "CONFIG_NAGIOS_INSTALL",
+             "PRE_CONDITION_MATCH": "y",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    if controller.CONF['CONFIG_NAGIOS_INSTALL'] != 'y':
+        return
+
+    nagiossteps = [
+        {'title': 'Preparing Nagios server entries',
+         'functions': [create_manifest]},
+        {'title': 'Preparing Nagios host entries',
+         'functions': [create_nrpe_manifests]}
+    ]
+    controller.addSequence("Installing Nagios", [], [], nagiossteps)
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    config['CONFIG_NAGIOS_NODES'] = list(filtered_hosts(config))
+    openstack_services = []
+    openstack_services.append('keystone-user-list')
+
+    if config['CONFIG_GLANCE_INSTALL'] == 'y':
+        openstack_services.append('glance-index')
+
+    if config['CONFIG_NOVA_INSTALL'] == 'y':
+        openstack_services.append('nova-list')
+
+    if config['CONFIG_CINDER_INSTALL'] == 'y':
+        openstack_services.append('cinder-list')
+
+    if config['CONFIG_SWIFT_INSTALL'] == 'y':
+        openstack_services.append('swift-list')
+
+    config['CONFIG_NAGIOS_SERVICES'] = openstack_services
+
+
+def create_nrpe_manifests(config, messages):
+    """Prepare per-host NRPE entries and the Nagios->NRPE firewall rule.
+
+    NOTE(review): CONFIG_NRPE_HOST and FIREWALL_NAGIOS_NRPE_RULES are
+    rewritten on every loop iteration, so after the loop only the last
+    host's values remain in ``config`` — presumably each iteration's values
+    are consumed elsewhere per host; confirm against the manifest
+    generation code.
+    """
+    for hostname in filtered_hosts(config):
+        config['CONFIG_NRPE_HOST'] = hostname
+
+        # Only the Nagios host is allowed to talk to nrpe
+        fw_details = dict()
+        key = "nagios_nrpe"
+        fw_details.setdefault(key, {})
+        fw_details[key]['host'] = "%s" % config['CONFIG_CONTROLLER_HOST']
+        fw_details[key]['service_name'] = "nagios-nrpe"
+        fw_details[key]['chain'] = "INPUT"
+        fw_details[key]['ports'] = ['5666']
+        fw_details[key]['proto'] = "tcp"
+        config['FIREWALL_NAGIOS_NRPE_RULES'] = fw_details
+
+    # Final user-facing hint; interpolates values straight from config.
+    messages.append("To use Nagios, browse to "
+                    "http://%(CONFIG_CONTROLLER_HOST)s/nagios "
+                    "username: nagiosadmin, password: %(CONFIG_NAGIOS_PW)s"
+                    % config)

+ 882 - 0
packstack/plugins/neutron_350.py

@@ -0,0 +1,882 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Neutron
+"""
+
+import re
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer import output_messages
+from packstack.installer.utils import split_hosts
+
+from packstack.modules import common
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Neutron Packstack Plugin Initialization --------------
+
+# Plugin display name, plus a colorized variant for console output.
+PLUGIN_NAME = "OS-Neutron"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    conf_params = {
+        "NEUTRON": [
+            {"CMD_OPTION": "os-neutron-ks-password",
+             "PROMPT": "Enter the password for Neutron Keystone access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NEUTRON_KS_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-db-password",
+             "PROMPT": "Enter the password for Neutron DB access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NEUTRON_DB_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-l3-ext-bridge",
+             "PROMPT": ("Enter the ovs bridge the Neutron L3 agent will use "
+                        "for external traffic, or 'provider' if using "
+                        "provider networks."),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "br-ex",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_L3_EXT_BRIDGE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-metadata-pw",
+             "PROMPT": "Enter Neutron metadata agent password",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NEUTRON_METADATA_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-lbaas-install",
+             "PROMPT": "Should Packstack install Neutron LBaaS",
+             "OPTION_LIST": ["y", "n"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "n",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_LBAAS_INSTALL",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-metering-agent-install",
+             "PROMPT": ("Should Packstack install Neutron L3 Metering agent"),
+             "OPTION_LIST": ["y", "n"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "y",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NEUTRON_METERING_AGENT_INSTALL",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "neutron-fwaas",
+             "PROMPT": "Would you like to configure neutron FWaaS?",
+             "OPTION_LIST": ["y", "n"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "n",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_FWAAS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-vpnaas-install",
+             "PROMPT": "Would you like to configure neutron VPNaaS?",
+             "OPTION_LIST": ["y", "n"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "n",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_VPNAAS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "NEUTRON_LB_AGENT": [
+            {"CMD_OPTION": "os-neutron-lb-interface-mappings",
+             "PROMPT": ("Enter a comma separated list of interface mappings "
+                        "for the Neutron linuxbridge plugin"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "NEUTRON_OVS_AGENT": [
+            {"CMD_OPTION": "os-neutron-ovs-bridge-mappings",
+             "PROMPT": ("Enter a comma separated list of bridge mappings for "
+                        "the Neutron openvswitch plugin"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "extnet:br-ex",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ovs-bridge-interfaces",
+             "PROMPT": ("Enter a comma separated list of OVS bridge:interface "
+                        "pairs for the Neutron openvswitch plugin"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_BRIDGE_IFACES",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ovs-bridges-compute",
+             "PROMPT": ("Enter a comma separated list of bridges for the "
+                        "Neutron OVS plugin in compute nodes. They must "
+                        "be included in os-neutron-ovs-bridge-mappings and "
+                        "os-neutron-ovs-bridge-interfaces."),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_BRIDGES_COMPUTE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ovs-external-physnet",
+             "PROMPT": ("Enter the name of the physical external network as"
+                        "defined in bridge mappings"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "extnet",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_EXTERNAL_PHYSNET",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "NEUTRON_OVS_AGENT_TUNNEL": [
+            {"CMD_OPTION": "os-neutron-ovs-tunnel-if",
+             "PROMPT": ("Enter interface with IP to override the default "
+                        "tunnel local_ip"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_TUNNEL_IF",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ovs-tunnel-subnets",
+             "PROMPT": ("Enter comma separated list of subnets used for "
+                        "tunneling to make them allowed by IP filtering."),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_TUNNEL_SUBNETS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "NEUTRON_OVS_AGENT_VXLAN": [
+            {"CMD_OPTION": "os-neutron-ovs-vxlan-udp-port",
+             "CONF_NAME": "CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT",
+             "PROMPT": "Enter VXLAN UDP port number",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_port],
+             "DEFAULT_VALUE": 4789,
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+
+        "NEUTRON_ML2_PLUGIN": [
+            {"CMD_OPTION": "os-neutron-ml2-type-drivers",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_TYPE_DRIVERS",
+             "PROMPT": ("Enter a comma separated list of network type driver "
+                        "entrypoints"),
+             "OPTION_LIST": ["local", "flat", "vlan", "gre", "vxlan"],
+             "VALIDATORS": [validators.validate_multi_options],
+             "DEFAULT_VALUE": "vxlan,flat",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-tenant-network-types",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES",
+             "PROMPT": ("Enter a comma separated ordered list of "
+                        "network_types to allocate as tenant networks"),
+             "OPTION_LIST": ["local", "vlan", "gre", "vxlan"],
+             "VALIDATORS": [validators.validate_multi_options],
+             "DEFAULT_VALUE": "vxlan",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-mechanism-drivers",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS",
+             "PROMPT": ("Enter a comma separated ordered list of networking "
+                        "mechanism driver entrypoints"),
+             "OPTION_LIST": ["logger", "test", "linuxbridge", "openvswitch",
+                             "hyperv", "ncs", "arista", "cisco_nexus",
+                             "mlnx", "l2population", "sriovnicswitch"],
+             "VALIDATORS": [validators.validate_multi_options],
+             "DEFAULT_VALUE": "openvswitch",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-flat-networks",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_FLAT_NETWORKS",
+             "PROMPT": ("Enter a comma separated  list of physical_network "
+                        "names with which flat networks can be created"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "*",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-vlan-ranges",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_VLAN_RANGES",
+             "PROMPT": ("Enter a comma separated list of physical_network "
+                        "names usable for VLAN"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-tunnel-id-ranges",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES",
+             "PROMPT": ("Enter a comma separated list of <tun_min>:<tun_max> "
+                        "tuples enumerating ranges of GRE tunnel IDs that "
+                        "are available for tenant network allocation"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-vxlan-group",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_VXLAN_GROUP",
+             "PROMPT": "Enter a multicast group for VXLAN",
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-vni-ranges",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_VNI_RANGES",
+             "PROMPT": ("Enter a comma separated list of <vni_min>:<vni_max> "
+                        "tuples enumerating ranges of VXLAN VNI IDs that are "
+                        "available for tenant network allocation"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "10:100",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            # We need to ask for this only in case of ML2 plugins
+            {"CMD_OPTION": "os-neutron-l2-agent",
+             "PROMPT": ("Enter the name of the L2 agent to be used "
+                        "with Neutron"),
+             "OPTION_LIST": ["linuxbridge", "openvswitch"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "openvswitch",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NEUTRON_L2_AGENT",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+            {"CMD_OPTION": "os-neutron-ml2-supported-pci-vendor-devs",
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_SUPPORTED_PCI_VENDOR_DEVS",
+             "PROMPT": ("Enter a comma separated list of supported PCI "
+                        "vendor devices, defined by vendor_id:product_id "
+                        "according to the PCI ID Repository."),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": ['15b3:1004', '8086:10ca'],
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "os-neutron-ml2-sriov-interface-mappings",
+             "PROMPT": ("Enter a comma separated list of interface mappings "
+                        "for the Neutron ML2 sriov agent"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NEUTRON_ML2_SRIOV_INTERFACE_MAPPINGS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+        ],
+    }
+    update_params_usage(basedefs.PACKSTACK_DOC, conf_params)
+    conf_groups = [
+        {"GROUP_NAME": "NEUTRON",
+         "DESCRIPTION": "Neutron config",
+         "PRE_CONDITION": "CONFIG_NEUTRON_INSTALL",
+         "PRE_CONDITION_MATCH": "y",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "NEUTRON_ML2_PLUGIN",
+         "DESCRIPTION": "Neutron ML2 plugin config",
+         "PRE_CONDITION": neutron_install,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "NEUTRON_LB_AGENT",
+         "DESCRIPTION": "Neutron LB agent config",
+         "PRE_CONDITION": use_ml2_with_linuxbridge,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "NEUTRON_OVS_AGENT",
+         "DESCRIPTION": "Neutron OVS agent config",
+         "PRE_CONDITION": use_ml2_with_ovs,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "NEUTRON_OVS_AGENT_TUNNEL",
+         "DESCRIPTION": "Neutron OVS agent config for tunnels",
+         "PRE_CONDITION": use_ml2_with_ovs,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+
+        {"GROUP_NAME": "NEUTRON_OVS_AGENT_VXLAN",
+         "DESCRIPTION": "Neutron OVS agent config for VXLAN",
+         "PRE_CONDITION": use_openvswitch_vxlan,
+         "PRE_CONDITION_MATCH": True,
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+    ]
+    for group in conf_groups:
+        params = conf_params[group["GROUP_NAME"]]
+        controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    config = controller.CONF
+    if config['CONFIG_NEUTRON_INSTALL'] != 'y':
+        return
+    if config['CONFIG_IRONIC_INSTALL'] == 'y':
+        config['CONFIG_NEUTRON_ML2_TYPE_DRIVERS'] += ', flat'
+        config['CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES'] += ', flat'
+        if 'openvswitch' not in config['CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS']:
+            config['CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS'] += 'openvswitch'
+        config['CONFIG_NEUTRON_ML2_FLAT_NETWORKS'] = 'physnet1'
+
+    if use_ml2_with_sriovnicswitch(config):
+        if ('openvswitch' not in config['CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS']
+                and 'linuxbridge' not in
+                config['CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS']):
+            config['CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS'] += ', openvswitch'
+
+    plugin_db = 'neutron'
+    plugin_path = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+    # values modification
+    for key in ('CONFIG_NEUTRON_ML2_TYPE_DRIVERS',
+                'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES',
+                'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS',
+                'CONFIG_NEUTRON_ML2_FLAT_NETWORKS',
+                'CONFIG_NEUTRON_ML2_VLAN_RANGES',
+                'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES',
+                'CONFIG_NEUTRON_ML2_VNI_RANGES'):
+        if config[key] == '':
+            config[key] = []
+        else:
+            config[key] = [i.strip() for i in config[key].split(',') if i]
+    key = 'CONFIG_NEUTRON_ML2_VXLAN_GROUP'
+    config[key] = "%s" % config[key] if config[key] else ''
+
+    config['CONFIG_NEUTRON_L2_DBNAME'] = plugin_db
+    config['CONFIG_NEUTRON_CORE_PLUGIN'] = plugin_path
+
+    global api_hosts, network_hosts, compute_hosts, q_hosts
+    api_hosts = split_hosts(config['CONFIG_CONTROLLER_HOST'])
+    network_hosts = split_hosts(config['CONFIG_NETWORK_HOSTS'])
+    compute_hosts = set()
+    if config['CONFIG_NOVA_INSTALL'] == 'y':
+        compute_hosts = split_hosts(config['CONFIG_COMPUTE_HOSTS'])
+    q_hosts = api_hosts | network_hosts | compute_hosts
+
+    neutron_steps = [
+        {'title': 'Preparing Neutron LBaaS Agent entries',
+         'functions': [create_lbaas_manifests]},
+        {'title': 'Preparing Neutron API entries',
+         'functions': [create_manifests]},
+        {'title': 'Preparing Neutron L3 entries',
+         'functions': [create_l3_manifests]},
+        {'title': 'Preparing Neutron L2 Agent entries',
+         'functions': [create_l2_agent_manifests]},
+        {'title': 'Preparing Neutron DHCP Agent entries',
+         'functions': [create_dhcp_manifests]},
+        {'title': 'Preparing Neutron Metering Agent entries',
+         'functions': [create_metering_agent_manifests]},
+        {'title': 'Checking if NetworkManager is enabled and running',
+         'functions': [check_nm_status]},
+    ]
+    controller.addSequence("Installing OpenStack Neutron", [], [],
+                           neutron_steps)
+
+
+# ------------------------- helper functions -------------------------
+
+def neutron_install(config):
+    return config['CONFIG_NEUTRON_INSTALL'] == 'y'
+
+
+def use_ml2_with_linuxbridge(config):
+    ml2_used = (neutron_install(config) and
+                config["CONFIG_NEUTRON_L2_AGENT"] == 'linuxbridge')
+    return ml2_used
+
+
+def use_ml2_with_ovs(config):
+    return (neutron_install(config) and
+            config["CONFIG_NEUTRON_L2_AGENT"] == 'openvswitch')
+
+
+def use_openvswitch_vxlan(config):
+    ml2_vxlan = (
+        use_ml2_with_ovs(config) and
+        'vxlan' in config['CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES']
+    )
+    return ml2_vxlan
+
+
+def use_openvswitch_gre(config):
+    ml2_vxlan = (
+        use_ml2_with_ovs(config) and
+        'gre' in config['CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES']
+    )
+    return ml2_vxlan
+
+
+def use_ml2_with_sriovnicswitch(config):
+    ml2_sriovnic = (
+        use_ml2_with_ovs(config) and
+        'sriovnicswitch' in config['CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS']
+    )
+    return ml2_sriovnic
+
+
+def get_if_driver(config):
+    agent = config['CONFIG_NEUTRON_L2_AGENT']
+    if agent == "openvswitch":
+        return 'neutron.agent.linux.interface.OVSInterfaceDriver'
+    elif agent == 'linuxbridge':
+        return 'neutron.agent.linux.interface.BridgeInterfaceDriver'
+
+
+def find_mapping(haystack, needle):
+    return needle in [x.split(':')[1].strip() for x in get_values(haystack)]
+
+
+def get_values(val):
+    return [x.strip() for x in val.split(',')] if val else []
+
+
+def tunnel_fw_details(config, host, src, fw_details):
+    key = "neutron_tunnel_%s_%s" % (host, src)
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "%s" % src
+    fw_details[key]['service_name'] = "neutron tunnel port"
+    fw_details[key]['chain'] = "INPUT"
+    if use_openvswitch_vxlan(config):
+        fw_details[key]['proto'] = 'udp'
+        tun_port = ("%s" % config['CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT'])
+    else:
+        fw_details[key]['proto'] = 'gre'
+        tun_port = None
+    fw_details[key]['ports'] = tun_port
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifests(config, messages):
+    global q_hosts
+
+    service_plugins = []
+    service_providers = []
+    if config['CONFIG_LBAAS_INSTALL'] == 'y':
+        lbaas_plugin = ('neutron_lbaas.services.loadbalancer.plugin.'
+                        'LoadBalancerPluginv2')
+        service_plugins.append(lbaas_plugin)
+        lbaas_sp = ('LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.'
+                    'plugin_driver.HaproxyOnHostPluginDriver:default')
+        service_providers.append(lbaas_sp)
+
+    # ML2 uses the L3 Router service plugin to implement l3 agent
+    service_plugins.append('router')
+
+    if config['CONFIG_NEUTRON_METERING_AGENT_INSTALL'] == 'y':
+        service_plugins.append('metering')
+
+    if config['CONFIG_NEUTRON_FWAAS'] == 'y':
+        service_plugins.append('firewall')
+        fwaas_sp = ('FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.'
+                    'OVSHybridIptablesFirewallDriver:default')
+        service_providers.append(fwaas_sp)
+
+    if config['CONFIG_NEUTRON_VPNAAS'] == 'y':
+        service_plugins.append('vpnaas')
+        vpnaas_sp = ('VPN:libreswan:neutron_vpnaas.services.vpn.'
+                     'service_drivers.ipsec.IPsecVPNDriver:default')
+        service_providers.append(vpnaas_sp)
+
+    config['SERVICE_PLUGINS'] = (service_plugins if service_plugins
+                                 else 'undef')
+
+    config['SERVICE_PROVIDERS'] = (service_providers if service_providers
+                                   else [])
+
+    config['FIREWALL_DRIVER'] = ("neutron.agent.linux.iptables_firewall."
+                                 "OVSHybridIptablesFirewallDriver")
+
+    plugin_manifest = 'neutron_ml2_plugin'
+
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        ssl_cert_file = config['CONFIG_NEUTRON_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_neutron.crt'
+        )
+        ssl_key_file = config['CONFIG_NEUTRON_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_neutron.key'
+        )
+        service = 'neutron'
+
+    for host in q_hosts:
+        if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+            generate_ssl_cert(config, host, service, ssl_key_file,
+                              ssl_cert_file)
+
+        if host in api_hosts:
+            # Firewall
+            fw_details = dict()
+            key = "neutron_server_%s" % host
+            fw_details.setdefault(key, {})
+            fw_details[key]['host'] = "ALL"
+            fw_details[key]['service_name'] = "neutron server"
+            fw_details[key]['chain'] = "INPUT"
+            fw_details[key]['ports'] = ['9696']
+            fw_details[key]['proto'] = "tcp"
+            config['FIREWALL_NEUTRON_SERVER_RULES'] = fw_details
+
+        # We also need to open VXLAN/GRE port for agent
+        if use_openvswitch_vxlan(config) or use_openvswitch_gre(config):
+            if config['CONFIG_IP_VERSION'] == 'ipv6':
+                msg = output_messages.WARN_IPV6_OVS
+                messages.append(utils.color_text(msg % host, 'red'))
+            fw_details = dict()
+            if (config['CONFIG_NEUTRON_OVS_TUNNEL_SUBNETS']):
+                tunnel_subnets = map(
+                    str.strip,
+                    config['CONFIG_NEUTRON_OVS_TUNNEL_SUBNETS'].split(',')
+                )
+                cf_fw_nt_key = ("FIREWALL_NEUTRON_TUNNEL_RULES_%s" % host)
+                for subnet in tunnel_subnets:
+                    tunnel_fw_details(config, host, subnet, fw_details)
+                config[cf_fw_nt_key] = fw_details
+            else:
+                cf_fw_nt_key = ("FIREWALL_NEUTRON_TUNNEL_RULES_%s" % host)
+                for n_host in network_hosts | compute_hosts:
+                    if config['CONFIG_NEUTRON_OVS_TUNNEL_IF']:
+                        if config['CONFIG_USE_SUBNETS'] == 'y':
+                            iface = common.cidr_to_ifname(
+                                config['CONFIG_NEUTRON_OVS_TUNNEL_IF'],
+                                n_host, config)
+                        else:
+                            iface = config['CONFIG_NEUTRON_OVS_TUNNEL_IF']
+                        ifip = ("ipaddress_%s" % iface)
+                        ifip = re.sub('[\.\-\:]', '_', ifip)
+                        try:
+                            src_host = config['HOST_DETAILS'][n_host][ifip]
+                        except KeyError:
+                            raise KeyError('Couldn\'t detect ipaddress of '
+                                           'interface %s on node %s' %
+                                           (iface, n_host))
+                    else:
+                        src_host = n_host
+                    tunnel_fw_details(config, host, src_host, fw_details)
+                config[cf_fw_nt_key] = fw_details
+
+
+def create_l3_manifests(config, messages):
+    """Prepare per-network-host config entries for the Neutron L3 agent.
+
+    Mutates ``config`` in place; ``messages`` is unused here but kept for
+    the common step-function signature.
+    """
+    global network_hosts
+
+    # 'provider' is a sentinel meaning "no external bridge"; normalize it
+    # to an empty string before further processing.
+    if config['CONFIG_NEUTRON_L3_EXT_BRIDGE'] == 'provider':
+        config['CONFIG_NEUTRON_L3_EXT_BRIDGE'] = ''
+
+    for host in network_hosts:
+        config['CONFIG_NEUTRON_L3_HOST'] = host
+        config['CONFIG_NEUTRON_L3_INTERFACE_DRIVER'] = get_if_driver(config)
+
+        if config['CONFIG_NEUTRON_L2_AGENT'] == 'openvswitch':
+            # Only create the external OVS bridge ourselves when it is not
+            # already covered by one of the configured bridge mappings.
+            ext_bridge = config['CONFIG_NEUTRON_L3_EXT_BRIDGE']
+            mapping = find_mapping(
+                config['CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS'],
+                ext_bridge) if ext_bridge else None
+            if (ext_bridge and not mapping):
+                config['CONFIG_NEUTRON_OVS_BRIDGE'] = ext_bridge
+                config['CONFIG_NEUTRON_OVS_BRIDGE_CREATE'] = 'y'
+            else:
+                config['CONFIG_NEUTRON_OVS_BRIDGE_CREATE'] = 'n'
+        else:
+            config['CONFIG_NEUTRON_OVS_BRIDGE_CREATE'] = 'n'
+
+
+def create_dhcp_manifests(config, messages):
+    """Prepare Neutron DHCP agent config entries and firewall rules.
+
+    NOTE(review): FIREWALL_NEUTRON_DHCPIN_RULES / DHCPOUT_RULES are
+    reassigned a fresh dict on every loop iteration, so after the loop
+    they only contain the rule for the last host — confirm this is the
+    intended behavior for multi-network-host deployments.
+    """
+    global network_hosts
+
+    for host in network_hosts:
+        config["CONFIG_NEUTRON_DHCP_HOST"] = host
+        config['CONFIG_NEUTRON_DHCP_INTERFACE_DRIVER'] = get_if_driver(config)
+
+        # Firewall Rules for dhcp in (server port 67/udp)
+        fw_details = dict()
+        key = "neutron_dhcp_in_%s" % host
+        fw_details.setdefault(key, {})
+        fw_details[key]['host'] = "ALL"
+        fw_details[key]['service_name'] = "neutron dhcp in"
+        fw_details[key]['chain'] = "INPUT"
+        fw_details[key]['ports'] = ['67']
+        fw_details[key]['proto'] = "udp"
+        config['FIREWALL_NEUTRON_DHCPIN_RULES'] = fw_details
+
+        # Firewall Rules for dhcp out (client port 68/udp)
+        fw_details = dict()
+        key = "neutron_dhcp_out_%s" % host
+        fw_details.setdefault(key, {})
+        fw_details[key]['host'] = "ALL"
+        fw_details[key]['service_name'] = "neutron dhcp out"
+        fw_details[key]['chain'] = "OUTPUT"
+        fw_details[key]['ports'] = ['68']
+        fw_details[key]['proto'] = "udp"
+        config['FIREWALL_NEUTRON_DHCPOUT_RULES'] = fw_details
+
+
+def create_lbaas_manifests(config, messages):
+    """Set the LBaaS interface driver when LBaaS install is requested.
+
+    NOTE(review): the loop variable ``host`` is unused and the same config
+    key is written on each iteration — the loop is effectively a single
+    assignment guarded by network_hosts being non-empty.
+    """
+    global network_hosts
+
+    if not config['CONFIG_LBAAS_INSTALL'] == 'y':
+        return
+
+    for host in network_hosts:
+        config['CONFIG_NEUTRON_LBAAS_INTERFACE_DRIVER'] = get_if_driver(config)
+
+
+def create_metering_agent_manifests(config, messages):
+    """Set the metering agent interface driver when its install is requested.
+
+    NOTE(review): as with the LBaaS step above, the loop variable is unused
+    and the same key is rewritten per iteration.
+    """
+    global network_hosts
+
+    if not config['CONFIG_NEUTRON_METERING_AGENT_INSTALL'] == 'y':
+        return
+
+    for host in network_hosts:
+        config['CONFIG_NEUTRON_METERING_IFCE_DRIVER'] = get_if_driver(config)
+
+
+def create_l2_agent_manifests(config, messages):
+    """Prepare L2 agent (openvswitch/linuxbridge) config entries.
+
+    Computes tunneling flags, bridge mappings and per-node bridge interface
+    lists for network and compute hosts. Raises KeyError for an unknown
+    CONFIG_NEUTRON_L2_AGENT value.
+    """
+    global network_hosts, compute_hosts
+
+    agent = config["CONFIG_NEUTRON_L2_AGENT"]
+
+    # CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS will be available only for ML2
+    # plugin deployment, but we need CONFIG_NEUTRON_USE_L2POPULATION also
+    # for other plugin template generation
+    if ('l2population' in
+            config.get('CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS', [])):
+        config['CONFIG_NEUTRON_USE_L2POPULATION'] = True
+    else:
+        config['CONFIG_NEUTRON_USE_L2POPULATION'] = False
+
+    if agent == "openvswitch":
+        # NOTE(review): set(ovs_type) assumes the config value is a list of
+        # type-driver strings; if the 'local' default string were used,
+        # set() would iterate its characters — confirm the value is always
+        # a list by this point.
+        ovs_type = 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS'
+        ovs_type = config.get(ovs_type, 'local')
+        tunnel = use_openvswitch_vxlan(config) or use_openvswitch_gre(config)
+        config["CONFIG_NEUTRON_OVS_TUNNELING"] = tunnel
+        tunnel_types = set(ovs_type) & set(['gre', 'vxlan'])
+        config["CONFIG_NEUTRON_OVS_TUNNEL_TYPES"] = list(tunnel_types)
+
+        bm_arr = get_values(config["CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS"])
+        iface_arr = get_values(config["CONFIG_NEUTRON_OVS_BRIDGE_IFACES"])
+
+        # The CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS parameter contains a
+        # comma-separated list of bridge mappings. Since the puppet module
+        # expects this parameter to be an array, this parameter must be
+        # properly formatted by packstack, then consumed by the puppet module.
+        # For example, the input string 'A, B' should be formatted as
+        # '['A','B']'.
+        config["CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS"] = bm_arr
+        config["CONFIG_NEUTRON_OVS_BRIDGE_IFACES"] = []
+
+        # Bridge configuration and mappings for compute nodes can be different.
+        # Parameter CONFIG_NEUTRON_OVS_BRIDGES_COMPUTE contains the list of
+        # bridge names, included in bridge mappings and bridge interfaces, that
+        # must be created in compute nodes.
+        brd_arr_cmp = get_values(config["CONFIG_NEUTRON_OVS_BRIDGES_COMPUTE"])
+        if_arr_cmp = []
+        mapp_arr_cmp = []
+        for brd in brd_arr_cmp:
+            if_arr_cmp.append(common.find_pair_with(iface_arr, brd, 0))
+            mapp_arr_cmp.append(common.find_pair_with(bm_arr, brd, 1))
+
+        config["CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS_COMPUTE"] = mapp_arr_cmp
+        config["CONFIG_NEUTRON_OVS_BRIDGE_IFACES_COMPUTE"] = []
+        no_local_types = set(ovs_type) & set(['gre', 'vxlan', 'vlan', 'flat'])
+        no_tunnel_types = set(ovs_type) & set(['vlan', 'flat'])
+    elif agent == "linuxbridge":
+        # NOTE(review): host_var is assigned but never read in this
+        # function — possibly a leftover from an earlier implementation.
+        host_var = 'CONFIG_NEUTRON_LB_HOST'
+    else:
+        raise KeyError("Unknown layer2 agent")
+
+    for host in network_hosts | compute_hosts:
+        # NICs connected to OVS bridges can be required in network nodes if
+        # vlan, flat, vxlan or gre are enabled. For compute nodes, they are
+        # only required if vlan or flat are enabled.
+        if (
+            agent == "openvswitch" and (
+                (host in network_hosts and no_local_types)
+                or no_tunnel_types)
+        ):
+            if config['CONFIG_USE_SUBNETS'] == 'y':
+                iface_arr = [
+                    common.cidr_to_ifname(i, host, config) for i in iface_arr
+                ]
+                if_arr_cmp = [
+                    common.cidr_to_ifname(i, host, config) for i in if_arr_cmp
+                ]
+            config["CONFIG_NEUTRON_OVS_BRIDGE_IFACES"] = iface_arr
+            config["CONFIG_NEUTRON_OVS_BRIDGE_IFACES_COMPUTE"] = if_arr_cmp
+            config['CREATE_BRIDGES'] = 'y'
+        else:
+            config['CREATE_BRIDGES'] = 'n'
+
+
+def check_nm_status(config, messages):
+    """Warn if NetworkManager is enabled or active on any managed host.
+
+    Probes each host over SSH: systemd hosts are checked via
+    ``systemctl is-enabled/is-active``, others via the legacy
+    ``service NetworkManager status``. Appends a yellow warning to
+    ``messages`` listing affected hosts.
+    """
+    hosts_with_nm = []
+    for host in common.filtered_hosts(config):
+        server = utils.ScriptRunner(host)
+        # A zero exit status from bare `systemctl` indicates a systemd host.
+        server.append("systemctl")
+        rc, out = server.execute(can_fail=False)
+        server.clear()
+
+        if rc < 1:
+            server.append("systemctl is-enabled NetworkManager")
+            rc, is_enabled = server.execute(can_fail=False)
+            is_enabled = is_enabled.strip("\n ")
+            server.clear()
+
+            server.append("systemctl is-active NetworkManager")
+            rc, is_active = server.execute(can_fail=False)
+            is_active = is_active.strip("\n ")
+
+            if is_enabled == "enabled" or is_active == "active":
+                hosts_with_nm.append(host)
+        else:
+            # Non-systemd host: fall back to SysV service status; exit
+            # status 0 means NetworkManager is running.
+            server.clear()
+            server.append("service NetworkManager status")
+            rc, out = server.execute(can_fail=False)
+
+            if rc < 1:
+                hosts_with_nm.append(host)
+
+        server.clear()
+
+    if hosts_with_nm:
+        hosts_list = ', '.join("%s" % x for x in hosts_with_nm)
+        msg = output_messages.WARN_NM_ENABLED
+        messages.append(utils.color_text(msg % hosts_list, 'yellow'))

+ 533 - 0
packstack/plugins/nova_300.py

@@ -0,0 +1,533 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Nova
+"""
+
+import os
+import platform
+import socket
+
+from packstack.installer import basedefs
+from packstack.installer import exceptions
+from packstack.installer import processors
+from packstack.installer import utils
+from packstack.installer import validators
+
+from packstack.modules.common import filtered_hosts
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import deliver_ssl_file
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------- Nova Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Nova"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register the Nova parameter group with the installer controller.
+
+    Declares all CONFIG_NOVA_* options (prompts, validators, processors,
+    defaults), merges usage text from the packstack documentation, and
+    adds the "NOVA" group gated on CONFIG_NOVA_INSTALL == 'y'.
+    """
+    # NOTE(review): primary_netif/secondary_netif are computed but not
+    # referenced by any parameter below — possibly left over from removed
+    # nova-network options; confirm before deleting.
+    if platform.linux_distribution()[0] == "Fedora":
+        primary_netif = "em1"
+        secondary_netif = "em2"
+    else:
+        primary_netif = "eth0"
+        secondary_netif = "eth1"
+
+    nova_params = {
+        "NOVA": [
+            {"CMD_OPTION": 'nova-db-purge-enable',
+             "PROMPT": (
+                 "Enter y if cron job for removing soft deleted DB rows "
+                 "should be created"
+             ),
+             "OPTION_LIST": ['y', 'n'],
+             "VALIDATORS": [validators.validate_not_empty],
+             "PROCESSORS": [processors.process_bool],
+             "DEFAULT_VALUE": 'y',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": 'CONFIG_NOVA_DB_PURGE_ENABLE',
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-db-passwd",
+             "PROMPT": "Enter the password for the Nova DB access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NOVA_DB_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-ks-passwd",
+             "PROMPT": "Enter the password for the Nova Keystone access",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "PW_PLACEHOLDER",
+             "PROCESSORS": [processors.process_password],
+             "MASK_INPUT": True,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NOVA_KS_PW",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-manage-flavors",
+             "PROMPT": (
+                 "Should Packstack manage default Nova flavors"
+             ),
+             "OPTION_LIST": ["y", "n"],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": "y",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NOVA_MANAGE_FLAVORS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "novasched-cpu-allocation-ratio",
+             "PROMPT": "Enter the CPU overcommitment ratio. Set to 1.0 to "
+                       "disable CPU overcommitment",
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_float],
+             "DEFAULT_VALUE": 16.0,
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "novasched-ram-allocation-ratio",
+             "PROMPT": ("Enter the RAM overcommitment ratio. Set to 1.0 to "
+                        "disable RAM overcommitment"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_float],
+             "DEFAULT_VALUE": 1.5,
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "novacompute-migrate-protocol",
+             "PROMPT": ("Enter protocol which will be used for instance "
+                        "migration"),
+             "OPTION_LIST": ['tcp', 'ssh'],
+             "VALIDATORS": [validators.validate_options],
+             "DEFAULT_VALUE": 'tcp',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-compute-manager",
+             "PROMPT": ("Enter the compute manager for nova "
+                        "migration"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [validators.validate_not_empty],
+             "DEFAULT_VALUE": "nova.compute.manager.ComputeManager",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NOVA_COMPUTE_MANAGER",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-ssl-cert",
+             "PROMPT": ("Enter the path to a PEM encoded certificate to be used "
+                        "on the https server, leave blank if one should be "
+                        "generated, this certificate should not require "
+                        "a passphrase"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": '',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_VNC_SSL_CERT",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-ssl-key",
+             "PROMPT": ("Enter the SSL keyfile corresponding to the certificate "
+                        "if one was entered"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_VNC_SSL_KEY",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-pci-alias",
+             "PROMPT": ("Enter the PCI passthrough array of hash in JSON style for controller eg. "
+                        "[{'vendor_id':'1234', 'product_id':'5678', "
+                        "'name':'default'}, {...}] "),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NOVA_PCI_ALIAS",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-pci-passthrough-whitelist",
+             "PROMPT": ("Enter the PCI passthrough whitelist as array of hash in JSON style for "
+                        "controller eg. "
+                        "[{'vendor_id':'1234', 'product_id':'5678', "
+                        "'name':'default'}, {...}]"),
+             "OPTION_LIST": [],
+             "VALIDATORS": [],
+             "DEFAULT_VALUE": "",
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": True,
+             "CONF_NAME": "CONFIG_NOVA_PCI_PASSTHROUGH_WHITELIST",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": False,
+             "CONDITION": False},
+
+            {"CMD_OPTION": "nova-libvirt-virt-type",
+             "PROMPT": (
+                 "The nova hypervisor that should be used. Either qemu or kvm."
+             ),
+             "OPTION_LIST": ['qemu', 'kvm'],
+             "DEFAULT_VALUE": '%{::default_hypervisor}',
+             "MASK_INPUT": False,
+             "LOOSE_VALIDATION": False,
+             "CONF_NAME": "CONFIG_NOVA_LIBVIRT_VIRT_TYPE",
+             "USE_DEFAULT": False,
+             "NEED_CONFIRM": True,
+             "CONDITION": False},
+        ],
+    }
+    # Pull long-form usage/help text for each parameter from the docs.
+    update_params_usage(basedefs.PACKSTACK_DOC, nova_params)
+
+    nova_groups = [
+        {"GROUP_NAME": "NOVA",
+         "DESCRIPTION": "Nova Options",
+         "PRE_CONDITION": "CONFIG_NOVA_INSTALL",
+         "PRE_CONDITION_MATCH": "y",
+         "POST_CONDITION": False,
+         "POST_CONDITION_MATCH": True},
+    ]
+    for group in nova_groups:
+        params = nova_params[group["GROUP_NAME"]]
+        controller.addGroup(group, params)
+
+
+def initSequences(controller):
+    if controller.CONF['CONFIG_NOVA_INSTALL'] != 'y':
+        return
+
+    if controller.CONF['CONFIG_NEUTRON_INSTALL'] == 'y':
+        network_title = ('Preparing OpenStack Network-related '
+                         'Nova entries')
+        network_function = create_neutron_manifest
+
+    novaapisteps = [
+        {'title': 'Preparing Nova API entries',
+         'functions': [create_api_manifest]},
+        {'title': 'Creating ssh keys for Nova migration',
+         'functions': [create_ssh_keys]},
+        {'title': 'Gathering ssh host keys for Nova migration',
+         'functions': [gather_host_keys]},
+        {'title': 'Preparing Nova Compute entries',
+         'functions': [create_compute_manifest]},
+        {'title': 'Preparing Nova Scheduler entries',
+         'functions': [create_sched_manifest]},
+        {'title': 'Preparing Nova VNC Proxy entries',
+         'functions': [create_vncproxy_manifest]},
+        {'title': network_title,
+         'functions': [network_function]},
+        {'title': 'Preparing Nova Common entries',
+         'functions': [create_common_manifest]},
+    ]
+
+    controller.addSequence("Installing OpenStack Nova API", [], [],
+                           novaapisteps)
+
+
+# ------------------------ Step Functions -------------------------
+
+def create_ssh_keys(config, messages):
+    """Generate (if missing) the Nova migration SSH keypair and export it.
+
+    Stores key type, public part and private part into config so the
+    puppet templates can distribute them to compute hosts.
+    """
+    migration_key = os.path.join(basedefs.VAR_DIR, 'nova_migration_key')
+    # Generate key if it does not exist
+    if not os.path.exists(migration_key):
+        local = utils.ScriptRunner()
+        local.append('ssh-keygen -t rsa -b 2048 -f "%s" -N ""' % migration_key)
+        local.execute()
+
+    with open(migration_key) as fp:
+        secret = fp.read().strip()
+    with open('%s.pub' % migration_key) as fp:
+        public = fp.read().strip()
+
+    config['NOVA_MIGRATION_KEY_TYPE'] = 'ssh-rsa'
+    # Public key file format is "<type> <base64-data> <comment>"; keep only
+    # the base64 data part.
+    config['NOVA_MIGRATION_KEY_PUBLIC'] = public.split()[1]
+    config['NOVA_MIGRATION_KEY_SECRET'] = secret
+
+
+def gather_host_keys(config, messages):
+    """Collect SSH host keys of every compute host via ssh-keyscan.
+
+    The raw keyscan output is stored per host under HOST_KEYS_<host> and
+    parsed later by create_compute_manifest.
+    """
+    global compute_hosts
+
+    for host in compute_hosts:
+        local = utils.ScriptRunner()
+        local.append('ssh-keyscan %s' % host)
+        retcode, hostkey = local.execute()
+        config['HOST_KEYS_%s' % host] = hostkey
+
+
+def create_api_manifest(config, messages):
+    """Prepare Nova API config entries and its firewall rules.
+
+    Also initializes the module-level compute_hosts/network_hosts sets
+    used by the other step functions in this plugin.
+    """
+    # Since this step is running first, let's create necessary variables here
+    # and make them global
+    global compute_hosts, network_hosts
+    com_var = config.get("CONFIG_COMPUTE_HOSTS", "")
+    compute_hosts = set([i.strip() for i in com_var.split(",") if i.strip()])
+    net_var = config.get("CONFIG_NETWORK_HOSTS", "")
+    network_hosts = set([i.strip() for i in net_var.split(",") if i.strip()])
+
+    # This is a hack around us needing to generate the neutron metadata
+    # password, but the nova puppet plugin uses the existence of that
+    # password to determine whether or not to configure neutron metadata
+    # proxy support. So the nova_api.pp template needs to be set to None
+    # to disable metadata support if neutron is not being installed.
+    if config['CONFIG_NEUTRON_INSTALL'] != 'y':
+        config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = None
+    else:
+        config['CONFIG_NEUTRON_METADATA_PW_UNQUOTED'] = "%s" % config['CONFIG_NEUTRON_METADATA_PW']
+
+    # Open the Nova API ports (EC2, compute, metadata, placement) to all.
+    fw_details = dict()
+    key = "nova_api"
+    fw_details.setdefault(key, {})
+    fw_details[key]['host'] = "ALL"
+    fw_details[key]['service_name'] = "nova api"
+    fw_details[key]['chain'] = "INPUT"
+    fw_details[key]['ports'] = ['8773', '8774', '8775', '8778']
+    fw_details[key]['proto'] = "tcp"
+    config['FIREWALL_NOVA_API_RULES'] = fw_details
+
+
+def create_compute_manifest(config, messages):
+    global compute_hosts, network_hosts
+
+    if config["CONFIG_HORIZON_SSL"] == 'y':
+        config["CONFIG_VNCPROXY_PROTOCOL"] = "https"
+    else:
+        config["CONFIG_VNCPROXY_PROTOCOL"] = "http"
+
+    migrate_protocol = config['CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL']
+    if migrate_protocol == 'ssh':
+        config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
+            'qemu+ssh://nova@%s/system?no_verify=1&'
+            'keyfile=/etc/nova/ssh/nova_migration_key'
+        )
+    else:
+        config['CONFIG_NOVA_COMPUTE_MIGRATE_URL'] = (
+            'qemu+tcp://nova@%s/system'
+        )
+
+    ssh_keys_details = {}
+    for host in compute_hosts:
+        try:
+            hostname, aliases, addrs = socket.gethostbyaddr(host)
+        except socket.herror:
+            hostname, aliases, addrs = (host, [], [])
+
+        for hostkey in config['HOST_KEYS_%s' % host].split('\n'):
+            hostkey = hostkey.strip()
+            if not hostkey:
+                continue
+
+            _, host_key_type, host_key_data = hostkey.split()
+            key = "%s.%s" % (host_key_type, hostname)
+            ssh_keys_details.setdefault(key, {})
+            ssh_keys_details[key]['ensure'] = 'present'
+            ssh_keys_details[key]['host_aliases'] = aliases + addrs
+            ssh_keys_details[key]['key'] = host_key_data
+            ssh_keys_details[key]['type'] = host_key_type
+
+    config['SSH_KEYS'] = ssh_keys_details
+
+    if config['CONFIG_VMWARE_BACKEND'] == 'y':
+        vcenters = [i.strip() for i in
+                    config['CONFIG_VCENTER_CLUSTER_NAMES'].split(',')
+                    if i.strip()]
+        if not vcenters:
+            raise exceptions.ParamValidationError(
+                "Please specify at least one VMware vCenter cluster in"
+                " CONFIG_VCENTER_CLUSTER_NAMES"
+            )
+        if len(vcenters) != len(compute_hosts):
+            if len(vcenters) > 1:
+                raise exceptions.ParamValidationError(
+                    "Number of vmware clusters %s is not same"
+                    " as number of nova computes %s", (vcenters, compute_hosts)
+                )
+            else:
+                vcenters = len(compute_hosts) * [vcenters[0]]
+        vmware_clusters = dict(zip(compute_hosts, vcenters))
+        config['CONFIG_VCENTER_CLUSTERS'] = vmware_clusters
+
+    for host in compute_hosts:
+        if config['CONFIG_IRONIC_INSTALL'] == 'y':
+            cm = 'ironic.nova.compute.manager.ClusteredComputeManager'
+            config['CONFIG_NOVA_COMPUTE_MANAGER'] = cm
+
+        fw_details = dict()
+        cf_fw_qemu_mig_key = "FIREWALL_NOVA_QEMU_MIG_RULES_%s" % host
+        for c_host in compute_hosts:
+            key = "nova_qemu_migration_%s_%s" % (host, c_host)
+            fw_details.setdefault(key, {})
+            fw_details[key]['host'] = "%s" % c_host
+            fw_details[key]['service_name'] = "nova qemu migration"
+            fw_details[key]['chain'] = "INPUT"
+            fw_details[key]['ports'] = ['16509', '49152-49215']
+            fw_details[key]['proto'] = "tcp"
+
+        config[cf_fw_qemu_mig_key] = fw_details
+
+        if config['CONFIG_CEILOMETER_INSTALL'] == 'y':
+            if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+                ssl_cert_file = config['CONFIG_CEILOMETER_SSL_CERT'] = (
+                    '/etc/pki/tls/certs/ssl_amqp_ceilometer.crt'
+                )
+                ssl_key_file = config['CONFIG_CEILOMETER_SSL_KEY'] = (
+                    '/etc/pki/tls/private/ssl_amqp_ceilometer.key'
+                )
+                ssl_host = config['CONFIG_CONTROLLER_HOST']
+                service = 'ceilometer'
+                generate_ssl_cert(config, host, service, ssl_key_file,
+                                  ssl_cert_file)
+
+        fw_details = dict()
+        key = "nova_compute"
+        fw_details.setdefault(key, {})
+        fw_details[key]['host'] = "%s" % config['CONFIG_CONTROLLER_HOST']
+        fw_details[key]['service_name'] = "nova compute"
+        fw_details[key]['chain'] = "INPUT"
+        fw_details[key]['ports'] = ['5900-5999']
+        fw_details[key]['proto'] = "tcp"
+        config['FIREWALL_NOVA_COMPUTE_RULES'] = fw_details
+
+
+def create_sched_manifest(config, messages):
+    """Adjust scheduler settings for Ironic deployments.
+
+    Bare-metal nodes cannot overcommit RAM, so force the allocation
+    ratio to 1.0 when Ironic is being installed.
+    """
+    if config['CONFIG_IRONIC_INSTALL'] == 'y':
+        ram_alloc = '1.0'
+        config['CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO'] = ram_alloc
+
+
+def create_vncproxy_manifest(config, messages):
+    if config["CONFIG_HORIZON_SSL"] == 'y':
+        if config["CONFIG_VNC_SSL_CERT"]:
+            ssl_cert_file = config["CONFIG_VNC_SSL_CERT"]
+            ssl_key_file = config["CONFIG_VNC_SSL_KEY"]
+            if not os.path.exists(ssl_cert_file):
+                raise exceptions.ParamValidationError(
+                    "The file %s doesn't exist" % ssl_cert_file)
+
+            if not os.path.exists(ssl_key_file):
+                raise exceptions.ParamValidationError(
+                    "The file %s doesn't exist" % ssl_key_file)
+
+            final_cert = open(ssl_cert_file, 'rt').read()
+            final_key = open(ssl_key_file, 'rt').read()
+            deliver_ssl_file(final_cert, ssl_cert_file, config['CONFIG_CONTROLLER_HOST'])
+            deliver_ssl_file(final_key, ssl_key_file, config['CONFIG_CONTROLLER_HOST'])
+
+        else:
+            config["CONFIG_VNC_SSL_CERT"] = '/etc/pki/tls/certs/ssl_vnc.crt'
+            config["CONFIG_VNC_SSL_KEY"] = '/etc/pki/tls/private/ssl_vnc.key'
+            ssl_key_file = config["CONFIG_VNC_SSL_KEY"]
+            ssl_cert_file = config["CONFIG_VNC_SSL_CERT"]
+            ssl_host = config['CONFIG_CONTROLLER_HOST']
+            service = 'vnc'
+            generate_ssl_cert(config, ssl_host, service, ssl_key_file,
+                              ssl_cert_file)
+
+
+def create_common_manifest(config, messages):
+    """Prepare Nova settings shared by all hosts (DB URL, metadata host,
+    AMQP SSL certificates).
+
+    NOTE(review): CONFIG_NOVA_SQL_CONN_* and CONFIG_NOVA_METADATA_HOST are
+    plain config keys rewritten on each loop iteration, so the values from
+    the last host processed win — confirm this matches how the templates
+    consume them.
+    """
+    global compute_hosts, network_hosts
+
+    network_type = (config['CONFIG_NEUTRON_INSTALL'] == "y" and
+                    'neutron' or 'nova')
+    network_multi = len(network_hosts) > 1
+    dbacces_hosts = set([config.get('CONFIG_CONTROLLER_HOST')])
+    dbacces_hosts |= network_hosts
+
+    for host in filtered_hosts(config):
+        pw_in_sqlconn = False
+        host = host.strip()
+
+        if host in compute_hosts and host not in dbacces_hosts:
+            # we should omit password in case we are installing only
+            # nova-compute to the host
+            perms = "nova"
+            pw_in_sqlconn = False
+        else:
+            perms = "nova:%s" % config['CONFIG_NOVA_DB_PW']
+            pw_in_sqlconn = True
+
+        mariadb_host_url = config['CONFIG_MARIADB_HOST_URL']
+        sqlconn = "mysql+pymysql://%s@%s/nova" % (perms, mariadb_host_url)
+        if pw_in_sqlconn:
+            config['CONFIG_NOVA_SQL_CONN_PW'] = sqlconn
+        else:
+            config['CONFIG_NOVA_SQL_CONN_NOPW'] = sqlconn
+
+        # for nova-network in multihost mode each compute host is metadata
+        # host otherwise we use api host
+        if (network_type == 'nova' and network_multi and
+                host in compute_hosts):
+            metadata = host
+        else:
+            metadata = config['CONFIG_CONTROLLER_HOST']
+        config['CONFIG_NOVA_METADATA_HOST'] = metadata
+
+    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
+        # SSL certs are needed on every compute host plus the controller.
+        nova_hosts = compute_hosts
+        nova_hosts |= set([config.get('CONFIG_CONTROLLER_HOST')])
+        ssl_cert_file = config['CONFIG_NOVA_SSL_CERT'] = (
+            '/etc/pki/tls/certs/ssl_amqp_nova.crt'
+        )
+        ssl_key_file = config['CONFIG_NOVA_SSL_KEY'] = (
+            '/etc/pki/tls/private/ssl_amqp_nova.key'
+        )
+        service = 'nova'
+        for host in nova_hosts:
+            generate_ssl_cert(config, host, service,
+                              ssl_key_file, ssl_cert_file)
+                              ssl_key_file, ssl_cert_file)
+
+
+def create_neutron_manifest(config, messages):
+    """Select the libvirt VIF driver used by Nova with Neutron.
+
+    Ironic deployments use the no-op firewall driver; everything else
+    uses the generic libvirt VIF driver.
+    """
+    if config['CONFIG_IRONIC_INSTALL'] == 'y':
+        virt_driver = 'nova.virt.firewall.NoopFirewallDriver'
+        config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = virt_driver
+    else:
+        virt_driver = 'nova.virt.libvirt.vif.LibvirtGenericVIFDriver'
+        config['CONFIG_NOVA_LIBVIRT_VIF_DRIVER'] = virt_driver

+ 78 - 0
packstack/plugins/openstack_client_400.py

@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures an OpenStack Client
+"""
+
+import os
+
+from packstack.installer import utils
+
+# ------------- OpenStack Client Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Client"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+def initConfig(controller):
+    """Register the (empty) client parameter group with the controller.
+
+    The NOVACLIENT group carries no parameters of its own; it exists so
+    the CONFIG_CLIENT_INSTALL pre-condition is tracked.
+    """
+    group = {"GROUP_NAME": "NOVACLIENT",
+             "DESCRIPTION": "NOVACLIENT Config parameters",
+             "PRE_CONDITION": "CONFIG_CLIENT_INSTALL",
+             "PRE_CONDITION_MATCH": "y",
+             "POST_CONDITION": False,
+             "POST_CONDITION_MATCH": True}
+    controller.addGroup(group, [])
+
+
+def initSequences(controller):
+    """Register the OpenStack client installation sequence.
+
+    Skipped entirely unless CONFIG_CLIENT_INSTALL == 'y'.
+    """
+    if controller.CONF['CONFIG_CLIENT_INSTALL'] != 'y':
+        return
+
+    osclientsteps = [
+        {'title': 'Preparing OpenStack Client entries',
+         'functions': [create_manifest]}
+    ]
+    controller.addSequence("Installing OpenStack Client", [], [],
+                           osclientsteps)
+
+
+# -------------------------- step functions --------------------------
+
+def create_manifest(config, messages):
+    """Prepare OpenStack client config entries and user messages.
+
+    Determines root's home on the client host, the local (possibly
+    non-root) user's home/identity, and whether a non-root copy of
+    keystonerc_admin should be produced in an all-in-one install.
+    """
+    client_host = config['CONFIG_CONTROLLER_HOST'].strip()
+
+    # Resolve root's home directory on the (possibly remote) client host.
+    server = utils.ScriptRunner(client_host)
+    server.append('echo $HOME')
+    rc, root_home = server.execute()
+    root_home = root_home.strip()
+
+    homedir = os.path.expanduser('~')
+    config['HOME_DIR'] = homedir
+
+    uname, gname = utils.get_current_username()
+    config['NO_ROOT_USER'], config['NO_ROOT_GROUP'] = uname, gname
+
+    # All-in-one run by a non-root user: the client host is local but the
+    # remote $HOME (root's) differs from the invoking user's home.
+    no_root_allinone = (client_host == utils.get_localhost_ip() and
+                        root_home != homedir)
+    config['NO_ROOT_USER_ALLINONE'] = no_root_allinone and True or False
+
+    msg = ("File %s/keystonerc_admin has been created on OpenStack client host"
+           " %s. To use the command line tools you need to source the file.")
+    messages.append(msg % (root_home, client_host))
+
+    if no_root_allinone:
+        msg = ("Copy of keystonerc_admin file has been created for non-root "
+               "user in %s.")
+        messages.append(msg % homedir)

+ 103 - 0
packstack/plugins/panko_820.py

@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Panko
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+
+# ------------- Panko Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Panko"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Declare the Panko password parameters and their option group."""

    def use_panko(config):
        # Panko is only deployed alongside Ceilometer.
        return (config['CONFIG_CEILOMETER_INSTALL'] == 'y' and
                config['CONFIG_PANKO_INSTALL'] == 'y')

    db_password = {
        "CONF_NAME": "CONFIG_PANKO_DB_PW",
        "CMD_OPTION": "panko-db-passwd",
        "PROMPT": "Enter the password for Panko DB access",
        "OPTION_LIST": [],
        "VALIDATORS": [validators.validate_not_empty],
        "DEFAULT_VALUE": "PW_PLACEHOLDER",
        "PROCESSORS": [processors.process_password],
        "MASK_INPUT": True,
        "LOOSE_VALIDATION": False,
        "USE_DEFAULT": False,
        "NEED_CONFIRM": True,
        "CONDITION": False,
    }
    ks_password = {
        "CONF_NAME": "CONFIG_PANKO_KS_PW",
        "CMD_OPTION": "panko-ks-passwd",
        "PROMPT": "Enter the password for the Panko Keystone access",
        "OPTION_LIST": [],
        "VALIDATORS": [validators.validate_not_empty],
        "DEFAULT_VALUE": "PW_PLACEHOLDER",
        "PROCESSORS": [processors.process_password],
        "MASK_INPUT": True,
        "LOOSE_VALIDATION": False,
        "USE_DEFAULT": False,
        "NEED_CONFIRM": True,
        "CONDITION": False,
    }
    panko_params = {"PANKO": [db_password, ks_password]}

    update_params_usage(basedefs.PACKSTACK_DOC, panko_params)

    panko_group = {
        "GROUP_NAME": "PANKO",
        "DESCRIPTION": "Panko Config parameters",
        "PRE_CONDITION": use_panko,
        "PRE_CONDITION_MATCH": True,
        "POST_CONDITION": False,
        "POST_CONDITION_MATCH": True,
    }
    controller.addGroup(panko_group, panko_params["PANKO"])
+
+
def initSequences(controller):
    """Queue Panko manifest preparation when both Panko and Ceilometer
    were selected for installation."""
    conf = controller.CONF
    wanted = (conf['CONFIG_PANKO_INSTALL'] == 'y' and
              conf['CONFIG_CEILOMETER_INSTALL'] == 'y')
    if not wanted:
        return

    steps = [{'title': 'Preparing Panko entries',
              'functions': [create_manifest]}]
    controller.addSequence("Installing OpenStack Panko", [], [], steps)
+
+
+# -------------------------- step functions --------------------------
+
def create_manifest(config, messages):
    """Store firewall rule metadata opening the Panko API port.

    The rule dict placed in config['FIREWALL_PANKO_RULES'] is consumed by
    the firewall manifest to allow API traffic from all hosts.

    :param config: packstack configuration mapping (mutated in place)
    :param messages: list of user-facing messages (unused here)
    """
    fw_details = dict()
    key = "panko_api"
    fw_details.setdefault(key, {})
    fw_details[key]['host'] = "ALL"
    fw_details[key]['service_name'] = "panko-api"
    fw_details[key]['chain'] = "INPUT"
    # 8977 is the Panko API default port; the previous value 8779 belongs
    # to Trove and left the Panko endpoint firewalled off.
    fw_details[key]['ports'] = ['8977']
    fw_details[key]['proto'] = "tcp"
    config['FIREWALL_PANKO_RULES'] = fw_details

+ 63 - 0
packstack/plugins/postscript_951.py

@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Plugin responsible for post-installation configuration
+"""
+
+from packstack.installer import utils
+from packstack.installer import basedefs
+
+
+# ------------- Postscript Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "Postscript"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Register the parameter-less POSTSCRIPT group (always active)."""
    controller.addGroup(
        {"GROUP_NAME": "POSTSCRIPT",
         "DESCRIPTION": "POSTSCRIPT Config parameters",
         "PRE_CONDITION": lambda x: 'yes',
         "PRE_CONDITION_MATCH": "yes",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
        [])
+
+
def initSequences(controller):
    """Queue post-install steps; Tempest runs only when both provisioning
    and execution were requested."""
    conf = controller.CONF
    steps = []
    tempest_wanted = (conf['CONFIG_PROVISION_TEMPEST'] == "y" and
                      conf['CONFIG_RUN_TEMPEST'] == "y")
    if tempest_wanted:
        steps.append({'title': 'Running Tempest',
                      'functions': [run_tempest]})
    controller.addSequence("Running post install scripts", [], [], steps)
+
+
+# -------------------------- step functions --------------------------
+
def run_tempest(config, messages):
    """Execute the configured Tempest tests on the Tempest host.

    Builds a remote shell script that runs ``tempest run`` inside
    /var/lib/tempest on CONFIG_TEMPEST_HOST, redirecting output to a log
    file under packstack's log directory path on that host.

    :param config: packstack configuration mapping
    :param messages: list of user-facing messages (unused here)
    """
    logfile = basedefs.DIR_LOG + "/tempest.log"
    print("Running Tempest on %s" % config['CONFIG_TEMPEST_HOST'])
    server = utils.ScriptRunner(config['CONFIG_TEMPEST_HOST'])
    server.append('pushd /var/lib/tempest')
    # Space-separated test selectors become a regex alternation,
    # e.g. "smoke scenario" -> "(smoke|scenario)".
    server.append('tempest run --regex \'(%s)\' --concurrency 2  > %s'
                  % (config['CONFIG_RUN_TEMPEST_TESTS'].replace(' ', '|'),
                     logfile))
    server.append('popd')
    server.execute()

File diff suppressed because it is too large
+ 1497 - 0
packstack/plugins/prescript_000.py


+ 353 - 0
packstack/plugins/provision_700.py

@@ -0,0 +1,353 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Provisioning for demo usage and testing
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+
+# ------------- Provision Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Provision"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
# Defaults for the demo Glance image (CirrOS test cloud image).
DEMO_IMAGE_NAME = 'cirros'
DEMO_IMAGE_URL = (
    'http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img'
)
# Login user baked into the CirrOS image.
DEMO_IMAGE_SSH_USER = 'cirros'
DEMO_IMAGE_FORMAT = 'qcow2'
# Three-part UEC image (kernel + ramdisk + disk) used for Tempest.
UEC_IMAGE_NAME = 'cirros-uec'
UEC_IMAGE_KERNEL_URL = (
    'http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-kernel'
)
UEC_IMAGE_RAMDISK_URL = (
    'http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-initramfs'
)
UEC_IMAGE_DISK_URL = (
    'http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img'
)
+
+
def initConfig(controller):
    """Declare all provisioning parameters and their option groups.

    Four groups are registered: the initial demo/Tempest questions
    (always asked), the demo image settings, the Tempest deployment
    settings, and the external ovs bridge toggle (Neutron only).
    Finally, defaults are seeded for parameters that group
    pre-conditions may have left uninitialized.
    """

    def process_tempest(param, param_name, config=None):
        if param == "":
            # In case of multinode installs by default we deploy
            # Tempest on network node
            return config['CONFIG_NETWORK_HOSTS'].split(',')[0]
        return param

    conf_params = {
        "PROVISION_INIT": [
            {"CMD_OPTION": "provision-demo",
             "PROMPT": ("Would you like to provision for demo usage "
                        "and testing"),
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "y",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_DEMO",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-tempest",
             "PROMPT": ("Would you like to configure Tempest (OpenStack test "
                        "suite). Note that provisioning is only supported for "
                        "all-in-one installations."),
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "n",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_TEMPEST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        # Demo image settings; only asked when CONFIG_PROVISION_DEMO is 'y'.
        "PROVISION_DEMO": [
            {"CMD_OPTION": "provision-demo-floatrange",
             "PROMPT": "Enter the network address for the floating IP subnet",
             "OPTION_LIST": False,
             "VALIDATORS": False,
             "DEFAULT_VALUE": "172.24.4.0/24",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_DEMO_FLOATRANGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-image-name",
             "PROMPT": "Enter the name to be assigned to the demo image",
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": DEMO_IMAGE_NAME,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_IMAGE_NAME",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-image-url",
             "PROMPT": ("Enter the location of an image to be loaded "
                        "into Glance"),
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": DEMO_IMAGE_URL,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_IMAGE_URL",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-image-format",
             "PROMPT": ("Enter the format of the demo image"),
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": DEMO_IMAGE_FORMAT,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_IMAGE_FORMAT",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-image-ssh-user",
             "PROMPT": ("Enter the name of a user to use when connecting "
                        "to the demo image via ssh"),
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": DEMO_IMAGE_SSH_USER,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_IMAGE_SSH_USER",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-uec-image-name",
             "PROMPT": "Enter the name to be assigned to the uec image used for tempest",
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": UEC_IMAGE_NAME,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_UEC_IMAGE_NAME",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-uec-kernel-url",
             "PROMPT": ("Enter the location of a uec kernel to be loaded "
                        "into Glance"),
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": UEC_IMAGE_KERNEL_URL,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_UEC_IMAGE_KERNEL_URL",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-uec-ramdisk-url",
             "PROMPT": ("Enter the location of a uec ramdisk to be loaded "
                        "into Glance"),
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": UEC_IMAGE_RAMDISK_URL,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_UEC_IMAGE_RAMDISK_URL",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-uec-disk-url",
             "PROMPT": ("Enter the location of a uec disk image to be loaded "
                        "into Glance"),
             "OPTION_LIST": False,
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": UEC_IMAGE_DISK_URL,
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_UEC_IMAGE_DISK_URL",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],

        # Tempest settings; only asked when CONFIG_PROVISION_TEMPEST is 'y'.
        "PROVISION_TEMPEST": [
            {"CMD_OPTION": "tempest-host",
             "PROMPT": "Enter the host where to deploy Tempest",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": "",
             "PROCESSORS": [process_tempest],
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_TEMPEST_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-tempest-user",
             "PROMPT": ("Enter the name of the Tempest Provisioning user "
                        "(if blank, Tempest will be configured in a "
                        "standalone mode) "),
             "OPTION_LIST": False,
             "VALIDATORS": False,
             "DEFAULT_VALUE": "",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_TEMPEST_USER",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "provision-tempest-user-passwd",
             "PROMPT": "Enter the password for the Tempest Provisioning user",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": "CONFIG_PROVISION_TEMPEST_USER_PW",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},

            {"CMD_OPTION": "provision-tempest-floatrange",
             "PROMPT": "Enter the network address for the floating IP subnet",
             "OPTION_LIST": False,
             "VALIDATORS": False,
             "DEFAULT_VALUE": "172.24.4.0/24",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_TEMPEST_FLOATRANGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "run-tempest",
             "PROMPT": ("Do you wish to run tempest?"),
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "n",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_RUN_TEMPEST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "run-tempest-tests",
             "PROMPT": ("What tempest tests should run ?"
                        " (If blank, Tempest will run smoke tests)"),
             "OPTION_LIST": [],
             "VALIDATORS": False,
             "DEFAULT_VALUE": "smoke",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_RUN_TEMPEST_TESTS",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False}
        ],

        # External ovs bridge toggle; only asked when Neutron is installed.
        "PROVISION_OVS_BRIDGE": [
            {"CMD_OPTION": "provision-ovs-bridge",
             "PROMPT": "Would you like to configure the external ovs bridge",
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "y",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_PROVISION_OVS_BRIDGE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE']},
        ],
    }
    update_params_usage(basedefs.PACKSTACK_DOC, conf_params)

    # Pre-condition callables deciding which of the groups above are asked.
    def check_provisioning_demo(config):
        return (config.get('CONFIG_PROVISION_DEMO', 'n') == 'y')

    def check_provisioning_tempest(config):
        return (config.get('CONFIG_PROVISION_TEMPEST', 'n') == 'y')

    def allow_all_in_one_ovs_bridge(config):
        return (config['CONFIG_NEUTRON_INSTALL'] == 'y')

    conf_groups = [
        {"GROUP_NAME": "PROVISION_INIT",
         "DESCRIPTION": "Provisioning demo config",
         "PRE_CONDITION": lambda x: 'yes',
         "PRE_CONDITION_MATCH": "yes",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "PROVISION_DEMO",
         "DESCRIPTION": "Provisioning demo config",
         "PRE_CONDITION": check_provisioning_demo,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "PROVISION_TEMPEST",
         "DESCRIPTION": "Provisioning tempest config",
         "PRE_CONDITION": check_provisioning_tempest,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},

        {"GROUP_NAME": "PROVISION_OVS_BRIDGE",
         "DESCRIPTION": "Provisioning all-in-one ovs bridge config",
         "PRE_CONDITION": allow_all_in_one_ovs_bridge,
         "PRE_CONDITION_MATCH": True,
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in conf_groups:
        paramList = conf_params[group["GROUP_NAME"]]
        controller.addGroup(group, paramList)

    # Due to group checking some parameters might not be initialized, but
    # provision.pp needs them all. So we will initialize them with default
    # values
    params = [
        controller.getParamByName('CONFIG_PROVISION_OVS_BRIDGE')
    ]
    for param in params:
        value = controller.CONF.get(param.CONF_NAME, param.DEFAULT_VALUE)
        controller.CONF[param.CONF_NAME] = value
+
+
+def initSequences(controller):
+    config = controller.CONF

+ 279 - 0
packstack/plugins/puppet_950.py

@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Puppet
+"""
+
+import sys
+import logging
+import os
+import time
+
+from packstack.installer import utils
+from packstack.installer import basedefs
+from packstack.installer.exceptions import PuppetError
+from packstack.installer.exceptions import ScriptRuntimeError
+from packstack.installer.utils import split_hosts
+
+from packstack.modules.common import filtered_hosts
+from packstack.modules.ospluginutils import appendManifestFile
+from packstack.modules.ospluginutils import generateHieraDataFile
+from packstack.modules.ospluginutils import getManifestTemplate
+from packstack.modules.ospluginutils import manifestfiles
+from packstack.modules.puppet import validate_logfile
+from packstack.modules.puppet import scan_logfile
+
+
+# ------------- Puppet Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "Puppet"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
# Root of the distribution-provided puppet content; overridable through
# the PACKSTACK_PUPPETDIR environment variable.
PUPPET_DIR = os.environ.get('PACKSTACK_PUPPETDIR',
                            '/usr/share/openstack-puppet/')
MODULE_DIR = os.path.join(PUPPET_DIR, 'modules')
+
+
def initConfig(controller):
    """Register the parameter-less PUPPET group (always active)."""
    controller.addGroup(
        {"GROUP_NAME": "PUPPET",
         "DESCRIPTION": "Puppet Config parameters",
         "PRE_CONDITION": lambda x: 'yes',
         "PRE_CONDITION_MATCH": "yes",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
        [])
+
+
def initSequences(controller):
    """Install the cleanup pre-step and the main puppet pipeline."""
    cleanup_steps = [{'title': 'Clean Up', 'functions': [run_cleanup]}]
    controller.insertSequence("Clean Up", [], [], cleanup_steps, index=0)

    pipeline = [
        {'title': 'Preparing Puppet manifests',
            'functions': [prepare_puppet_modules]},
        {'title': 'Copying Puppet modules and manifests',
            'functions': [copy_puppet_modules]},
        {'title': 'Applying Puppet manifests',
            'functions': [apply_puppet_manifest]},
        {'title': 'Finalizing',
            'functions': [finalize]},
    ]
    controller.addSequence("Puppet", [], [], pipeline)
+
+
+# ------------------------- helper functions -------------------------
+
def wait_for_puppet(currently_running, messages):
    """Block until every listed remote puppet run has finished.

    Each entry of ``currently_running`` is a (hostname, finished_logfile)
    tuple. A remote run signals completion by renaming its log file to
    ``*.finished``; we poll by scp-ing that file and, once retrieved,
    scan the local copy for notices and raise on puppet errors.

    :param currently_running: list of (hostname, finished_logfile)
        tuples; emptied in place as runs complete
    :param messages: list collecting notices scanned from the logs
    :raises PuppetError: when a retrieved log contains puppet errors
    """
    log_len = 0
    twirl = ["-", "\\", "|", "/"]
    while currently_running:
        # Iterate over a snapshot: entries are removed from
        # currently_running inside this loop, and removing from the list
        # being iterated silently skips the element that follows.
        for hostname, finished_logfile in list(currently_running):
            log_file = os.path.splitext(os.path.basename(finished_logfile))[0]
            if len(log_file) > log_len:
                log_len = len(log_file)
            if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
                twirl = twirl[-1:] + twirl[:-1]
                sys.stdout.write(("\rTesting if puppet apply is finished: %s"
                                 % log_file).ljust(40 + log_len))
                sys.stdout.write("[ %s ]" % twirl[0])
                sys.stdout.flush()
            try:
                # Once a remote puppet run has finished, we retrieve the log
                # file and check it for errors
                local_server = utils.ScriptRunner()
                log = os.path.join(basedefs.PUPPET_MANIFEST_DIR,
                                   os.path.basename(finished_logfile))
                log = log.replace(".finished", ".log")
                local_server.append('scp -o StrictHostKeyChecking=no '
                                    '-o UserKnownHostsFile=/dev/null '
                                    'root@[%s]:%s %s'
                                    % (hostname, finished_logfile, log))
                # To not pollute logs we turn off logging of command execution
                local_server.execute(log=False)

                # If we got to this point the puppet apply has finished
                currently_running.remove((hostname, finished_logfile))

                # clean off the last "testing apply" msg
                if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
                    sys.stdout.write(("\r").ljust(45 + log_len))

            except ScriptRuntimeError:
                # the scp raises if the .finished file doesn't exist yet
                # TO-DO: We need to start testing 'e' for unexpected exceptions
                time.sleep(3)
                continue

            # check log file for relevant notices
            messages.extend(scan_logfile(log))

            # check the log file for errors
            sys.stdout.write('\r')
            try:
                validate_logfile(log)
                state = utils.state_message('%s:' % log_file, 'DONE', 'green')
                sys.stdout.write('%s\n' % state)
                sys.stdout.flush()
            except PuppetError:
                state = utils.state_message('%s:' % log_file, 'ERROR', 'red')
                sys.stdout.write('%s\n' % state)
                sys.stdout.flush()
                raise
+
+
+# -------------------------- step functions --------------------------
+
def run_cleanup(config, messages):
    """Remove stale *.pp manifests from the local manifest directory."""
    cleaner = utils.ScriptRunner()
    cleaner.append("rm -rf %s/*pp" % basedefs.PUPPET_MANIFEST_DIR)
    cleaner.execute()
+
+
def copy_puppet_modules(config, messages):
    """Distribute hieradata, manifests, resources and puppet modules.

    Writes the generated manifests and hieradata locally, then for every
    host tars them (plus the required puppet modules and any per-host
    resource files) over ssh into that host's packstack tmpdir.

    :param config: packstack configuration mapping (reads HOST_DETAILS
        and RESOURCES)
    :param messages: list of user-facing messages (unused here)
    """
    # Puppet modules shipped with packstack that every host needs.
    os_modules = ' '.join(('aodh', 'apache', 'ceilometer', 'certmonger',
                           'cinder', 'concat', 'firewall', 'glance',
                           'gnocchi', 'heat', 'horizon', 'inifile', 'ironic',
                           'keystone', 'magnum', 'manila', 'memcached', 'mongodb',
                           'mysql', 'neutron', 'nova', 'nssdb', 'openstack',
                           'openstacklib', 'oslo', 'packstack', 'panko', 'rabbitmq',
                           'redis', 'remote', 'rsync', 'sahara', 'ssh',
                           'stdlib', 'swift', 'sysctl', 'tempest', 'trove',
                           'vcsrepo', 'vswitch', 'xinetd', ))

    # write puppet manifest to disk
    manifestfiles.writeManifests()
    # write hieradata file to disk
    generateHieraDataFile()

    # One local ScriptRunner accumulates all copy commands; they are
    # executed in a single batch at the end.
    server = utils.ScriptRunner()
    for hostname in filtered_hosts(config):
        host_dir = config['HOST_DETAILS'][hostname]['tmpdir']
        # copy hiera defaults.yaml file
        server.append("cd %s" % basedefs.HIERADATA_DIR)
        server.append("tar --dereference -cpzf - ../hieradata | "
                      "ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null "
                      "root@%s tar -C %s -xpzf -" % (hostname, host_dir))

        # copy Packstack manifests
        # NOTE(review): this cd is immediately overridden by the next
        # one — looks vestigial; confirm before removing.
        server.append("cd %s/puppet" % basedefs.DIR_PROJECT_DIR)
        server.append("cd %s" % basedefs.PUPPET_MANIFEST_DIR)
        server.append("tar --dereference -cpzf - ../manifests | "
                      "ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null "
                      "root@%s tar -C %s -xpzf -" % (hostname, host_dir))

        # copy resources (extra files registered per host, e.g. images)
        resources = config.get('RESOURCES', {})
        for path, localname in resources.get(hostname, []):
            server.append("scp -o StrictHostKeyChecking=no "
                          "-o UserKnownHostsFile=/dev/null "
                          "%s root@[%s]:%s/resources/%s" %
                          (path, hostname, host_dir, localname))

        # copy Puppet modules required by Packstack
        server.append("cd %s" % MODULE_DIR)
        server.append("tar --dereference -cpzf - %s | "
                      "ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null "
                      "root@%s tar -C %s -xpzf -" %
                      (os_modules, hostname,
                       os.path.join(host_dir, 'modules')))
    server.execute()
+
+
def apply_puppet_manifest(config, messages):
    """Launch puppet apply for every generated manifest, batched by marker.

    Manifests with the same marker run in parallel (one per host); when
    the marker changes, we wait for the previous batch to complete before
    starting the next. Each remote run backgrounds puppet under flock,
    writes to a ``*.running`` log and renames it to ``*.finished`` on
    completion, which wait_for_puppet() polls for.

    :param config: packstack configuration mapping
    :param messages: list collecting notices from the puppet logs
    """
    if config.get("DRY_RUN"):
        return
    currently_running = []
    lastmarker = None
    loglevel = ''
    logcmd = False
    # In debug mode run puppet with --debug and log the issued commands.
    if logging.root.level <= logging.DEBUG:
        loglevel = '--debug'
        logcmd = True
    for manifest, marker in manifestfiles.getFiles():
        # if the marker has changed then we don't want to proceed until
        # all of the previous puppet runs have finished
        if lastmarker is not None and lastmarker != marker:
            wait_for_puppet(currently_running, messages)
        lastmarker = marker

        for hostname in filtered_hosts(config):
            # Manifest filenames are prefixed with the target hostname.
            if "%s_" % hostname not in manifest:
                continue

            host_dir = config['HOST_DETAILS'][hostname]['tmpdir']
            print("Applying %s" % manifest)
            server = utils.ScriptRunner(hostname)

            man_path = os.path.join(config['HOST_DETAILS'][hostname]['tmpdir'],
                                    basedefs.PUPPET_MANIFEST_RELATIVE,
                                    manifest)

            running_logfile = "%s.running" % man_path
            finished_logfile = "%s.finished" % man_path
            currently_running.append((hostname, finished_logfile))

            server.append("touch %s" % running_logfile)
            server.append("chmod 600 %s" % running_logfile)
            server.append("export PACKSTACK_VAR_DIR=%s" % host_dir)
            # flock serializes puppet applies per host; the trailing mv
            # signals completion to wait_for_puppet().
            cmd = ("( flock %s/ps.lock "
                   "puppet apply %s --modulepath %s/modules %s > %s "
                   "2>&1 < /dev/null ; "
                   "mv %s %s ) > /dev/null 2>&1 < /dev/null &"
                   % (host_dir, loglevel, host_dir, man_path, running_logfile,
                      running_logfile, finished_logfile))
            server.append(cmd)
            server.execute(log=logcmd)

    # wait for outstanding puppet runs before exiting
    wait_for_puppet(currently_running, messages)
+
+
def prepare_puppet_modules(config, messages):
    """Generate per-host packstack manifests for the controller,
    network and compute roles."""
    controller_host = config['CONFIG_CONTROLLER_HOST']
    appendManifestFile("%s_controller.pp" % controller_host,
                       getManifestTemplate("controller"),
                       marker='controller')

    role_hosts = (('network', 'CONFIG_NETWORK_HOSTS'),
                  ('compute', 'CONFIG_COMPUTE_HOSTS'))
    for role, conf_key in role_hosts:
        for host in split_hosts(config[conf_key]):
            appendManifestFile("%s_%s.pp" % (host, role),
                               getManifestTemplate(role),
                               marker=role)
+
+
def finalize(config, messages):
    """Warn about hosts that need a reboot after a kernel update.

    Compares the newest installed kernel with the running one on each
    host; on mismatch (the remote test command fails) a reboot notice is
    appended to ``messages``.

    :param config: packstack configuration mapping
    :param messages: list collecting user-facing notices
    """
    for hostname in filtered_hosts(config):
        server = utils.ScriptRunner(hostname)
        # Raw strings keep the sed escapes literal instead of relying on
        # Python preserving invalid escape sequences like "\(" and "\.".
        server.append(r"installed=$(rpm -q kernel --last | head -n1 | "
                      r"sed 's/kernel-\([a-z0-9\.\_\-]*\).*/\1/g')")
        server.append("loaded=$(uname -r | head -n1)")
        server.append('[ "$loaded" == "$installed" ]')
        try:
            rc, out = server.execute()
        except ScriptRuntimeError:
            messages.append('Because of the kernel update the host %s '
                            'requires reboot.' % hostname)

+ 108 - 0
packstack/plugins/sahara_900.py

@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Sahara
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------------ Sahara installer initialization ------------------
+
+PLUGIN_NAME = "OS-Sahara"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, "blue")
+
+
def initConfig(controller):
    """Register Sahara configuration parameters with the installer.

    Adds the Sahara DB and Keystone passwords; both default to
    PW_PLACEHOLDER and are auto-generated by process_password.
    """
    params = [
        {"CONF_NAME": "CONFIG_SAHARA_DB_PW",
         "CMD_OPTION": "sahara-db-passwd",
         "PROMPT": "Enter the password to use for Sahara to access the DB",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CONF_NAME": "CONFIG_SAHARA_KS_PW",
         "CMD_OPTION": "sahara-ks-passwd",
         "PROMPT": "Enter the password for Sahara Keystone access",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]
    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
    # The group is only prompted for when Sahara installation was requested.
    group = {"GROUP_NAME": "SAHARA",
             "DESCRIPTION": "Sahara Config parameters",
             "PRE_CONDITION": "CONFIG_SAHARA_INSTALL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
+
+
def initSequences(controller):
    """Queue the Sahara preparation step when Sahara install is enabled."""
    if controller.CONF["CONFIG_SAHARA_INSTALL"] != 'y':
        return

    controller.addSequence("Installing Sahara", [], [], [
        {"title": "Preparing Sahara entries",
         "functions": [create_manifest]},
    ])
+
+
+# -------------------------- step functions --------------------------
def create_manifest(config, messages):
    """Resolve the Sahara host, generate AMQP SSL material if needed and
    record the firewall rule opening the Sahara API port."""
    if config['CONFIG_UNSUPPORTED'] != 'y':
        # In supported layouts Sahara always runs on the controller.
        config['CONFIG_SAHARA_HOST'] = config['CONFIG_CONTROLLER_HOST']

    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        config['CONFIG_SAHARA_SSL_CERT'] = (
            '/etc/pki/tls/certs/ssl_amqp_sahara.crt')
        config['CONFIG_SAHARA_SSL_KEY'] = (
            '/etc/pki/tls/private/ssl_amqp_sahara.key')
        generate_ssl_cert(config, config['CONFIG_SAHARA_HOST'], 'sahara',
                          config['CONFIG_SAHARA_SSL_KEY'],
                          config['CONFIG_SAHARA_SSL_CERT'])

    # Sahara API listens on 8386/tcp and must be reachable from anywhere.
    config["FIREWALL_SAHARA_CFN_RULES"] = {
        "sahara-api": {
            "host": "ALL",
            "service_name": "sahara api",
            "chain": "INPUT",
            "ports": ["8386"],
            "proto": "tcp",
        }
    }

+ 294 - 0
packstack/plugins/ssl_001.py

@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Plugin responsible for managing SSL options
+"""
+import os
+
+from OpenSSL import crypto
+from socket import gethostname
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+
+from packstack.modules.documentation import update_params_usage
+
+# ------------- SSL Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "SSL"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Register SSL/CA configuration parameters with the installer.

    Covers the CA certificate/key file locations, the directory used for
    generated certificates, whether packstack self-signs its CA, and the
    certificate subject fields (consumed by create_self_signed_cert).
    """
    params = {
        "SSL": [
            {"CMD_OPTION": "ssl-cacert-file",
             "PROMPT": ("Enter the filename of the SSL CAcertificate, if the"
                        " CONFIG_SSL_CACERT_SELFSIGN is set to y the path "
                        "will be CONFIG_SSL_CERT_DIR/certs/selfcert.crt"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "/etc/pki/tls/certs/selfcert.crt",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_SSL_CACERT_FILE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "ssl-cacert-key-file",
             "PROMPT": ("Enter the filename of the SSL CAcertificate Key file"
                        ", if the CONFIG_SSL_CACERT_SELFSIGN is set to y the "
                        "path will be CONFIG_SSL_CERT_DIR/keys/selfkey.key"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "/etc/pki/tls/private/selfkey.key",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_SSL_CACERT_KEY_FILE",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "ssl-cert-dir",
             "PROMPT": ("Enter the path to use to store generated SSL certificates in"),
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty,
                            validators.validate_writeable_directory],
             "DEFAULT_VALUE": "~/packstackca/",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_SSL_CERT_DIR",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "ssl-cacert-selfsign",
             "PROMPT": "Should packstack use selfsigned CAcert.",
             "OPTION_LIST": ["y", "n"],
             "VALIDATORS": [validators.validate_options],
             "DEFAULT_VALUE": "y",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CACERT_SELFSIGN',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},

            {"CMD_OPTION": "ssl-cert-subject-country",
             "PROMPT": "Enter the ssl certificates subject country.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "--",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_C',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_C']},

            {"CMD_OPTION": "ssl-cert-subject-state",
             "PROMPT": "Enter the ssl certificates subject state.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "State",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_ST',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_ST']},

            {"CMD_OPTION": "ssl-cert-subject-location",
             "PROMPT": "Enter the ssl certificate subject location.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "City",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_L',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_L']},

            {"CMD_OPTION": "ssl-cert-subject-organization",
             "PROMPT": "Enter the ssl certificate subject organization.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "openstack",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_O',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_O']},

            {"CMD_OPTION": "ssl-cert-subject-organizational-unit",
             "PROMPT": "Enter the ssl certificate subject organizational unit.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "packstack",
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_OU',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_OU']},

            # NOTE(review): "certificaate" typo in the user-facing prompt
            # below — candidate for a separate string fix.
            # CN and mail default to the local hostname at initConfig time.
            {"CMD_OPTION": "ssl-cert-subject-common-name",
             "PROMPT": "Enter the ssl certificaate subject common name.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": gethostname(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_CN',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_CN']},

            {"CMD_OPTION": "ssl-cert-subject-email",
             "PROMPT": "Enter the ssl certificate subject admin email.",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             "DEFAULT_VALUE": "admin@%s" % gethostname(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": False,
             "CONF_NAME": 'CONFIG_SSL_CERT_SUBJECT_MAIL',
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False,
             "DEPRECATES": ['CONFIG_SELFSIGN_CACERT_SUBJECT_MAIL']},
        ]
    }
    update_params_usage(basedefs.PACKSTACK_DOC, params)

    groups = [
        {"GROUP_NAME": "SSL",
         "DESCRIPTION": "SSL Config parameters",
         # Always-true pre-condition: this group is asked unconditionally.
         "PRE_CONDITION": lambda x: 'yes',
         "PRE_CONDITION_MATCH": "yes",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in groups:
        controller.addGroup(group, params[group['GROUP_NAME']])
+
+
def initSequences(controller):
    """Register the CA-certificate setup step with the installer."""
    controller.addSequence("Setting up SSL", [], [], [
        {'title': 'Setting up CACERT',
         'functions': [create_self_signed_cert]},
    ])
+
+
+# ------------------------- helper functions -------------------------
+
def create_self_signed_cert(config, messages):
    """OpenSSL wrapper to create a self-signed CA.

    Ensures the CONFIG_SSL_CERT_DIR layout (certs/, keys/) exists and,
    when CONFIG_SSL_CACERT_SELFSIGN is 'y', generates a 4096-bit RSA CA
    certificate valid for ten years — unless both the certificate and
    key already exist.  Records the resulting file locations in
    CONFIG_SSL_CACERT_FILE / CONFIG_SSL_CACERT_KEY_FILE and appends a
    reminder about the generated CA to ``messages``.
    """

    # for now hardcoded place for landing CACert file on servers
    config['CONFIG_SSL_CACERT'] = '/etc/pki/tls/certs/packstack_cacert.crt'

#    if (config['CONFIG_AMQP_ENABLE_SSL'] != 'y' and
#       config["CONFIG_HORIZON_SSL"] != 'y'):
    if config['CONFIG_AMQP_ENABLE_SSL'] != 'y':
        return

    config['CONFIG_SSL_CERT_DIR'] = os.path.expanduser(
        config['CONFIG_SSL_CERT_DIR']
    )

    # Build the <dir>/certs and <dir>/keys layout expected below.
    if not os.path.isdir(config['CONFIG_SSL_CERT_DIR']):
        os.mkdir(config['CONFIG_SSL_CERT_DIR'])
    certs = os.path.join(config['CONFIG_SSL_CERT_DIR'], 'certs')
    if not os.path.isdir(certs):
        os.mkdir(certs)
    keys = os.path.join(config['CONFIG_SSL_CERT_DIR'], 'keys')
    if not os.path.isdir(keys):
        os.mkdir(keys)

    if config['CONFIG_SSL_CACERT_SELFSIGN'] != 'y':
        return

    CERT_FILE = config['CONFIG_SSL_CACERT_FILE'] = (
        '%s/certs/selfcert.crt' % config['CONFIG_SSL_CERT_DIR']
    )
    # Fixed: the key used to be written to keys/selfcert.crt, which
    # contradicted the documented default path keys/selfkey.key of
    # CONFIG_SSL_CACERT_KEY_FILE.
    KEY_FILE = config['CONFIG_SSL_CACERT_KEY_FILE'] = (
        '%s/keys/selfkey.key' % config['CONFIG_SSL_CERT_DIR']
    )
    if not os.path.exists(CERT_FILE) or not os.path.exists(KEY_FILE):
        # create a key pair
        k = crypto.PKey()
        k.generate_key(crypto.TYPE_RSA, 4096)

        # create a self-signed cert
        mail = config['CONFIG_SSL_CERT_SUBJECT_MAIL']
        cert = crypto.X509()
        subject = cert.get_subject()
        subject.C = config['CONFIG_SSL_CERT_SUBJECT_C']
        subject.ST = config['CONFIG_SSL_CERT_SUBJECT_ST']
        subject.L = config['CONFIG_SSL_CERT_SUBJECT_L']
        subject.O = config['CONFIG_SSL_CERT_SUBJECT_O']
        subject.OU = config['CONFIG_SSL_CERT_SUBJECT_OU']
        subject.CN = config['CONFIG_SSL_CERT_SUBJECT_CN']
        subject.emailAddress = mail
        cert.set_serial_number(1000)
        cert.gmtime_adj_notBefore(0)
        # valid for ten years
        cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)
        cert.set_issuer(cert.get_subject())
        cert.set_pubkey(k)

        # CA extensions
        cert.add_extensions([
            crypto.X509Extension("basicConstraints".encode('ascii'), False,
                                 "CA:TRUE".encode('ascii')),
            crypto.X509Extension("keyUsage".encode('ascii'), False,
                                 "keyCertSign, cRLSign".encode('ascii')),
            crypto.X509Extension("subjectKeyIdentifier".encode('ascii'), False,
                                 "hash".encode('ascii'),
                                 subject=cert),
        ])

        # authorityKeyIdentifier must be added in a second pass so the
        # subjectKeyIdentifier it references already exists.
        cert.add_extensions([
            crypto.X509Extension(
                "authorityKeyIdentifier".encode('ascii'), False,
                "keyid:always".encode('ascii'), issuer=cert)
        ])

        # NOTE(review): sha1 is a weak digest for a CA signature;
        # consider moving to sha256.
        cert.sign(k, 'sha1')

        # dump_certificate/dump_privatekey return bytes, so write in
        # binary mode; context managers close the handles promptly.
        with open(CERT_FILE, "wb") as certfile:
            certfile.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        with open(KEY_FILE, "wb") as keyfile:
            keyfile.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))

        messages.append(
            "%sNOTE%s : A selfsigned CA certificate was generated to be used "
            "for ssl, you should still change it do subordinate CA cert. In "
            "any case please save the contents of %s."
            % (utils.COLORS['red'], utils.COLORS['nocolor'],
                config['CONFIG_SSL_CERT_DIR']))

+ 326 - 0
packstack/plugins/swift_600.py

@@ -0,0 +1,326 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Swift
+"""
+
+import re
+import uuid
+import netaddr
+
+from packstack.installer import basedefs
+from packstack.installer import validators
+from packstack.installer import processors
+from packstack.installer.exceptions import ParamValidationError
+from packstack.installer import utils
+from packstack.installer.utils import split_hosts
+
+from packstack.modules.documentation import update_params_usage
+
+# ------------- Swift Packstack Plugin Initialization --------------
+
+PLUGIN_NAME = "OS-Swift"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
def initConfig(controller):
    """Register Swift configuration parameters with the installer.

    Covers the Keystone password, storage device list, zone/replica
    counts, filesystem type, the cluster hash suffix and the loopback
    storage size.
    """
    params = [
        {"CMD_OPTION": "os-swift-ks-passwd",
         "PROMPT": "Enter the password for the Swift Keystone access",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_SWIFT_KS_PW",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        # Empty value means a loopback device is used (see parse_devices).
        {"CMD_OPTION": "os-swift-storages",
         "PROMPT": "Enter the Swift Storage devices e.g. /path/to/dev",
         "OPTION_LIST": [],
         "VALIDATORS": [validate_storage],
         "DEFAULT_VALUE": '',
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_SWIFT_STORAGES",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False,
         "DEPRECATES": ['CONFIG_SWIFT_STORAGE_HOSTS']},

        {"CMD_OPTION": "os-swift-storage-zones",
         "PROMPT": ("Enter the number of swift storage zones, MUST be no "
                    "bigger than the number of storage devices configured"),
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_integer],
         "DEFAULT_VALUE": "1",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_SWIFT_STORAGE_ZONES",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "os-swift-storage-replicas",
         "PROMPT": ("Enter the number of swift storage replicas, MUST be no "
                    "bigger than the number of storage zones configured"),
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_integer],
         "DEFAULT_VALUE": "1",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_SWIFT_STORAGE_REPLICAS",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        {"CMD_OPTION": "os-swift-storage-fstype",
         "PROMPT": "Enter FileSystem type for storage nodes",
         "OPTION_LIST": ['xfs', 'ext4'],
         "VALIDATORS": [validators.validate_options],
         "DEFAULT_VALUE": "ext4",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_SWIFT_STORAGE_FSTYPE",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},

        # Random 16-hex-digit default generated once at initConfig time.
        {"CMD_OPTION": "os-swift-hash",
         "PROMPT": "Enter hash for Swift shared secret",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": uuid.uuid4().hex[:16],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "CONF_NAME": "CONFIG_SWIFT_HASH",
         "USE_DEFAULT": True,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CMD_OPTION": "os-swift-storage-size",
         "PROMPT": ("Enter the size of the storage device (eg. 2G, 2000M, "
                    "2000000K)"),
         "OPTION_LIST": [],
         "VALIDATORS": [validate_storage_size],
         "DEFAULT_VALUE": "2G",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": True,
         "CONF_NAME": "CONFIG_SWIFT_STORAGE_SIZE",
         "USE_DEFAULT": False,
         "NEED_CONFIRM": False,
         "CONDITION": False},
    ]
    update_params_usage(basedefs.PACKSTACK_DOC, params, sectioned=False)
    # The group is only prompted for when Swift installation was requested.
    group = {"GROUP_NAME": "OSSWIFT",
             "DESCRIPTION": "OpenStack Swift Config parameters",
             "PRE_CONDITION": "CONFIG_SWIFT_INSTALL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}
    controller.addGroup(group, params)
+
+
def initSequences(controller):
    """Queue the Swift preparation steps when Swift install is enabled.

    Builder entries go first: the ring must exist before the storage
    and proxy services are described.
    """
    if controller.CONF['CONFIG_SWIFT_INSTALL'] != 'y':
        return

    swift_steps = [
        {'title': 'Preparing Swift builder entries',
         'functions': [create_builder_manifest]},
        {'title': 'Preparing Swift proxy entries',
         'functions': [create_proxy_manifest]},
        {'title': 'Preparing Swift storage entries',
         'functions': [create_storage_manifest]},
    ]
    controller.addSequence("Installing OpenStack Swift", [], [], swift_steps)
+
+
+# ------------------------- helper functions -------------------------
+
def validate_storage(param, options=None):
    """Ensure a Swift storage device value is an absolute path.

    Empty values are accepted (a loopback device is used instead);
    anything else must start with '/'.
    """
    if not param:
        return
    if param.startswith('/'):
        return
    raise ParamValidationError(
        'Storage value has to be in format "/path/to/device".'
    )
+
+
def validate_storage_size(param, options=None):
    """Validate a storage size such as 2G, 2000M or 2000000K.

    Raises ParamValidationError when the value is not an integer
    followed by a single G/M/K unit suffix (case-insensitive).
    """
    # Anchored with $ so trailing garbage (e.g. "2GB") is rejected here
    # instead of crashing later when get_storage_size converts the value.
    if not re.match(r'\d+[GMK]$', param.strip(), re.IGNORECASE):
        msg = ('Storage size does not have a valid value '
               '(eg. 1G, 1000M, 1000000K)')
        raise ParamValidationError(msg)
+
+
def parse_devices(config):
    """
    Returns a list of dicts containing information about Swift storage
    devices.

    Parses CONFIG_SWIFT_STORAGES (comma-separated device paths, possibly
    "host:/path" entries from the deprecated CONFIG_SWIFT_STORAGE_HOSTS
    format), assigns zones round-robin across
    CONFIG_SWIFT_STORAGE_ZONES, and falls back to a single loopback
    device when nothing usable is configured.  Records the loopback
    decision in CONFIG_SWIFT_LOOPBACK.
    """
    devices = []
    device_number = 0
    num_zones = int(config["CONFIG_SWIFT_STORAGE_ZONES"])
    for device in config["CONFIG_SWIFT_STORAGES"].split(","):
        # we have to get rid of host part in case deprecated parameter
        # CONFIG_SWIFT_STORAGE_HOSTS has been used
        if ':' in device:
            device = device.split(':')[1]
        # device should be empty string in case only IP address has been used
        try:
            netaddr.IPAddress(device)
        except Exception:
            device = device.strip()
        else:
            device = ''

        if not device:
            continue
        device_number += 1
        # Round-robin zone assignment, 1-based.
        zone = str((device_number % num_zones) + 1)
        devices.append({'device': device, 'zone': zone,
                        'device_name': 'device%s' % device_number})
    if not devices:
        # NOTE(review): zone is an int here but a str above; consumers
        # format it with "%s" so both work, but unifying would be cleaner.
        devices.append({'device': None, 'zone': 1,
                        'device_name': 'swiftloopback'})
        config['CONFIG_SWIFT_LOOPBACK'] = 'y'
    else:
        config['CONFIG_SWIFT_LOOPBACK'] = 'n'
    return devices
+
+
def check_device(host, device):
    """
    Raises ScriptRuntimeError if given device is not mounted on given
    host.

    Runs a small remote shell script via ScriptRunner: the device must
    exist, and must either be unmounted or already mounted under
    /srv/node; any other state makes the script exit non-zero.
    """
    server = utils.ScriptRunner(host)

    # the device MUST exist
    cmd = 'ls -l %s'
    server.append(cmd % device)

    # if it is not mounted then we can use it
    cmd = 'grep "%s " /proc/self/mounts || exit 0'
    server.append(cmd % device)

    # if it is mounted then the mount point has to be in /srv/node
    cmd = 'grep "%s /srv/node" /proc/self/mounts && exit 0'
    server.append(cmd % device)

    # if we got here without exiting then we can't use this device
    server.append('exit 1')
    server.execute()
+
+
def get_storage_size(config):
    """Convert CONFIG_SWIFT_STORAGE_SIZE (e.g. 2G, 2000M, 2000000K) to KiB.

    Returns the size as an integer number of kilobytes, or None when the
    value carries no recognized unit suffix (validate_storage_size
    should normally have rejected such values already).
    """
    ranges = {'G': 1048576, 'M': 1024, 'K': 1}
    size = config['CONFIG_SWIFT_STORAGE_SIZE'].strip()
    for measure in ('G', 'M', 'K'):
        match = re.match(r'(\d+)' + measure, size, re.IGNORECASE)
        if match:
            # Capture the digits explicitly: the previous
            # rstrip(measure)-based parsing crashed on lowercase
            # suffixes like "2g" (matched case-insensitively but
            # stripped case-sensitively).
            return int(match.group(1)) * ranges[measure]
+
+
+# -------------------------- step functions --------------------------
+
+
def create_builder_manifest(config, messages):
    """Build Swift ring device entries for object/container/account rings.

    Parses the configured devices into the module-level ``devices``
    global (also consumed by create_storage_manifest) and stores
    per-device ring entries in the SWIFT_RING_*_DEVICES config keys.
    """
    global devices
    devices = parse_devices(config)
    # The ring file should be built and distributed before the storage services
    # come up. Specifically the replicator crashes if the ring isn't present

    # NOTE(review): device_def is defined but never called here —
    # candidate for removal.
    def device_def(dev_type, host, dev_port, devicename, zone):
        # device host has to be IP address
        host = utils.force_ip(host)
        fmt = ('\n@@%s { "%s:%s/%s":\n'
               '  zone   => %s,\n'
               '  weight => 10, }\n')
        return fmt % (dev_type, host, dev_port, devicename, zone)

    # Add each device to the ring
    devicename = 0
    for configkey, dev_type, dev_port in (
        [('SWIFT_RING_OBJECT_DEVICES', 'ring_object_device', 6000),
         ('SWIFT_RING_CONTAINER_DEVICES', 'ring_container_device', 6001),
         ('SWIFT_RING_ACCOUNT_DEVICES', 'ring_account_device', 6002)]):
        swift_dev_details = dict()
        host = utils.force_ip(config['CONFIG_STORAGE_HOST_URL'])
        # NOTE(review): fstype is unused in this function.
        fstype = config["CONFIG_SWIFT_STORAGE_FSTYPE"]
        for device in devices:
            devicename = device['device_name']
            key = "dev_%s_%s" % (host, devicename)
            swift_dev_details.setdefault(key, {})
            zone = device['zone']
            swift_dev_details[key]['name'] = "%s:%s/%s" % (host, dev_port,
                                                           devicename)
            # Fixed weight of 10 for every device.
            swift_dev_details[key]['weight'] = "%s" % 10
            swift_dev_details[key]['zone'] = "%s" % zone
        config[configkey] = swift_dev_details
+
+
def create_proxy_manifest(config, messages):
    """Record the firewall rule that opens the Swift proxy port."""
    # Swift proxy listens on 8080/tcp and must be reachable from anywhere.
    config['FIREWALL_SWIFT_PROXY_RULES'] = {
        'swift_proxy': {
            'host': "ALL",
            'service_name': "swift proxy",
            'chain': "INPUT",
            'ports': ['8080'],
            'proto': "tcp",
        }
    }
+
+
def create_storage_manifest(config, messages):
    """Record storage device details and firewall rules for Swift storage.

    Consumes the module-level ``devices`` list prepared by
    create_builder_manifest, writes CONFIG_SWIFT_STORAGE_DEVICES (and
    CONFIG_SWIFT_STORAGE_SEEK for the loopback fallback), then opens the
    storage/rsync ports to the storage host plus, when Nova is
    installed, all compute hosts.
    """
    global devices

    devicename = 0
    swift_dev_details = dict()
    host = utils.force_ip(config['CONFIG_STORAGE_HOST_URL'])
    fstype = config["CONFIG_SWIFT_STORAGE_FSTYPE"]

    # this need to happen once per storage device
    for device in devices:
        if device['device'] is None:
            # Loopback fallback: only the sparse-file size is needed.
            config['CONFIG_SWIFT_STORAGE_SEEK'] = get_storage_size(config)
        else:
            devicename = device['device_name']
            devicedev = device['device']
            key = "dev_%s_%s" % (host, devicename)
            swift_dev_details.setdefault(key, {})
            swift_dev_details[key]['device'] = "%s" % devicename
            swift_dev_details[key]['dev'] = "%s" % devicedev
            swift_dev_details[key]['fstype'] = "%s" % fstype
    config['CONFIG_SWIFT_STORAGE_DEVICES'] = swift_dev_details

    # set allowed hosts for firewall
    hosts = set([config['CONFIG_STORAGE_HOST']])
    if config['CONFIG_NOVA_INSTALL'] == 'y':
        hosts |= split_hosts(config['CONFIG_COMPUTE_HOSTS'])

    # Object/container/account servers (6000-6002) plus rsync (873).
    fw_details = dict()
    for host in hosts:
        key = "swift_storage_and_rsync_%s" % host
        fw_details.setdefault(key, {})
        fw_details[key]['host'] = "%s" % host
        fw_details[key]['service_name'] = "swift storage and rsync"
        fw_details[key]['chain'] = "INPUT"
        fw_details[key]['ports'] = ['6000', '6001', '6002', '873']
        fw_details[key]['proto'] = "tcp"
    config['FIREWALL_SWIFT_STORAGE_RULES'] = fw_details

+ 158 - 0
packstack/plugins/trove_850.py

@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Installs and configures Trove
+"""
+
+from packstack.installer import basedefs
+from packstack.installer import utils
+from packstack.installer import validators
+from packstack.installer import processors
+
+from packstack.modules.documentation import update_params_usage
+from packstack.modules.ospluginutils import generate_ssl_cert
+
+# ------------------ Trove Packstack Plugin initialization ------------------
+
+PLUGIN_NAME = "OS-Trove"
+PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
+
+
+# NOVA_USER, NOVA_TENANT, NOVA_PW
+
def process_trove_nova_pw(param, param_name, config=None):
    """Default the Trove->Nova password to the Trove Keystone password.

    Substitutes only when the value was left at PW_PLACEHOLDER and the
    Nova user is the stock 'trove' account; any explicit value is kept.
    """
    if param != 'PW_PLACEHOLDER':
        return param
    if config['CONFIG_TROVE_NOVA_USER'] != 'trove':
        return param
    return config['CONFIG_TROVE_KS_PW']
+
+
def initConfig(controller):
    """Register Trove configuration parameters with the installer.

    Adds the Trove DB/Keystone passwords and the Nova credentials Trove
    uses; the Nova password falls back to the Keystone password via
    process_trove_nova_pw.
    """
    parameters = [
        {"CONF_NAME": "CONFIG_TROVE_DB_PW",
         "CMD_OPTION": "trove-db-passwd",
         "PROMPT": "Enter the password to use for Trove to access the DB",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CONF_NAME": "CONFIG_TROVE_KS_PW",
         "CMD_OPTION": "trove-ks-passwd",
         "PROMPT": "Enter the password for Trove Keystone access",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",
         "PROCESSORS": [processors.process_password],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CONF_NAME": "CONFIG_TROVE_NOVA_USER",
         "CMD_OPTION": "trove-nova-user",
         "PROMPT": "Enter the user for Trove to use to connect to Nova",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "trove",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": True,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CONF_NAME": "CONFIG_TROVE_NOVA_TENANT",
         "CMD_OPTION": "trove-nova-tenant",
         "PROMPT": "Enter the tenant for Trove to use to connect to Nova",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "services",
         "MASK_INPUT": False,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": True,
         "NEED_CONFIRM": True,
         "CONDITION": False},

        {"CONF_NAME": "CONFIG_TROVE_NOVA_PW",
         "CMD_OPTION": "trove-nova-passwd",
         "PROMPT": "Enter the password for Trove to use to connect to Nova",
         "OPTION_LIST": [],
         "VALIDATORS": [validators.validate_not_empty],
         "DEFAULT_VALUE": "PW_PLACEHOLDER",  # default is trove pass
         "PROCESSORS": [process_trove_nova_pw],
         "MASK_INPUT": True,
         "LOOSE_VALIDATION": False,
         "USE_DEFAULT": False,
         "NEED_CONFIRM": True,
         "CONDITION": False},
    ]
    update_params_usage(basedefs.PACKSTACK_DOC, parameters, sectioned=False)
    # The group is only prompted for when Trove installation was requested.
    group = {"GROUP_NAME": "Trove",
             "DESCRIPTION": "Trove config parameters",
             "PRE_CONDITION": "CONFIG_TROVE_INSTALL",
             "PRE_CONDITION_MATCH": "y",
             "POST_CONDITION": False,
             "POST_CONDITION_MATCH": True}

    controller.addGroup(group, parameters)
+
+
def initSequences(controller):
    """Queue the Trove preparation step when Trove install is enabled."""
    if controller.CONF['CONFIG_TROVE_INSTALL'] != 'y':
        return

    controller.addSequence("Installing Trove", [], [], [
        {'title': 'Preparing Trove entries',
         'functions': [create_manifest]},
    ])
+
+
+# ------------------------ step functions --------------------------
def create_manifest(config, messages):
    """Prepare Trove SSL material, Nova credentials and firewall rules."""
    if config['CONFIG_AMQP_ENABLE_SSL'] == 'y':
        config['CONFIG_TROVE_SSL_CERT'] = (
            '/etc/pki/tls/certs/ssl_amqp_trove.crt')
        config['CONFIG_TROVE_SSL_KEY'] = (
            '/etc/pki/tls/private/ssl_amqp_trove.key')
        generate_ssl_cert(config, config['CONFIG_CONTROLLER_HOST'], 'trove',
                          config['CONFIG_TROVE_SSL_KEY'],
                          config['CONFIG_TROVE_SSL_CERT'])

    # Fall back to the Keystone password when the stock 'trove' Nova user
    # ended up with an empty password (see process_trove_nova_pw).
    if (config['CONFIG_TROVE_NOVA_USER'] == 'trove' and
            config['CONFIG_TROVE_NOVA_PW'] == ''):
        config['CONFIG_TROVE_NOVA_PW'] = config['CONFIG_TROVE_KS_PW']

    # Trove API listens on 8779/tcp and must be reachable from anywhere.
    config['FIREWALL_TROVE_API_RULES'] = {
        'trove': {
            'host': "ALL",
            'service_name': "trove api",
            'chain': "INPUT",
            'ports': ['8779'],
            'proto': "tcp",
        }
    }

+ 14 - 0
packstack/puppet/modules/packstack/Gemfile

@@ -0,0 +1,14 @@
# Gem dependencies for developing and testing the packstack puppet module.
source 'https://rubygems.org'

# Tools needed only by the rake spec/lint targets, never at runtime.
group :development, :test do
  gem 'puppetlabs_spec_helper', :require => false
  gem 'puppet-lint', '~> 0.3.2'
  gem 'rake', '10.1.1'
  gem 'rspec', '< 2.99'
end

# CI can pin a specific puppet release via PUPPET_GEM_VERSION;
# otherwise the newest available puppet gem is used.
if puppetversion = ENV['PUPPET_GEM_VERSION']
  gem 'puppet', puppetversion, :require => false
else
  gem 'puppet', :require => false
end

+ 6 - 0
packstack/puppet/modules/packstack/Rakefile

@@ -0,0 +1,6 @@
# Rake entry point: pulls in the standard puppet spec and lint tasks.
require 'puppetlabs_spec_helper/rake_tasks'
require 'puppet-lint/tasks/puppet-lint'

# Treat lint warnings as failures, but skip checks this module does not
# follow (80-char lines, class parameter defaults).
PuppetLint.configuration.fail_on_warnings = true
PuppetLint.configuration.send('disable_80chars')
PuppetLint.configuration.send('disable_class_parameter_defaults')

+ 16 - 0
packstack/puppet/modules/packstack/lib/facter/default_hypervisor.rb

@@ -0,0 +1,16 @@
+
+# Custom fact to keep backwards compatibility to default to qemu when the
+# is_virtual fact is true and otherwise default to kvm
+# This fact is then used as a default value for the
+# CONFIG_NOVA_LIBVIRT_VIRT_TYPE packstack parameter.
+
# Fact resolving to 'qemu' when this host is itself virtualized and to
# 'kvm' on bare metal; consumed as the default for the
# CONFIG_NOVA_LIBVIRT_VIRT_TYPE packstack parameter.
Facter.add(:default_hypervisor) do
  setcode do
    Facter.value(:is_virtual) == true ? 'qemu' : 'kvm'
  end
end

+ 8 - 0
packstack/puppet/modules/packstack/lib/facter/home_dir.rb

@@ -0,0 +1,8 @@
+
+# Current users home directory
+
# Fact: home_dir -- the current user's home directory, obtained by
# letting the shell expand $HOME via /bin/echo.
Facter.add("home_dir") do
  setcode do
    Facter::Util::Resolution.exec('/bin/echo $HOME')
  end
end

+ 14 - 0
packstack/puppet/modules/packstack/lib/facter/mariadb.rb

@@ -0,0 +1,14 @@
+
+# Check if mariadb provides galera server
+
# Fact: true when the distro's mariadb packaging provides the galera
# server (queried through dnf on Fedora > 22, repoquery elsewhere).
Facter.add(:mariadb_provides_galera) do
  setcode do
    modern_fedora = Facter.value(:operatingsystem) == 'Fedora' &&
                    Facter.value(:operatingsystemmajrelease).to_i > 22
    query = if modern_fedora
              'dnf repoquery --whatprovides mariadb-galera-server'
            else
              'repoquery --whatprovides mariadb-galera-server'
            end
    !(Facter::Util::Resolution.exec(query) =~ /mariadb-server-galera.*/).nil?
  end
end

+ 218 - 0
packstack/puppet/modules/packstack/lib/facter/netns.py

@@ -0,0 +1,218 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+import inspect
+import os
+import random
+import subprocess
+import sys
+import tempfile
+import uuid
+import unittest
+
+
def execute(cmd_string, check_error=True, return_code=0, input=None,
            block=True, error_msg='Error executing cmd'):
    """Run *cmd_string* as a subprocess and return the Popen object.

    The command is split on single spaces and run WITHOUT a shell, so
    arguments must not contain embedded whitespace and shell features
    (redirection, globbing) are not interpreted.  When *input* is given
    it is written to the child's stdin via communicate(); otherwise,
    when *block* is true, the call waits for the child to exit.  With
    *check_error* set, a finished child whose exit status differs from
    *return_code* raises Exception with command line, exit code and any
    unread stdout/stderr in the message.

    NOTE(review): communicate()/read() exchange str objects, which is
    Python 2 behavior -- confirm bytes handling before running on py3.
    """
    print(cmd_string)
    cmd = cmd_string.split(' ')
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    if input:
        proc.communicate(input=input)
    elif block:
        proc.wait()
    # A still-running child (block=False path) has returncode None and
    # is deliberately exempt from the error check below.
    if (check_error and
            proc.returncode is not None and
            proc.returncode != return_code):
        msg = """
%(error_msg)s
 Command: %(cmd)s
 Exit Code: %(code)s
""".strip() % dict(cmd=' '.join(cmd),
                   code=proc.returncode,
                   error_msg=error_msg)
        if input:
            msg += "\n Stdin: %s" % input
        # communicate() closes both pipes, so these reads only run on
        # the wait()-based paths where the pipes are still open.
        if not proc.stdout.closed:
            msg += "\n Stdout: %s" % proc.stdout.read()
        if not proc.stderr.closed:
            msg += "\n Stderr: %s" % proc.stderr.read()
        raise Exception(msg)
    return proc
+
+
def e(cmd, prefix='ip netns exec ', sudo=False, **kwargs):
    """Execute *cmd*, interpolated with the CALLER's local variables.

    sys._getframe(1) pulls the caller's locals so call sites can write
    e('%(ns)s ip link list') with a local ``ns`` in scope.  *prefix* is
    prepended to the command (pass prefix='' to run outside a netns)
    and *sudo* additionally prepends 'sudo '.  Remaining keyword
    arguments are forwarded to execute().
    """
    frame_locals = inspect.getargvalues(sys._getframe(1))[3]
    if sudo:
        prefix = 'sudo ' + prefix
    return execute(prefix + cmd % frame_locals, **kwargs)
+
+
def rand_name(name='test'):
    """Return *name* plus a dash and a random positive 31-bit integer."""
    suffix = random.randint(1, 0x7fffffff)
    return '{0}-{1}'.format(name, suffix)
+
+
@contextlib.contextmanager
def add_namespace():
    """Create a randomly named network namespace with loopback up.

    Yields the namespace name; the namespace is always deleted on exit,
    even if namespace setup or the caller's body fails.
    """
    name = rand_name('testns')
    try:
        e('ip netns add %(name)s', prefix='')
        e('%(name)s ip link set lo up')
        yield name
    finally:
        e('ip netns delete %(name)s', prefix='')
+
+
@contextlib.contextmanager
def add_namespaces():
    """Yield a pair (ns1, ns2) of freshly created network namespaces."""
    with add_namespace() as ns1:
        with add_namespace() as ns2:
            yield ns1, ns2
+
+
def add_veth_pair(ns1, ns2, veth1, veth2, address1, address2):
    """Create a veth pair joining namespaces *ns1* and *ns2*.

    *veth1*/*address1* land in ns1 and *veth2*/*address2* in ns2; both
    ends get a /24 address and are brought up.
    """
    e('ip link add %(veth1)s netns %(ns1)s type veth '
      'peer name %(veth2)s netns %(ns2)s', prefix='')
    e('%(ns1)s ip link show %(veth1)s')
    e('%(ns2)s ip link show %(veth2)s')
    e('%(ns1)s ip -4 addr add %(address1)s/24 brd 255.255.255.0 '
      'scope global dev %(veth1)s')
    e('%(ns2)s ip -4 addr add %(address2)s/24 brd 255.255.255.0 '
      'scope global dev %(veth2)s')
    e('%(ns1)s ip link set %(veth1)s up')
    e('%(ns2)s ip link set %(veth2)s up')
+
+
class TestNetns(unittest.TestCase):
    """System tests probing kernel network-namespace support.

    Each test shells out to ip/iptables/ping/nc, so running requires
    root privileges and those tools installed; the suite's exit status
    is consumed by the netns_support facter fact.
    """

    def test_neutron_netns_cmds(self):
        """Exercise the netns functionality required by neutron.

          - Check that a veth pair can be configured to transit traffic
            between 2 namespaces
          - Check that iptables filtering can be configured
          - Check that iptables routing can be configured

        """
        # Naming scheme [resource][id]_[namespace id]
        veth1_1 = 'veth1_1'
        veth1_2 = 'veth1_2'
        address1_1 = '192.168.0.1'
        address1_2 = '192.168.0.2'
        with add_namespaces() as (ns1, ns2):
            # Check that inter-namespace connectivity can be established
            add_veth_pair(ns1, ns2, veth1_1, veth1_2, address1_1, address1_2)
            e('%(ns1)s ip link list')
            e('%(ns1)s ip link show %(veth1_1)s')
            e('%(ns1)s arping -A -U -I %(veth1_1)s '
              '-c 1 %(address1_1)s')
            e('%(ns2)s route add default gw %(address1_1)s')
            e('%(ns2)s ping -c 1 -w 1 %(address1_1)s')
            # The root namespace must NOT reach the address (exit 1) or
            # isolation is broken.
            e('ping -c 1 -w 1 %(address1_1)s', prefix='', return_code=1,
              error_msg='Namespace isolation not supported!')

            # Check that iptables filtering and save/restore can be performed
            # NOTE(review): execute() runs commands without a shell, so
            # the '>' / '<' tokens below reach iptables-save/-restore as
            # literal arguments rather than performing redirection --
            # confirm this behaves as intended.
            try:
                iptables_filename = os.path.join(
                    tempfile.gettempdir(),
                    'iptables-%s' % str(uuid.uuid4()))
                e('%%(ns1)s iptables-save > %s' % iptables_filename)
                e('%(ns1)s iptables -A INPUT -p icmp --icmp-type 8 -j DROP')
                e('%(ns2)s ping -c 1 -w 1 %(address1_1)s', return_code=1)
                e('%%(ns1)s iptables-restore < %s' % iptables_filename)
                e('%(ns2)s ping -c 1 -w 1 %(address1_1)s')
            finally:
                if os.path.exists(iptables_filename):
                    os.unlink(iptables_filename)

            # Create another namespace (ns3) that is connected to ns1
            # via a different subnet, so that traffic between ns3 and
            # ns2 will have to be routed by ns1:
            #
            #  ns2 <- 192.168.0.0/24 -> ns1 <- 192.168.1.0/24 -> ns3
            #
            with add_namespace() as ns3:
                veth2_1 = 'veth2_1'
                veth2_3 = 'veth2_3'
                address2_1 = '192.168.1.1'
                address2_3 = '192.168.1.2'
                add_veth_pair(ns1, ns3, veth2_1, veth2_3,
                              address2_1, address2_3)
                # ns1 forwards and masquerades between the two subnets.
                e('%(ns1)s sysctl -w net.ipv4.ip_forward=1')
                e('%(ns1)s iptables -t nat -A POSTROUTING -o %(veth2_1)s -j '
                  'MASQUERADE')
                e('%(ns1)s iptables -A FORWARD -i %(veth2_1)s -o %(veth1_1)s '
                  '-m state --state RELATED,ESTABLISHED -j ACCEPT')
                e('%(ns1)s iptables -A FORWARD -i %(veth1_1)s -o %(veth2_1)s '
                  '-j ACCEPT')
                e('%(ns2)s ping -c 1 -w 1 %(address2_3)s')

            # Check that links can be torn down
            e('%(ns1)s ip -4 addr del %(address1_1)s/24 '
              'dev %(veth1_1)s')
            e('%(ns1)s ip link delete %(veth1_1)s')

    def test_domain_socket_access(self):
        """Check that a domain socket can be accessed regardless of namespace.

        Neutron extends nova' metadata service - which identifies VM's
        by their ip addresses - to configurations with overlapping
        ips.  Support is provided by:

          - a proxy in each namespace (neutron-ns-metadata-proxy)

            - the proxy can uniquely identify a given VM by its ip
              address in the context of the router or network of the
              namespace.

          - a metadata agent (neutron-metadata-agent) that forwards
            requests from the namespace proxies to nova's metadata
            service.

        Communication between the proxies and the agent is over a unix
        domain socket.  It is necessary that access to a domain socket
        not be restricted by namespace, or such communication will not
        be possible.

        """
        try:
            execute('which nc')
        except Exception:
            self.fail("The 'nc' command is not available - please install it.")

        sock_filename = os.path.join(tempfile.gettempdir(),
                                     'testsock-%s' % str(uuid.uuid4()))
        server = None
        try:
            # Create a server in the root namespace attached to a domain socket
            server = e('nc -lU %(sock_filename)s', sudo=False, prefix='',
                       block=False)
            # Attempt to connect to the domain socket from within a namespace
            with add_namespace() as ns:
                e('%(ns)s nc -U %(sock_filename)s', input='magic',
                  error_msg='Unable to communicate between namespaces via '
                            'domain sockets.')
        finally:
            # Always reap the background server and the socket file.
            if server:
                server.kill()
            if os.path.exists(sock_filename):
                os.unlink(sock_filename)
+
+
# Run the namespace checks as a plain unittest suite when invoked
# directly; the process exit status reports overall success.
if __name__ == '__main__':
    unittest.main()

+ 16 - 0
packstack/puppet/modules/packstack/lib/facter/netns_support.rb

@@ -0,0 +1,16 @@
# Fact: netns_support -- true when the bundled netns.py probe suite
# (ip netns / iptables / domain-socket checks) passes on this host.
Facter.add(:netns_support) do
  setcode do
    # Silence the probe's output; the ensure block guarantees the real
    # stdout/stderr are restored even if reopen/system raises, instead
    # of leaving the whole Facter run redirected to /dev/null.
    oldout = $stdout.clone
    olderr = $stderr.clone
    begin
      $stdout.reopen("/dev/null", "w")
      $stderr.reopen("/dev/null", "w")

      script_path = File.join(File.dirname(__FILE__), 'netns.py')
      passed = system "python #{script_path}"
    ensure
      $stdout.reopen(oldout)
      $stderr.reopen(olderr)
    end

    passed
  end
end

+ 4 - 0
packstack/puppet/modules/packstack/lib/facter/network.rb

@@ -0,0 +1,4 @@
require 'facter'
# Fact: gateway_device -- interface name of a default-route entry
# (destination and mask columns both zero) in /proc/net/route, sorted
# numerically descending on column 7 and taking the first line.
# NOTE(review): the descending sort prefers the HIGHEST value in that
# column -- confirm that choice is intended.
Facter.add(:gateway_device) do
  setcode "awk '$2==00000000 && $8==00000000 {print $1}' /proc/net/route|sort -r -n -k 7|head -n 1"
end

+ 30 - 0
packstack/puppet/modules/packstack/lib/puppet/parser/functions/choose_my_ip.rb

@@ -0,0 +1,30 @@
+
# choose_my_ip(list): returns the entry of *list* that is one of this
# host's own interface addresses (the last match wins), or nil when
# none of the candidates is local.
module Puppet::Parser::Functions
  newfunction(:choose_my_ip, :type => :rvalue) do |args|

    if args.size < 1
      raise(
        Puppet::ParseError,
        "choose_my_ip(): Wrong number of arguments given (#{args.size} for 1)"
      )
    end

    candidates = args[0]
    candidates = [candidates] unless candidates.kind_of?(Array)

    # Collect the IP bound to every local interface reported by facter.
    local_ips = lookupvar('interfaces').split(',').map do |iface|
      iface.strip!
      lookupvar("ipaddress_#{iface}")
    end

    # Last candidate that is a local address, nil when none matches.
    candidates.select { |ip| local_ips.include? ip }.last
  end
end

+ 55 - 0
packstack/puppet/modules/packstack/lib/puppet/parser/functions/force_interface.rb

@@ -0,0 +1,55 @@
+
+require 'ipaddr'
+
# force_interface(value, allow): when *allow* is truthy, rewrites every
# CIDR fragment inside colon-separated interface specs to the names of
# the local interfaces whose address falls inside that CIDR; plain
# fragments pass through.  With *allow* false the value is returned
# unchanged.  Accepts a string or an array and preserves the shape.
module Puppet::Parser::Functions
  newfunction(:force_interface, :type => :rvalue) do |args|

    if args.size < 2
      raise(
        Puppet::ParseError,
        "force_interface(): Wrong number of arguments given (#{args.size} for 2)"
      )
    end

    value = args[0]
    allow = args[1]

    # Normalize to an array; remember the original shape for the return.
    was_array = value.kind_of?(Array)
    if not was_array
      value = [value]
    end

    result = []
    if allow
      value.each do |val|
        translated = []
        val.split(':').each do |fragment|
          if fragment.include?('/') # this is CIDR, so translate it
            cidr = IPAddr.new fragment
            # Scan every local interface; each one whose address lies
            # in the CIDR replaces the fragment (possibly several).
            lookupvar('interfaces').split(',').each do |interface|
              interface.strip!
              ifaddr = lookupvar("ipaddress_#{interface}")
              if ifaddr == nil
                next
              end
              ifcidr = IPAddr.new ifaddr
              if cidr.include?(ifcidr)
                translated.push(interface)
              end
            end
          else
            translated.push(fragment)
          end
        end
        result.push(translated.join(':'))
      end
    else
      result = value
    end
    if not was_array
      result = result[0]
    end
    result
  end
end

+ 20 - 0
packstack/puppet/modules/packstack/lib/puppet/parser/functions/force_ip.rb

@@ -0,0 +1,20 @@
+
+require 'resolv'
+require 'ipaddr'
+
+
# force_ip(value): returns *value* unchanged when it already parses as
# an IP address literal, otherwise resolves it via DNS.
module Puppet::Parser::Functions
  newfunction(:force_ip, :type => :rvalue) do |args|
    if args.size < 1
      raise(
        Puppet::ParseError,
        "force_ip(): Wrong number of arguments given (#{args.size} for 1)"
      )
    end
    begin
      # Probe only; a parse failure means we were given a hostname.
      IPAddr.new(args[0])
      args[0]
    rescue StandardError
      Resolv.getaddress args[0]
    end
  end
end

+ 21 - 0
packstack/puppet/modules/packstack/lib/puppet/parser/functions/hiera_undef.rb

@@ -0,0 +1,21 @@
+# Copyright (c) 2013 puppet@camptocamp.com All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#    http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
# hiera_undef(key [, override]): hiera lookup that falls back to the
# :undef default instead of failing when the key is missing, so the
# result can be passed straight to parameters that accept undef.
module Puppet::Parser::Functions
  newfunction(:hiera_undef, :type => :rvalue) do |*args|
    require 'hiera_puppet'
    key, override = HieraPuppet.parse_args(args)
    HieraPuppet.lookup(key, :undef, self, override, :priority)
  end
end

+ 89 - 0
packstack/puppet/modules/packstack/manifests/amqp.pp

@@ -0,0 +1,89 @@
# Installs and configures the RabbitMQ broker used as the AMQP backend
# and opens its firewall ports.  The SSL and non-SSL variants are
# mutually exclusive: with CONFIG_AMQP_SSL_ENABLED the broker listens
# only on the SSL port using the packstack-generated key/cert pair.
define enable_rabbitmq {
  create_resources(packstack::firewall, hiera('FIREWALL_AMQP_RULES', {}))
  $amqp_enable_ssl = hiera('CONFIG_AMQP_SSL_ENABLED')

  if $amqp_enable_ssl {
    $kombu_ssl_ca_certs = hiera('CONFIG_AMQP_SSL_CACERT_FILE', undef)
    $kombu_ssl_keyfile = '/etc/pki/tls/private/ssl_amqp.key'
    $kombu_ssl_certfile = '/etc/pki/tls/certs/ssl_amqp.crt'

    # rabbitmq must own its key/cert; replacing them restarts the broker.
    $files_to_set_owner = [ $kombu_ssl_keyfile, $kombu_ssl_certfile ]
    file { $files_to_set_owner:
      owner   => 'rabbitmq',
      group   => 'rabbitmq',
      require => Package['rabbitmq-server'],
      notify  => Service['rabbitmq-server'],
    }

    class { '::rabbitmq':
      port                     => undef,
      ssl_port                 => hiera('CONFIG_AMQP_CLIENTS_PORT'),
      ssl_only                 => true,
      ssl                      => true,
      ssl_cacert               => $kombu_ssl_ca_certs,
      ssl_cert                 => $kombu_ssl_certfile,
      ssl_key                  => $kombu_ssl_keyfile,
      default_user             => hiera('CONFIG_AMQP_AUTH_USER'),
      default_pass             => hiera('CONFIG_AMQP_AUTH_PASSWORD'),
      package_provider         => 'yum',
      repos_ensure             => false,
      admin_enable             => false,
      # FIXME: it's ugly to not to require client certs
      # NOTE(review): 'true' makes the broker REQUIRE peer certs, which
      # appears to contradict the FIXME above -- confirm the intent.
      ssl_fail_if_no_peer_cert => true,
      config_variables         => {
     'tcp_listen_options' => '[binary,{packet, raw},{reuseaddr, true},{backlog, 128},{nodelay, true},{exit_on_close, false},{keepalive, true}]',
     'loopback_users'     => '[]',
      },
    }
  } else {
    class { '::rabbitmq':
      port             => hiera('CONFIG_AMQP_CLIENTS_PORT'),
      ssl              => false,
      default_user     => hiera('CONFIG_AMQP_AUTH_USER'),
      default_pass     => hiera('CONFIG_AMQP_AUTH_PASSWORD'),
      package_provider => 'yum',
      repos_ensure     => false,
      admin_enable     => false,
      config_variables => {
     'tcp_listen_options' => '[binary,{packet, raw},{reuseaddr, true},{backlog, 128},{nodelay, true},{exit_on_close, false},{keepalive, true}]',
     'loopback_users'     => '[]',
      },
    }
  }

  # TO-DO: remove this workaround as soon as this is fixed in puppetlabs-rabbitmq module
  #        https://github.com/puppetlabs/puppetlabs-rabbitmq/pull/454
  File <| path == '/etc/rabbitmq/rabbitmq.config' |> {
    ensure  => present,
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0640',
  }
}
+
# Entry point for AMQP setup: only the 'rabbitmq' backend is handled;
# any other CONFIG_AMQP_BACKEND value is a no-op.
class packstack::amqp ()
{
     $amqp = hiera('CONFIG_AMQP_BACKEND')

     case $amqp  {
       'rabbitmq': {
         enable_rabbitmq { 'rabbitmq': }

          # The following kernel parameters help alleviate some RabbitMQ
          # connection issues

          sysctl::value { 'net.ipv4.tcp_keepalive_intvl':
            value => '1',
          }

          sysctl::value { 'net.ipv4.tcp_keepalive_probes':
            value => '5',
          }

          sysctl::value { 'net.ipv4.tcp_keepalive_time':
            value => '5',
          }
       }
       default: {}
     }
}

+ 40 - 0
packstack/puppet/modules/packstack/manifests/aodh.pp

@@ -0,0 +1,40 @@
# Configures the Aodh (telemetry alarming) services: API behind httpd,
# evaluator, notifier, listener and client, plus its firewall rules.
class packstack::aodh ()
{
    create_resources(packstack::firewall, hiera('FIREWALL_AODH_RULES', {}))

    # Aodh reuses ceilometer's coordination backend setting; only redis
    # yields a real coordination URL, otherwise it is left empty.
    $config_aodh_coordination_backend = hiera('CONFIG_CEILOMETER_COORDINATION_BACKEND')

    if $config_aodh_coordination_backend == 'redis' {
      $redis_host = hiera('CONFIG_REDIS_HOST_URL')
      $redis_port = hiera('CONFIG_REDIS_PORT')
      $coordination_url = "redis://${redis_host}:${redis_port}"
    } else {
      $coordination_url = ''
    }

    class { '::aodh::keystone::authtoken':
      password => hiera('CONFIG_AODH_KS_PW'),
      auth_url => hiera('CONFIG_KEYSTONE_ADMIN_URL'),
    }

    # The API runs as an httpd wsgi app, not as a standalone service.
    class { '::aodh::api':
      enabled      => true,
      service_name => 'httpd',
    }

    class { '::aodh::wsgi::apache':
      workers => hiera('CONFIG_SERVICE_WORKERS'),
      ssl     => false
    }

    class { '::aodh::auth':
      auth_password => hiera('CONFIG_AODH_KS_PW'),
      auth_url => hiera('CONFIG_KEYSTONE_PUBLIC_URL_VERSIONLESS'),
    }
    class { '::aodh::evaluator':
      coordination_url => $coordination_url,
    }
    class { '::aodh::notifier': }
    class { '::aodh::listener': }
    class { '::aodh::client': }
}

+ 37 - 0
packstack/puppet/modules/packstack/manifests/aodh/rabbitmq.pp

@@ -0,0 +1,37 @@
# Wires aodh to the AMQP broker (rabbit transport URL, optional kombu
# SSL material) and to its MariaDB database.
class packstack::aodh::rabbitmq ()
{
    $kombu_ssl_ca_certs = hiera('CONFIG_AMQP_SSL_CACERT_FILE', undef)
    $kombu_ssl_keyfile = hiera('CONFIG_AODH_SSL_KEY', undef)
    $kombu_ssl_certfile = hiera('CONFIG_AODH_SSL_CERT', undef)

    $aodh_db_pw = hiera('CONFIG_AODH_DB_PW')
    $aodh_mariadb_host = hiera('CONFIG_MARIADB_HOST_URL')

    $rabbit_host = hiera('CONFIG_AMQP_HOST_URL')
    $rabbit_port = hiera('CONFIG_AMQP_CLIENTS_PORT')
    $rabbit_userid = hiera('CONFIG_AMQP_AUTH_USER')
    $rabbit_password = hiera('CONFIG_AMQP_AUTH_PASSWORD')


    # When SSL material is configured, aodh must own it and the aodh
    # services are restarted whenever it changes.
    if $kombu_ssl_keyfile {
      $files_to_set_owner = [ $kombu_ssl_keyfile, $kombu_ssl_certfile ]
      file { $files_to_set_owner:
        owner   => 'aodh',
        group   => 'aodh',
        require => Package['openstack-aodh-common'],
      }
      File[$files_to_set_owner] ~> Service<| tag == 'aodh-service' |>
    }

    # NOTE(review): looked up but never used below -- confirm whether
    # this lookup is kept only for its fail-fast-on-missing-key effect.
    $config_mongodb_host = hiera('CONFIG_MONGODB_HOST_URL')

    class { '::aodh':
      debug              => hiera('CONFIG_DEBUG_MODE'),
      rabbit_use_ssl     => hiera('CONFIG_AMQP_SSL_ENABLED'),
      default_transport_url => "rabbit://${rabbit_userid}:${rabbit_password}@${rabbit_host}:${rabbit_port}/",
      kombu_ssl_ca_certs => $kombu_ssl_ca_certs,
      kombu_ssl_keyfile  => $kombu_ssl_keyfile,
      kombu_ssl_certfile => $kombu_ssl_certfile,
      database_connection => "mysql+pymysql://aodh:${aodh_db_pw}@${aodh_mariadb_host}/aodh",
    }
}

+ 40 - 0
packstack/puppet/modules/packstack/manifests/apache.pp

@@ -0,0 +1,40 @@
# Shared Apache setup for all packstack services that run behind httpd:
# installs apache (keeping foreign vhost configs) and opens one listen
# port per enabled service.
class packstack::apache ()
{
    class {'::apache':
      purge_configs => false,
    }

    if hiera('CONFIG_HORIZON_SSL')  == 'y' {
      ensure_packages(['mod_ssl'], {'ensure' => 'present'})
      Package['mod_ssl'] -> Class['::apache']
      apache::listen { '443': }
    }

    # Keystone port
    apache::listen { '5000': }
    # Keystone admin port
    apache::listen { '35357': }

    if hiera('CONFIG_CEILOMETER_INSTALL') == 'y' {
      if hiera('CONFIG_CEILOMETER_SERVICE_NAME') == 'httpd' {
        # Ceilometer port
        apache::listen { '8777': }
      }
    }

    if hiera('CONFIG_AODH_INSTALL') == 'y' {
      # Aodh port
      apache::listen { '8042': }
    }

    if hiera('CONFIG_GNOCCHI_INSTALL') == 'y' {
      # Gnocchi port
      apache::listen { '8041': }
    }

    if hiera('CONFIG_PANKO_INSTALL') == 'y' {
      # Panko port. 8779 is Trove's API port (see FIREWALL_TROVE_API_RULES);
      # Panko's API default is 8977, so listen there to avoid a clash.
      apache::listen { '8977': }
    }
}
+

+ 90 - 0
packstack/puppet/modules/packstack/manifests/ceilometer.pp

@@ -0,0 +1,90 @@
# Configures the Ceilometer collector, agents and API: mongodb-backed
# database, optional redis coordination, and an optional gnocchi
# dispatcher for metering data.
class packstack::ceilometer ()
{
    create_resources(packstack::firewall, hiera('FIREWALL_CEILOMETER_RULES', {}))

    $config_mongodb_host = hiera('CONFIG_MONGODB_HOST_URL')

    $config_ceilometer_coordination_backend = hiera('CONFIG_CEILOMETER_COORDINATION_BACKEND')

    $config_ceilometer_metering_backend = hiera('CONFIG_CEILOMETER_METERING_BACKEND')

    $config_ceilometer_events_backend = hiera('CONFIG_CEILOMETER_EVENTS_BACKEND')

    $config_gnocchi_host = hiera('CONFIG_KEYSTONE_HOST_URL')

    # Redis coordination needs both the URL and the python client.
    if $config_ceilometer_coordination_backend == 'redis' {
      $redis_host = hiera('CONFIG_REDIS_HOST_URL')
      $redis_port = hiera('CONFIG_REDIS_PORT')
      $coordination_url = "redis://${redis_host}:${redis_port}"

      ensure_resource('package', 'python-redis', {
        name   => 'python-redis',
        tag    => 'openstack',
      })
    } else {
      $coordination_url = ''
    }

    # Standalone API service vs wsgi app under httpd.
    if hiera('CONFIG_CEILOMETER_SERVICE_NAME') == 'ceilometer' {
          $ceilometer_service_name = 'openstack-ceilometer-api'
    } else {
          $ceilometer_service_name = 'httpd'
    }


    class { '::ceilometer::db':
      database_connection => "mongodb://${config_mongodb_host}:27017/ceilometer",
    }

    class { '::ceilometer::collector':
      meter_dispatcher => $config_ceilometer_metering_backend,
      event_dispatcher => $config_ceilometer_events_backend,
    }

    if $config_ceilometer_metering_backend == 'gnocchi' {

      include ::gnocchi::client
      class { '::ceilometer::dispatcher::gnocchi':
        filter_service_activity   => false,
        url                       => "http://${config_gnocchi_host}:8041",
        archive_policy            => 'high',
        resources_definition_file => 'gnocchi_resources.yaml',
      }
    }

    class { '::ceilometer::agent::notification': }

    class { '::ceilometer::agent::auth':
      auth_url      => hiera('CONFIG_KEYSTONE_PUBLIC_URL_VERSIONLESS'),
      auth_password => hiera('CONFIG_CEILOMETER_KS_PW'),
      auth_region   => hiera('CONFIG_KEYSTONE_REGION'),
    }

    class { '::ceilometer::agent::central':
      coordination_url => $coordination_url,
    }

    $bind_host = hiera('CONFIG_IP_VERSION') ? {
      'ipv6'  => '::0',
      default => '0.0.0.0',
      # TO-DO(mmagr): Add IPv6 support when hostnames are used
    }

    class { '::ceilometer::keystone::authtoken':
      auth_uri => hiera('CONFIG_KEYSTONE_PUBLIC_URL'),
      auth_url => hiera('CONFIG_KEYSTONE_ADMIN_URL'),
      password => hiera('CONFIG_CEILOMETER_KS_PW'),
    }

    class { '::ceilometer::api':
      host         => $bind_host,
      api_workers  => hiera('CONFIG_SERVICE_WORKERS'),
      service_name => $ceilometer_service_name,
    }

    if $ceilometer_service_name == 'httpd' {
       class { '::ceilometer::wsgi::apache':
         ssl => false,
       }
    }
}

+ 8 - 0
packstack/puppet/modules/packstack/manifests/ceilometer/nova_disabled.pp

@@ -0,0 +1,8 @@
# Ensures the 'nova' group exists before ceilometer is configured on
# hosts where nova itself is not installed (per the class name), so
# Class['ceilometer'] can rely on the group being present.
class packstack::ceilometer::nova_disabled ()
{
    group { 'nova':
      ensure => present,
    }

    Group['nova'] -> Class['ceilometer']
}

+ 31 - 0
packstack/puppet/modules/packstack/manifests/ceilometer/rabbitmq.pp

@@ -0,0 +1,31 @@
# Wires ceilometer to the AMQP broker: rabbit transport URL, telemetry
# secret and optional kombu SSL material.
class packstack::ceilometer::rabbitmq ()
{
    $kombu_ssl_ca_certs = hiera('CONFIG_AMQP_SSL_CACERT_FILE', undef)
    $kombu_ssl_keyfile = hiera('CONFIG_CEILOMETER_SSL_KEY', undef)
    $kombu_ssl_certfile = hiera('CONFIG_CEILOMETER_SSL_CERT', undef)

    $rabbit_host = hiera('CONFIG_AMQP_HOST_URL')
    $rabbit_port = hiera('CONFIG_AMQP_CLIENTS_PORT')
    $rabbit_userid = hiera('CONFIG_AMQP_AUTH_USER')
    $rabbit_password = hiera('CONFIG_AMQP_AUTH_PASSWORD')

    # When SSL material is configured, ceilometer must own it and the
    # ceilometer services restart whenever it changes.
    if $kombu_ssl_keyfile {
      $files_to_set_owner = [ $kombu_ssl_keyfile, $kombu_ssl_certfile ]
      file { $files_to_set_owner:
        owner   => 'ceilometer',
        group   => 'ceilometer',
        require => Package['openstack-ceilometer-common'],
      }
      File[$files_to_set_owner] ~> Service<| tag == 'ceilometer-service' |>
    }

    class { '::ceilometer':
      telemetry_secret   => hiera('CONFIG_CEILOMETER_SECRET'),
      debug              => hiera('CONFIG_DEBUG_MODE'),
      rabbit_use_ssl     => hiera('CONFIG_AMQP_SSL_ENABLED'),
      default_transport_url => "rabbit://${rabbit_userid}:${rabbit_password}@${rabbit_host}:${rabbit_port}/",
      kombu_ssl_ca_certs => $kombu_ssl_ca_certs,
      kombu_ssl_keyfile  => $kombu_ssl_keyfile,
      kombu_ssl_certfile => $kombu_ssl_certfile,
    }
}

+ 101 - 0
packstack/puppet/modules/packstack/manifests/chrony.pp

@@ -0,0 +1,101 @@
# Installs chrony as the NTP daemon: writes /etc/chrony.conf from the
# configured servers, stops any running chronyd/ntpd, does a one-shot
# ntpdate sync, then starts chronyd.  The ordering chain at the bottom
# enforces that exact sequence.
class packstack::chrony ()
{
    $cfg_ntp_server_def = hiera('CONFIG_NTP_SERVER_DEF')
    $cfg_ntp_servers    = hiera('CONFIG_NTP_SERVERS')

    # NOTE(review): the leading spaces of this heredoc-style string are
    # written to /etc/chrony.conf verbatim -- confirm chronyd accepts
    # indented directives.
    $config_content = "
    # Use public servers from the pool.ntp.org project.
    # Please consider joining the pool (http://www.pool.ntp.org/join.html).
    ${cfg_ntp_server_def}

    # Ignore stratum in source selection.
    stratumweight 0

    # Record the rate at which the system clock gains/losses time.
    driftfile /var/lib/chrony/drift

    # Enable kernel RTC synchronization.
    rtcsync

    # In first three updates step the system clock instead of slew
    # if the adjustment is larger than 10 seconds.
    makestep 10 3

    # Allow NTP client access from local network.
    #allow 192.168/16

    # Listen for commands only on localhost.
    bindcmdaddress 127.0.0.1
    bindcmdaddress ::1

    # Serve time even if not synchronized to any NTP server.
    #local stratum 10

    keyfile /etc/chrony.keys

    # Specify the key used as password for chronyc.
    commandkey 1

    # Generate command key if missing.
    generatecommandkey

    # Disable logging of client accesses.
    noclientlog

    # Send a message to syslog if a clock adjustment is larger than 0.5 seconds.
    logchange 0.5

    logdir /var/log/chrony
    #log measurements statistics tracking
    "

    package { 'chrony':
      ensure => 'installed',
      name   => 'chrony',
    }

    package { 'ntpdate':
      ensure => 'installed',
      name   => 'ntpdate',
    }

    file { 'chrony_conf':
      ensure  => file,
      path    => '/etc/chrony.conf',
      mode    => '0644',
      content => $config_content,
    }

    # Stop chronyd (only if currently running) so ntpdate can bind.
    exec { 'stop-chronyd':
      path    => '/bin:/usr/bin:/sbin:/usr/sbin',
      command => 'systemctl stop chronyd.service',
      onlyif  => 'systemctl status chronyd.service'
    }

    # for cases where ntpd is running instead of default chronyd
    service { 'ntpd':
      ensure => stopped,
      enable => false,
    }

    # One-shot clock sync before chronyd takes over.
    # NOTE(review): this exec has no unless/refreshonly, so it re-runs
    # on every puppet apply -- confirm that is intended.
    exec { 'ntpdate':
      command => "/usr/sbin/ntpdate ${cfg_ntp_servers}",
      tries   => 3,
    }

    service { 'chronyd':
      ensure     => running,
      enable     => true,
      name       => 'chronyd',
      hasstatus  => true,
      hasrestart => true,
    }

    Package['chrony'] ->
    Package['ntpdate'] ->
    File['chrony_conf'] ->
    Exec['stop-chronyd'] ->
    Service['ntpd'] ->
    Exec['ntpdate'] ->
    Service['chronyd']
}

+ 73 - 0
packstack/puppet/modules/packstack/manifests/cinder.pp

@@ -0,0 +1,73 @@
# Configures the Cinder API, scheduler and volume services.  The
# default volume type is derived from the FIRST configured backend.
class packstack::cinder ()
{
    create_resources(packstack::firewall, hiera('FIREWALL_CINDER_RULES', {}))
    create_resources(packstack::firewall, hiera('FIREWALL_CINDER_API_RULES', {}))

    $cinder_backends = hiera_array('CONFIG_CINDER_BACKEND')

    case $cinder_backends[0] {
      'lvm':       { $default_volume_type = 'iscsi' }
      'gluster':   { $default_volume_type = 'glusterfs' }
      'nfs':       { $default_volume_type = 'nfs' }
      'vmdk':      { $default_volume_type = 'vmdk' }
      'netapp':    { $default_volume_type = 'netapp' }
      'solidfire': { $default_volume_type = 'solidfire' }
      default:     { $default_volume_type = 'iscsi' }
    }

    cinder_config {
      'DEFAULT/glance_host': value => hiera('CONFIG_STORAGE_HOST_URL');
    }

    $bind_host = hiera('CONFIG_IP_VERSION') ? {
      'ipv6'  => '::0',
      default => '0.0.0.0',
      # TO-DO(mmagr): Add IPv6 support when hostnames are used
    }

    class { '::cinder::keystone::authtoken':
      auth_uri => hiera('CONFIG_KEYSTONE_PUBLIC_URL_VERSIONLESS'),
      auth_url => hiera('CONFIG_KEYSTONE_ADMIN_URL'),
      password => hiera('CONFIG_CINDER_KS_PW'),
    }

    class { '::cinder::api':
      bind_host               => $bind_host,
      nova_catalog_info       => 'compute:nova:publicURL',
      nova_catalog_admin_info => 'compute:nova:adminURL',
      service_workers         => hiera('CONFIG_SERVICE_WORKERS'),
      default_volume_type     => $default_volume_type,
    }

    class { '::cinder::scheduler': }

    class { '::cinder::volume': }

    class { '::cinder::client': }

    $cinder_keystone_admin_username = hiera('CONFIG_KEYSTONE_ADMIN_USERNAME')
    $cinder_keystone_admin_password = hiera('CONFIG_KEYSTONE_ADMIN_PW')
    $cinder_keystone_auth_url = hiera('CONFIG_KEYSTONE_PUBLIC_URL')
    $cinder_keystone_api = hiera('CONFIG_KEYSTONE_API_VERSION')

    # Cinder::Type requires keystone credentials
    # (resource defaults applied to every Cinder::Type declared later).
    Cinder::Type {
      os_password    => hiera('CONFIG_CINDER_KS_PW'),
      os_tenant_name => 'services',
      os_username    => 'cinder',
      os_auth_url    => hiera('CONFIG_KEYSTONE_PUBLIC_URL'),
    }

    class { '::cinder::backends':
      enabled_backends => hiera_array('CONFIG_CINDER_BACKEND'),
    }

    # Optional daily purge of soft-deleted database rows.
    $db_purge = hiera('CONFIG_CINDER_DB_PURGE_ENABLE')
    if $db_purge {
      class { '::cinder::cron::db_purge':
        hour        => '*/24',
        destination => '/dev/null',
        age         => 1
      }
    }
}

+ 16 - 0
packstack/puppet/modules/packstack/manifests/cinder/backend/gluster.pp

@@ -0,0 +1,16 @@
+# Configure the GlusterFS backend for cinder-volume.
+class packstack::cinder::backend::gluster ()
+{
+    # The FUSE client package is needed so cinder can mount the shares.
+    ensure_packages(['glusterfs-fuse'], {'ensure' => 'present'})
+
+    cinder::backend::glusterfs { 'gluster':
+      glusterfs_shares        => hiera_array('CONFIG_CINDER_GLUSTER_MOUNTS'),
+      glusterfs_shares_config => '/etc/cinder/glusterfs_shares.conf',
+      require                 => Package['glusterfs-fuse'],
+    }
+
+    # Register a volume type bound to this backend once the API is up.
+    cinder::type { 'glusterfs':
+      set_key   => 'volume_backend_name',
+      set_value => 'gluster',
+      require   => Class['cinder::api'],
+    }
+}

+ 96 - 0
packstack/puppet/modules/packstack/manifests/cinder/backend/lvm.pp

@@ -0,0 +1,96 @@
+# Configure the LVM/iSCSI backend for cinder-volume.  When
+# CONFIG_CINDER_VOLUMES_CREATE is 'y' a file-backed test volume group is
+# created on a loop device and re-attached on every boot.
+class packstack::cinder::backend::lvm ()
+{
+    $create_cinder_volume = hiera('CONFIG_CINDER_VOLUMES_CREATE')
+
+    if $create_cinder_volume == 'y' {
+        # Find an available loop device
+        $loop_dev = chomp(generate('/usr/sbin/losetup', '-f'))
+
+        # Creates the backing file and the cinder-volumes VG on the
+        # loop device found above.
+        class { '::cinder::setup_test_volume':
+          size            => hiera('CONFIG_CINDER_VOLUMES_SIZE'),
+          loopback_device => $loop_dev,
+          volume_path     => '/var/lib/cinder',
+          volume_name     => 'cinder-volumes',
+        }
+
+        # Add loop device on boot
+        # Pre-EL7 systems have no systemd, so the loop device is
+        # re-created from rc.local instead of a oneshot unit.
+        $el_releases = ['RedHat', 'CentOS', 'Scientific']
+        if $::operatingsystem in $el_releases and (versioncmp($::operatingsystemmajrelease, '7') < 0) {
+
+          file_line{ 'rc.local_losetup_cinder_volume':
+            path  => '/etc/rc.d/rc.local',
+            match => '^.*/var/lib/cinder/cinder-volumes.*$',
+            line  => 'losetup -f /var/lib/cinder/cinder-volumes && service openstack-cinder-volume restart',
+          }
+
+          # rc.local must be executable for the line above to run at boot.
+          file { '/etc/rc.d/rc.local':
+            mode  => '0755',
+          }
+
+        } else {
+
+          # systemd oneshot unit: attaches the loop device before
+          # openstack-cinder-volume starts, detaches it on stop.
+          file { 'openstack-losetup':
+            path    => '/usr/lib/systemd/system/openstack-losetup.service',
+            before  => Service['openstack-losetup'],
+            notify  => Exec['reload systemd files for cinder-volume'],
+            content => '[Unit]
+    Description=Setup cinder-volume loop device
+    DefaultDependencies=false
+    Before=openstack-cinder-volume.service
+    After=local-fs.target
+
+    [Service]
+    Type=oneshot
+    ExecStart=/usr/bin/sh -c \'/usr/sbin/losetup -j /var/lib/cinder/cinder-volumes | /usr/bin/grep /var/lib/cinder/cinder-volumes || /usr/sbin/losetup -f /var/lib/cinder/cinder-volumes\'
+    ExecStop=/usr/bin/sh -c \'/usr/sbin/losetup -j /var/lib/cinder/cinder-volumes | /usr/bin/cut -d : -f 1 | /usr/bin/xargs /usr/sbin/losetup -d\'
+    TimeoutSec=60
+    RemainAfterExit=yes
+
+    [Install]
+    RequiredBy=openstack-cinder-volume.service',
+          }
+
+          # Unit files are only picked up after a daemon-reload.
+          exec { 'reload systemd files for cinder-volume':
+            command     => '/usr/bin/systemctl daemon-reload',
+            refreshonly => true,
+            before      => Service['openstack-losetup'],
+          }
+
+          service { 'openstack-losetup':
+            ensure  => running,
+            enable  => true,
+            require => Class['cinder::setup_test_volume'],
+          }
+
+        }
+    }
+    else {
+        ensure_packages(['lvm2'], {'ensure' => 'present'})
+    }
+
+
+    # NOTE(review): Package['lvm2'] is only declared in the else branch
+    # above, yet the resources below require it unconditionally --
+    # presumably lvm2 is pulled in elsewhere when volumes are created;
+    # confirm the catalog compiles with CONFIG_CINDER_VOLUMES_CREATE=y.
+    file_line { 'snapshot_autoextend_threshold':
+      path    => '/etc/lvm/lvm.conf',
+      match   => '^\s*snapshot_autoextend_threshold +=.*',
+      line    => '   snapshot_autoextend_threshold = 80',
+      require => Package['lvm2'],
+    }
+
+    file_line { 'snapshot_autoextend_percent':
+      path    => '/etc/lvm/lvm.conf',
+      match   => '^\s*snapshot_autoextend_percent +=.*',
+      line    => '   snapshot_autoextend_percent = 20',
+      require => Package['lvm2'],
+    }
+
+    cinder::backend::iscsi { 'lvm':
+      iscsi_ip_address => hiera('CONFIG_STORAGE_HOST_URL'),
+      require          => Package['lvm2'],
+    }
+
+    # Volume type selecting this backend by its backend name.
+    cinder::type { 'iscsi':
+      set_key   => 'volume_backend_name',
+      set_value => 'lvm',
+      require   => Class['cinder::api'],
+    }
+}

+ 133 - 0
packstack/puppet/modules/packstack/manifests/cinder/backend/netapp.pp

@@ -0,0 +1,133 @@
+# Copyright (c) – 2014, Ryan Hefner.  All rights reserved.
+# Configure a NetApp backend for cinder-volume.  The driver options
+# passed to cinder::backend::netapp depend on the storage family
+# (ontap_cluster, ontap_7mode, eseries) and, for ONTAP families, on the
+# storage protocol (nfs, iscsi, fc) chosen in the answer file.
+class packstack::cinder::backend::netapp ()
+{
+    $netapp_storage_family = hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY')
+    $netapp_storage_protocol = hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL')
+    $netapp_backend_name = 'netapp'
+
+    # --- Clustered Data ONTAP ---
+    if $netapp_storage_family == 'ontap_cluster' {
+      if $netapp_storage_protocol == 'nfs' {
+        cinder::backend::netapp { $netapp_backend_name:
+          netapp_login              => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+          netapp_password           => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+          netapp_server_hostname    => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+          netapp_server_port        => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+          netapp_storage_family     => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+          netapp_storage_protocol   => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+          netapp_transport_type     => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+          netapp_vserver            => hiera('CONFIG_CINDER_NETAPP_VSERVER'),
+          expiry_thres_minutes      => hiera('CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES'),
+          thres_avl_size_perc_start => hiera('CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START'),
+          thres_avl_size_perc_stop  => hiera('CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP'),
+          nfs_shares                => hiera_array('CONFIG_CINDER_NETAPP_NFS_SHARES'),
+          nfs_shares_config         => hiera('CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG'),
+        }
+        ensure_packages(['nfs-utils'], {'ensure' => 'present'})
+      }
+      elsif $netapp_storage_protocol == 'iscsi' {
+        cinder::backend::netapp { $netapp_backend_name:
+          netapp_login            => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+          netapp_password         => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+          netapp_server_hostname  => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+          netapp_server_port      => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+          netapp_size_multiplier  => hiera('CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER'),
+          netapp_storage_family   => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+          netapp_storage_protocol => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+          netapp_transport_type   => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+          netapp_vserver          => hiera('CONFIG_CINDER_NETAPP_VSERVER'),
+        }
+
+        ensure_packages(['iscsi-initiator-utils'], {'ensure' => 'present'})
+      }
+
+      elsif $netapp_storage_protocol == 'fc' {
+        cinder::backend::netapp { $netapp_backend_name:
+          netapp_login            => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+          netapp_password         => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+          netapp_server_hostname  => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+          netapp_server_port      => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+          netapp_size_multiplier  => hiera('CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER'),
+          netapp_storage_family   => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+          netapp_storage_protocol => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+          netapp_transport_type   => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+          netapp_vserver          => hiera('CONFIG_CINDER_NETAPP_VSERVER'),
+        }
+      }
+    }
+    # --- 7-mode Data ONTAP (uses vfiler/volume_list instead of vserver) ---
+    elsif $netapp_storage_family == 'ontap_7mode' {
+      if $netapp_storage_protocol == 'nfs' {
+        cinder::backend::netapp { $netapp_backend_name:
+          netapp_login              => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+          netapp_password           => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+          netapp_server_hostname    => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+          netapp_server_port        => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+          netapp_storage_family     => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+          netapp_storage_protocol   => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+          netapp_transport_type     => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+          expiry_thres_minutes      => hiera('CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES'),
+          thres_avl_size_perc_start => hiera('CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START'),
+          thres_avl_size_perc_stop  => hiera('CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP'),
+          nfs_shares                => hiera_array('CONFIG_CINDER_NETAPP_NFS_SHARES'),
+          nfs_shares_config         => hiera('CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG'),
+        }
+
+        ensure_packages(['nfs-utils'], {'ensure' => 'present'})
+      }
+      elsif $netapp_storage_protocol == 'iscsi' {
+        cinder::backend::netapp { $netapp_backend_name:
+          netapp_login            => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+          netapp_password         => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+          netapp_server_hostname  => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+          netapp_server_port      => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+          netapp_size_multiplier  => hiera('CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER'),
+          netapp_storage_family   => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+          netapp_storage_protocol => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+          netapp_transport_type   => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+          netapp_vfiler           => hiera('CONFIG_CINDER_NETAPP_VFILER'),
+          netapp_volume_list      => hiera('CONFIG_CINDER_NETAPP_VOLUME_LIST'),
+        }
+
+        ensure_packages(['iscsi-initiator-utils'], {'ensure' => 'present'})
+      }
+
+      elsif $netapp_storage_protocol == 'fc' {
+        cinder::backend::netapp { $netapp_backend_name:
+          netapp_login                => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+          netapp_password             => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+          netapp_server_hostname      => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+          netapp_server_port          => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+          netapp_size_multiplier      => hiera('CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER'),
+          netapp_storage_family       => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+          netapp_storage_protocol     => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+          netapp_transport_type       => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+          netapp_vfiler               => hiera('CONFIG_CINDER_NETAPP_VFILER'),
+          netapp_partner_backend_name => hiera('CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME'),
+          netapp_volume_list          => hiera('CONFIG_CINDER_NETAPP_VOLUME_LIST'),
+        }
+      }
+    }
+    # --- E-Series (single configuration regardless of protocol) ---
+    elsif $netapp_storage_family == 'eseries' {
+      cinder::backend::netapp { $netapp_backend_name:
+        netapp_login             => hiera('CONFIG_CINDER_NETAPP_LOGIN'),
+        netapp_password          => hiera('CONFIG_CINDER_NETAPP_PASSWORD'),
+        netapp_server_hostname   => hiera('CONFIG_CINDER_NETAPP_HOSTNAME'),
+        netapp_server_port       => hiera('CONFIG_CINDER_NETAPP_SERVER_PORT'),
+        netapp_storage_family    => hiera('CONFIG_CINDER_NETAPP_STORAGE_FAMILY'),
+        netapp_storage_protocol  => hiera('CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL'),
+        netapp_transport_type    => hiera('CONFIG_CINDER_NETAPP_TRANSPORT_TYPE'),
+        netapp_controller_ips    => hiera('CONFIG_CINDER_NETAPP_CONTROLLER_IPS'),
+        netapp_sa_password       => hiera('CONFIG_CINDER_NETAPP_SA_PASSWORD'),
+        netapp_storage_pools     => hiera('CONFIG_CINDER_NETAPP_STORAGE_POOLS'),
+        netapp_eseries_host_type => hiera('CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE'),
+        netapp_webservice_path   => hiera('CONFIG_CINDER_NETAPP_WEBSERVICE_PATH'),
+      }
+
+        ensure_packages(['iscsi-initiator-utils'], {'ensure' => 'present'})
+    }
+
+    # NOTE(review): there is no else branch -- an unrecognized
+    # family/protocol combination declares no backend, yet the volume
+    # type below is still created.  Presumably the installer validates
+    # these values earlier; confirm against the packstack plugin.
+    cinder::type { $netapp_backend_name:
+      set_key   => 'volume_backend_name',
+      set_value => $netapp_backend_name,
+      require   => Class['cinder::api'],
+    }
+}

+ 16 - 0
packstack/puppet/modules/packstack/manifests/cinder/backend/nfs.pp

@@ -0,0 +1,16 @@
+# Configure the generic NFS backend for cinder-volume.
+class packstack::cinder::backend::nfs ()
+{
+    # Client utilities required to mount the exported shares.
+    ensure_packages(['nfs-utils'], {'ensure' => 'present'})
+
+    cinder::backend::nfs { 'nfs':
+      nfs_servers       => hiera_array('CONFIG_CINDER_NFS_MOUNTS'),
+      nfs_shares_config => '/etc/cinder/nfs_shares.conf',
+      require           => Package['nfs-utils'],
+    }
+
+    # Expose the backend through a matching volume type.
+    cinder::type { 'nfs':
+      set_key   => 'volume_backend_name',
+      set_value => 'nfs',
+      require   => Class['cinder::api'],
+    }
+}

+ 20 - 0
packstack/puppet/modules/packstack/manifests/cinder/backend/solidfire.pp

@@ -0,0 +1,20 @@
+# Copyright (c) – 2016, Edward Balduf. All rights reserved.
+# Configure the SolidFire backend for cinder-volume.
+class packstack::cinder::backend::solidfire ()
+{
+    $solidfire_backend_name = 'solidfire'
+
+    # NOTE: the address/credential lookups were previously rotated
+    # (san_ip read ..._LOGIN, san_login read ..._PASSWORD and
+    # san_password read ..._HOSTNAME); each driver option now reads its
+    # matching CONFIG_CINDER_SOLIDFIRE_* key.
+    cinder::backend::solidfire { $solidfire_backend_name :
+      san_ip              => hiera('CONFIG_CINDER_SOLIDFIRE_HOSTNAME'),
+      san_login           => hiera('CONFIG_CINDER_SOLIDFIRE_LOGIN'),
+      san_password        => hiera('CONFIG_CINDER_SOLIDFIRE_PASSWORD'),
+      volume_backend_name => $solidfire_backend_name,
+    }
+
+    # SolidFire volumes are attached over iSCSI.
+    ensure_packages(['iscsi-initiator-utils'], {'ensure' => 'present'})
+
+    # Volume type selecting this backend by its backend name.
+    cinder::type { $solidfire_backend_name:
+      set_key   => 'volume_backend_name',
+      set_value => $solidfire_backend_name,
+      require   => Class['cinder::api'],
+    }
+}

+ 14 - 0
packstack/puppet/modules/packstack/manifests/cinder/backend/vmdk.pp

@@ -0,0 +1,14 @@
+# Configure the VMware VMDK backend for cinder-volume.
+class packstack::cinder::backend::vmdk ()
+{
+    $backend = 'vmdk'
+
+    # vCenter connection details come from the packstack answer file.
+    cinder::backend::vmdk { $backend:
+      host_ip       => hiera('CONFIG_VCENTER_HOST'),
+      host_username => hiera('CONFIG_VCENTER_USER'),
+      host_password => hiera('CONFIG_VCENTER_PASSWORD'),
+    }
+
+    # Volume type selecting this backend by its backend name.
+    cinder::type { $backend:
+      set_key   => 'volume_backend_name',
+      set_value => $backend,
+      require   => Class['cinder::api'],
+    }
+}

+ 12 - 0
packstack/puppet/modules/packstack/manifests/cinder/backup.pp

@@ -0,0 +1,12 @@
+# Configure cinder-backup with the Swift backup driver.
+class packstack::cinder::backup ()
+{
+    class { '::cinder::backup': }
+
+    # Backups are uploaded to the Swift proxy on the controller node.
+    $ctrl_host = hiera('CONFIG_KEYSTONE_HOST_URL')
+    class { '::cinder::backup::swift':
+      backup_swift_url => "http://${ctrl_host}:8080/v1/AUTH_",
+    }
+
+    # Restart the backup service whenever the API class changes.
+    Class['cinder::api'] ~> Service['cinder-backup']
+}

+ 4 - 0
packstack/puppet/modules/packstack/manifests/cinder/ceilometer.pp

@@ -0,0 +1,4 @@
+# Wire cinder into ceilometer via the upstream cinder::ceilometer class.
+class packstack::cinder::ceilometer ()
+{
+    class { '::cinder::ceilometer': }
+}

+ 34 - 0
packstack/puppet/modules/packstack/manifests/cinder/rabbitmq.pp

@@ -0,0 +1,34 @@
+# Configure the cinder base class: RabbitMQ transport URL, database
+# connection and (optional) SSL material for the AMQP client.
+class packstack::cinder::rabbitmq ()
+{
+    $db_pw   = hiera('CONFIG_CINDER_DB_PW')
+    $db_host = hiera('CONFIG_MARIADB_HOST_URL')
+
+    # Kombu SSL options default to undef when AMQP SSL is not in use.
+    $kombu_ssl_ca_certs = hiera('CONFIG_AMQP_SSL_CACERT_FILE', undef)
+    $kombu_ssl_keyfile  = hiera('CONFIG_CINDER_SSL_KEY', undef)
+    $kombu_ssl_certfile = hiera('CONFIG_CINDER_SSL_CERT', undef)
+
+    $amqp_host = hiera('CONFIG_AMQP_HOST_URL')
+    $amqp_port = hiera('CONFIG_AMQP_CLIENTS_PORT')
+    $amqp_user = hiera('CONFIG_AMQP_AUTH_USER')
+    $amqp_pw   = hiera('CONFIG_AMQP_AUTH_PASSWORD')
+
+    # The key/cert files must be readable by the cinder service user.
+    if $kombu_ssl_keyfile {
+      file { [$kombu_ssl_keyfile, $kombu_ssl_certfile]:
+        owner   => 'cinder',
+        group   => 'cinder',
+        require => Class['cinder'],
+        notify  => Service['cinder-api'],
+      }
+    }
+
+    class { '::cinder':
+      rabbit_use_ssl        => hiera('CONFIG_AMQP_SSL_ENABLED'),
+      default_transport_url => "rabbit://${amqp_user}:${amqp_pw}@${amqp_host}:${amqp_port}/",
+      database_connection   => "mysql+pymysql://cinder:${db_pw}@${db_host}/cinder",
+      debug                 => hiera('CONFIG_DEBUG_MODE'),
+      kombu_ssl_ca_certs    => $kombu_ssl_ca_certs,
+      kombu_ssl_keyfile     => $kombu_ssl_keyfile,
+      kombu_ssl_certfile    => $kombu_ssl_certfile,
+    }
+}

+ 52 - 0
packstack/puppet/modules/packstack/manifests/firewall.pp

@@ -0,0 +1,52 @@
+# Create firewall rules to allow only the FIREWALL_ALLOWED
+# hosts that need to connect via FIREWALL_PORTS
+# using FIREWALL_CHAIN
+
+define packstack::firewall (
+  $host,
+  $service_name,
+  $chain = 'INPUT',
+  $ports = undef,
+  $proto = 'tcp'
+) {
+  $ip_version = hiera('CONFIG_IP_VERSION')
+
+  # ip6tables manages the rules on IPv6 deployments, iptables otherwise.
+  $provider = $ip_version ? {
+    'ipv6'  => 'ip6tables',
+    default => 'iptables',
+    # TO-DO(mmagr): Add IPv6 support when hostnames are used
+  }
+
+  # 'ALL' means any source address (family-appropriate wildcard CIDR);
+  # anything else is passed through as the source host.
+  if $host == 'ALL' {
+    $source = $ip_version ? {
+      'ipv6'  => '::/0',
+      default => '0.0.0.0/0',
+    }
+  } else {
+    $source = $host
+  }
+
+  $direction = $chain ? {
+    'OUTPUT' => 'outgoing',
+    default  => 'incoming',
+  }
+
+  # dport is only set when a port list was supplied; otherwise the rule
+  # matches on protocol and source alone.
+  if $ports != undef {
+    firewall { "001 ${service_name} ${direction} ${title}":
+      chain    => $chain,
+      proto    => $proto,
+      dport    => $ports,
+      action   => 'accept',
+      source   => $source,
+      provider => $provider,
+    }
+  } else {
+    firewall { "001 ${service_name} ${direction} ${title}":
+      chain    => $chain,
+      proto    => $proto,
+      action   => 'accept',
+      source   => $source,
+      provider => $provider,
+    }
+  }
+}

+ 56 - 0
packstack/puppet/modules/packstack/manifests/glance.pp

@@ -0,0 +1,56 @@
+# Configure the glance API and registry services, including their
+# keystone authtoken sections and firewall rules.
+class packstack::glance ()
+{
+    create_resources(packstack::firewall, hiera('FIREWALL_GLANCE_RULES', {}))
+
+    # CONFIG_GLANCE_DB_PW is the database password (distinct from the
+    # keystone service password CONFIG_GLANCE_KS_PW used below); the
+    # local was previously misnamed $glance_ks_pw.  The unused
+    # $glance_cfg_ctrl_host lookup was dropped.
+    $glance_db_pw        = hiera('CONFIG_GLANCE_DB_PW')
+    $glance_mariadb_host = hiera('CONFIG_MARIADB_HOST_URL')
+
+    # glance option bind_host requires address without brackets
+    $bind_host = hiera('CONFIG_IP_VERSION') ? {
+      'ipv6'  => '::0',
+      default => '0.0.0.0',
+      # TO-DO(mmagr): Add IPv6 support when hostnames are used
+    }
+    # magical hack for magical config - glance option registry_host requires brackets
+    $registry_host = hiera('CONFIG_IP_VERSION') ? {
+      'ipv6'  => '[::0]',
+      default => '0.0.0.0',
+      # TO-DO(mmagr): Add IPv6 support when hostnames are used
+    }
+    # Only the swift backend changes the default store; every other
+    # configured backend stores images on the local filesystem.
+    $default_store = hiera('CONFIG_GLANCE_BACKEND') ? {
+      'swift' => 'swift',
+      default => 'file',
+    }
+
+    class { '::glance::api::authtoken':
+      auth_uri => hiera('CONFIG_KEYSTONE_PUBLIC_URL'),
+      auth_url => hiera('CONFIG_KEYSTONE_ADMIN_URL'),
+      password => hiera('CONFIG_GLANCE_KS_PW'),
+    }
+
+    class { '::glance::api':
+      bind_host           => $bind_host,
+      registry_host       => $registry_host,
+      pipeline            => 'keystone',
+      database_connection => "mysql+pymysql://glance:${glance_db_pw}@${glance_mariadb_host}/glance",
+      debug               => hiera('CONFIG_DEBUG_MODE'),
+      os_region_name      => hiera('CONFIG_KEYSTONE_REGION'),
+      workers             => hiera('CONFIG_SERVICE_WORKERS'),
+      stores              => ['file', 'http', 'swift'],
+      default_store       => $default_store,
+    }
+
+    class { '::glance::registry::authtoken':
+      auth_uri => hiera('CONFIG_KEYSTONE_PUBLIC_URL'),
+      auth_url => hiera('CONFIG_KEYSTONE_ADMIN_URL'),
+      password => hiera('CONFIG_GLANCE_KS_PW'),
+    }
+
+    class { '::glance::registry':
+      bind_host           => $bind_host,
+      database_connection => "mysql+pymysql://glance:${glance_db_pw}@${glance_mariadb_host}/glance",
+      debug               => hiera('CONFIG_DEBUG_MODE'),
+      workers             => hiera('CONFIG_SERVICE_WORKERS'),
+    }
+}

+ 7 - 0
packstack/puppet/modules/packstack/manifests/glance/backend/file.pp

@@ -0,0 +1,7 @@
+# Configure glance's file store to keep images on the local filesystem.
+class packstack::glance::backend::file ()
+{
+    # TO-DO: Make this configurable
+    class { '::glance::backend::file':
+      filesystem_store_datadir => '/var/lib/glance/images/',
+    }
+}

+ 0 - 0
packstack/puppet/modules/packstack/manifests/glance/backend/swift.pp


Some files were not shown because too many files changed in this diff