From 7f8865278cd733954166a2dfff577826ae12d9e3 Mon Sep 17 00:00:00 2001 From: Miroslav-Anashkin Date: Wed, 9 Oct 2013 18:16:35 +0400 Subject: [PATCH 01/15] Fix for [PRD-1792] MySQL wants to reverse resolve Turned off name resolution by default for non-HA configurations. --- deployment/puppet/mysql/manifests/config.pp | 1 + deployment/puppet/mysql/manifests/server.pp | 4 ++++ deployment/puppet/mysql/templates/my.cnf.erb | 6 ++++++ 3 files changed, 11 insertions(+) diff --git a/deployment/puppet/mysql/manifests/config.pp b/deployment/puppet/mysql/manifests/config.pp index 655bb1dd1..730d20110 100644 --- a/deployment/puppet/mysql/manifests/config.pp +++ b/deployment/puppet/mysql/manifests/config.pp @@ -50,6 +50,7 @@ $use_syslog = false, $custom_setup_class = undef, $server_id = $mysql::params::server_id, + $mysql_skip_name_resolve = false, ) inherits mysql::params { if $custom_setup_class != "pacemaker_mysql" { diff --git a/deployment/puppet/mysql/manifests/server.pp b/deployment/puppet/mysql/manifests/server.pp index b8e436390..eb505a44e 100644 --- a/deployment/puppet/mysql/manifests/server.pp +++ b/deployment/puppet/mysql/manifests/server.pp @@ -41,6 +41,10 @@ Class['mysql::server'] -> Class['mysql::config'] Class['mysql'] -> Class['mysql::server'] + class{ 'mysql::config': + mysql_skip_name_resolve => $mysql_skip_name_resolve, + } + create_resources( 'class', { 'mysql::config' => $config_hash }) # exec { "debug-mysql-server-installation" : # command => "/usr/bin/yum -d 10 -e 10 -y install MySQL-server-5.5.28-6 2>&1 | tee mysql_install.log", diff --git a/deployment/puppet/mysql/templates/my.cnf.erb b/deployment/puppet/mysql/templates/my.cnf.erb index 4afd85616..42fb490f0 100644 --- a/deployment/puppet/mysql/templates/my.cnf.erb +++ b/deployment/puppet/mysql/templates/my.cnf.erb @@ -7,6 +7,9 @@ nice = 0 <% if use_syslog -%> syslog <% end -%> +<% if skip_name_resolve -%> +skip-name-resolve +<% end -%> [mysqld] user = mysql #pid-file = <%= pidfile %> @@ -46,6 +49,9 @@ 
log-bin=mysql-bin relay-log=mysqld-relay-bin replicate-ignore-db=mysql <% end %> +<% if skip_name_resolve -%> +skip-name-resolve +<% end -%> [mysqldump] quick quote-names From 6ed658efd010d554e45f21c4ccea84da51134899 Mon Sep 17 00:00:00 2001 From: Miroslav-Anashkin Date: Wed, 9 Oct 2013 22:36:16 +0400 Subject: [PATCH 02/15] Fix [PRD-1792] MySQL wants to reverse resolve Turned off name resolution for all deployment types. --- deployment/puppet/mysql/manifests/config.pp | 2 +- deployment/puppet/mysql/manifests/server.pp | 6 ++---- .../puppet/osnailyfacter/manifests/cluster_ha_full.pp | 3 ++- deployment/puppet/osnailyfacter/manifests/cluster_simple.pp | 1 + 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/deployment/puppet/mysql/manifests/config.pp b/deployment/puppet/mysql/manifests/config.pp index 730d20110..756a2df89 100644 --- a/deployment/puppet/mysql/manifests/config.pp +++ b/deployment/puppet/mysql/manifests/config.pp @@ -50,7 +50,7 @@ $use_syslog = false, $custom_setup_class = undef, $server_id = $mysql::params::server_id, - $mysql_skip_name_resolve = false, + $skip_name_resolve = false, ) inherits mysql::params { if $custom_setup_class != "pacemaker_mysql" { diff --git a/deployment/puppet/mysql/manifests/server.pp b/deployment/puppet/mysql/manifests/server.pp index eb505a44e..ccf8a95ee 100644 --- a/deployment/puppet/mysql/manifests/server.pp +++ b/deployment/puppet/mysql/manifests/server.pp @@ -41,10 +41,8 @@ Class['mysql::server'] -> Class['mysql::config'] Class['mysql'] -> Class['mysql::server'] - class{ 'mysql::config': - mysql_skip_name_resolve => $mysql_skip_name_resolve, - } - + $config_hash['skip_name_resolve'] = $mysql_skip_name_resolve + create_resources( 'class', { 'mysql::config' => $config_hash }) # exec { "debug-mysql-server-installation" : # command => "/usr/bin/yum -d 10 -e 10 -y install MySQL-server-5.5.28-6 2>&1 | tee mysql_install.log", diff --git a/deployment/puppet/osnailyfacter/manifests/cluster_ha_full.pp 
b/deployment/puppet/osnailyfacter/manifests/cluster_ha_full.pp index 145e911c4..181331d47 100644 --- a/deployment/puppet/osnailyfacter/manifests/cluster_ha_full.pp +++ b/deployment/puppet/osnailyfacter/manifests/cluster_ha_full.pp @@ -258,7 +258,8 @@ cinder_rate_limits => $cinder_rate_limits, horizon_use_ssl => $::horizon_use_ssl, use_unicast_corosync => $::use_unicast_corosync, - nameservers => $::dns_nameservers, + nameservers => $::dns_nameservers, + mysql_skip_name_resolve => true, } if $primary_controller { diff --git a/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp b/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp index 6e4348ec7..9a7d33e98 100644 --- a/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp +++ b/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp @@ -192,6 +192,7 @@ cinder_rate_limits => $cinder_rate_limits, horizon_use_ssl => $horizon_use_ssl, nameservers => $::dns_nameservers, + mysql_skip_name_resolve => true, } nova_config { 'DEFAULT/start_guests_on_host_boot': value => $::fuel_settings['start_guests_on_host_boot'] } nova_config { 'DEFAULT/use_cow_images': value => $::fuel_settings['use_cow_images'] } From cff1dda3f2c74a9a486d6735fc82bc284a2b95c7 Mon Sep 17 00:00:00 2001 From: Sergey Vasilenko Date: Sun, 20 Oct 2013 18:09:27 +0400 Subject: [PATCH 03/15] Type and provider for quantum_floatingip_pool --- .../lib/puppet/type/l3_if_downup.rb | 2 +- .../l23network/manifests/l3/ifconfig.pp | 2 +- .../osnailyfacter/manifests/cluster_simple.pp | 16 +- deployment/puppet/quantum/Rakefile | 5 + .../quantum/lib/puppet/provider/quantum.rb | 12 +- .../quantum_floatingip_pool/quantum.rb | 212 ++++++++++++++++++ .../puppet/type/quantum_floatingip_pool.rb | 34 +++ .../manifests/network/predefined_netwoks.pp | 7 + .../quantum_floatingip_pool/quantum__spec.rb | 146 ++++++++++++ 9 files changed, 423 insertions(+), 13 deletions(-) create mode 100644 
deployment/puppet/quantum/lib/puppet/provider/quantum_floatingip_pool/quantum.rb create mode 100644 deployment/puppet/quantum/lib/puppet/type/quantum_floatingip_pool.rb create mode 100644 deployment/puppet/quantum/spec/provider/quantum_floatingip_pool/quantum__spec.rb diff --git a/deployment/puppet/l23network/lib/puppet/type/l3_if_downup.rb b/deployment/puppet/l23network/lib/puppet/type/l3_if_downup.rb index c0b4b6a76..2c6ae0b0b 100644 --- a/deployment/puppet/l23network/lib/puppet/type/l3_if_downup.rb +++ b/deployment/puppet/l23network/lib/puppet/type/l3_if_downup.rb @@ -54,7 +54,7 @@ end newparam(:check_by_ping_timeout) do - defaultto(120) + defaultto(30) end def refresh diff --git a/deployment/puppet/l23network/manifests/l3/ifconfig.pp b/deployment/puppet/l23network/manifests/l3/ifconfig.pp index 159c83ce4..ac639fb20 100644 --- a/deployment/puppet/l23network/manifests/l3/ifconfig.pp +++ b/deployment/puppet/l23network/manifests/l3/ifconfig.pp @@ -92,7 +92,7 @@ $dhcp_nowait = false, $ifname_order_prefix = false, $check_by_ping = 'gateway', - $check_by_ping_timeout = 120, + $check_by_ping_timeout = 30, #todo: label => "XXX", # -- "ip addr add..... 
label XXX" ){ include ::l23network::params diff --git a/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp b/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp index 7f39f68c9..dc2825916 100644 --- a/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp +++ b/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp @@ -248,15 +248,15 @@ if $savanna_hash['enabled'] { class { 'savanna' : savanna_api_host => $controller_node_address, - + savanna_db_password => $savanna_hash['db_password'], savanna_db_host => $controller_node_address, - + savanna_keystone_host => $controller_node_address, savanna_keystone_user => 'admin', savanna_keystone_password => 'admin', savanna_keystone_tenant => 'admin', - + use_neutron => $::use_quantum, } } @@ -269,10 +269,10 @@ murano_rabbit_host => $controller_node_public, murano_rabbit_login => 'murano', murano_rabbit_password => $heat_hash['rabbit_password'], - + murano_db_host => $controller_node_address, murano_db_password => $murano_hash['db_password'], - + murano_keystone_host => $controller_node_address, murano_keystone_user => 'admin', murano_keystone_password => 'admin', @@ -282,17 +282,17 @@ class { 'heat' : pacemaker => false, external_ip => $controller_node_public, - + heat_keystone_host => $controller_node_address, heat_keystone_user => 'heat', heat_keystone_password => 'heat', heat_keystone_tenant => 'services', - + heat_rabbit_host => $controller_node_address, heat_rabbit_login => $rabbit_hash['user'], heat_rabbit_password => $rabbit_hash['password'], heat_rabbit_port => '5672', - + heat_db_host => $controller_node_address, heat_db_password => $heat_hash['db_password'], } diff --git a/deployment/puppet/quantum/Rakefile b/deployment/puppet/quantum/Rakefile index 4c489155a..f52901c41 100644 --- a/deployment/puppet/quantum/Rakefile +++ b/deployment/puppet/quantum/Rakefile @@ -6,3 +6,8 @@ RSpec::Core::RakeTask.new(:rspec) do |t| t.pattern = 'spec/*/*__spec.rb' t.rspec_opts = 
File.read("spec/spec.opts").chomp || "" end + +RSpec::Core::RakeTask.new(:provider) do |t| + t.pattern = 'spec/provider/*/*__spec.rb' + t.rspec_opts = File.read("spec/spec.opts").chomp || "" +end diff --git a/deployment/puppet/quantum/lib/puppet/provider/quantum.rb b/deployment/puppet/quantum/lib/puppet/provider/quantum.rb index 21498207e..804837ca2 100644 --- a/deployment/puppet/quantum/lib/puppet/provider/quantum.rb +++ b/deployment/puppet/quantum/lib/puppet/provider/quantum.rb @@ -4,6 +4,7 @@ require 'tempfile' class Puppet::Provider::Quantum < Puppet::Provider + def self.quantum_credentials @quantum_credentials ||= get_quantum_credentials end @@ -121,7 +122,7 @@ def check_quantum_api_availability(timeout) end - private + #private # def self.list_quantum_objects # ids = [] # (auth_quantum('index').split("\n")[2..-1] || []).collect do |line| @@ -147,12 +148,17 @@ def self.list_keystone_tenants '--os-username', q['admin_user'], '--os-password', q['admin_password'], '--os-auth-url', auth_endpoint, - #'tenant-list').grep(/\|\s+#{tenant_name}\s+\|/) { |tenant| tenant.split[1] }.to_s 'tenant-list').split("\n")[3..-2].collect do |tenant| - tenants_id[tenant.split[3]] = tenant.split[1] + t_id = tenant.split[1] + t_name = tenant.split[3] + tenants_id[t_name] = t_id end tenants_id end + # def list_keystone_tenants + # self.class.list_keystone_tenants + # end end +# vim: set ts=2 sw=2 et : \ No newline at end of file diff --git a/deployment/puppet/quantum/lib/puppet/provider/quantum_floatingip_pool/quantum.rb b/deployment/puppet/quantum/lib/puppet/provider/quantum_floatingip_pool/quantum.rb new file mode 100644 index 000000000..913caf105 --- /dev/null +++ b/deployment/puppet/quantum/lib/puppet/provider/quantum_floatingip_pool/quantum.rb @@ -0,0 +1,212 @@ +# Load the Quantum provider library to help +require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/quantum') + +Puppet::Type.type(:quantum_floatingip_pool).provide( + :quantum, + :parent => 
Puppet::Provider::Quantum +) do + + desc "Manage floating-IP pool for given tenant" + + commands :quantum => 'quantum' + commands :keystone => 'keystone' + commands :sleep => 'sleep' + + # I need to setup caching and what-not to make this lookup performance not suck + def self.instances + @floating_ip_cache ||= {} + # get floating IP list + f_ip_list = self.floatingip_list('--format=csv', '--field=id', '--field=floating_ip_address') + return [] if f_ip_list.chomp.empty? + + pools_by_tenant_id = {} + f_ip_list.split("\n").each do |fip| + fields=fip.split(',').map{|x| x[1..-2]} + next if (fields[0] == 'id') or (fields[0].nil?) or (fields.size != 2) + details = Hash[ + self.floatingip_show('--format', 'shell', fields[0]).split("\n").map{|x| x.split('=')}.select{|x| !x[0].nil?}.map{|x| [x[0].to_sym,x[1][1..-2]]} + ] + pools_by_tenant_id[details[:tenant_id]] ? pools_by_tenant_id[details[:tenant_id]] += 1 : pools_by_tenant_id[details[:tenant_id]] = 1 + @floating_ip_cache[details[:id]] = { + :tenant_id => details[:tenant_id], + :tenant => tenant_name_by_id[details[:tenant_id]], + :ip => details[:floating_ip_address] + } + end + rv = [] + pools_by_tenant_id.each do |tenn_id, ip_count| + Puppet::debug("tenant-id='#{tenn_id}' tenant='#{tenant_name_by_id[tenn_id]}' size=#{ip_count}") + rv << new( + :name => tenant_name_by_id[tenn_id], + :pool_size => ip_count, + :ensure => :present + ) + end + rv + end + + def self.prefetch(resources) + instances.each do |i| + res = resources[i.name.to_s] + if ! res.nil? + res.provider = i + end + end + end + + def pool_size + @property_hash[:pool_size] + end + def pool_size=(value) + delta = @property_hash[:pool_size].to_i - value + if delta == 0 + nil + elsif delta > 0 + Puppet::debug("*** will be destroyed #{delta} floating IPs for tenant '#{@property_hash[:name]}'") + _destroy_N(delta) + else + Puppet::debug("*** will be created #{-delta} floating IPs for tenant '#{@property_hash[:name]}'") + _create_N(-delta) + end + end + + def exists? 
+ @property_hash[:ensure] == :present + end + + def create + _create_N(@resource[:pool_size]) + end + + def _create_N(n) + for i in 0...n.to_i do + retries = 30 + loop do + begin + auth_quantum('floatingip-create', '--tenant-id', tenant_id[@resource[:name]], @resource[:ext_net]) + break + rescue Exception => e + notice("Can't connect to quantum backend. Waiting for retry...") + retries -= 1 + if retries <= 1 + notice("Can't connect to quantum backend. No more retries.") + raise(e) + end + sleep 2 + end + end + end + end + + def destroy + _destroy_N((2**((8*0.size)-2))-1) # ruby maxint emulation + end + + def _destroy_N(n) + nn = n.to_i + t_id = tenant_id[@resource[:name]] + # get floating IP list + f_ip_list = floatingip_list('--format=csv', '--field=id', '--field=floating_ip_address') + return if f_ip_list.chomp.empty? + f_ip_list.split("\n").each do |fip| + fields=fip.split(',').map{|x| x[1..-2]} + next if (fields[0].nil?) or (fields.size != 2) or (fields[0] == 'id') + fip_id = fields[0] + details = floatingip_cache[fip_id.to_s] + if details.nil? + Puppet::debug("*** Can't find in cache floating IP with ID:'#{fip_id}'") + end + if details[:tenant_id] == t_id + retries = 30 + loop do + begin + auth_quantum('floatingip-delete', fip_id) + break + rescue Exception => e + notice("Can't connect to quantum backend. Waiting for retry...") + retries -= 1 + if retries <= 1 + notice("Can't connect to quantum backend. 
No more retries.") + raise(e) + end + sleep 2 + end + end + nn -= 1 + break if nn <= 0 + end + end + end + + private + + def self.floatingip_cache + @floating_ip_cache + end + def floatingip_cache + self.class.floatingip_cache + end + + def self.tenant_id + @tenant_id ||= list_keystone_tenants + end + def tenant_id + self.class.tenant_id + end + + def self.tenant_name_by_id + @tenant_name_by_id ||= list_keystone_tenants.invert() + end + def tenant_name_by_id + self.class.tenant_name_by_id + end + + def floatingip_list(*args) + self.class.floatingip_list(args) + end + def self.floatingip_list(*args) + rv = '' + retries = 30 + loop do + begin + rv = auth_quantum('floatingip-list', args) + break + rescue Exception => e + notice("Can't connect to quantum backend. Waiting for retry...") + retries -= 1 + if retries <= 1 + notice("Can't connect to quantum backend. No more retries.") + raise(e) + end + sleep 2 + end + end + return rv + end + + + def floatingip_show(*args) + self.class.floatingip_show(args) + end + def self.floatingip_show(*args) + rv = '' + retries = 30 + loop do + begin + rv = auth_quantum('floatingip-show', args) + break + rescue Exception => e + notice("Can't connect to quantum backend. Waiting for retry...") + retries -= 1 + if retries <= 1 + notice("Can't connect to quantum backend. 
No more retries.") + raise(e) + end + sleep 2 + end + end + return rv + end + +end +# vim: set ts=2 sw=2 et : \ No newline at end of file diff --git a/deployment/puppet/quantum/lib/puppet/type/quantum_floatingip_pool.rb b/deployment/puppet/quantum/lib/puppet/type/quantum_floatingip_pool.rb new file mode 100644 index 000000000..b8e5966e9 --- /dev/null +++ b/deployment/puppet/quantum/lib/puppet/type/quantum_floatingip_pool.rb @@ -0,0 +1,34 @@ +Puppet::Type.newtype(:quantum_floatingip_pool) do + + @doc = "Manage creation/deletion of floating IP pool" + + ensurable + + newparam(:name) do + desc "The name of tenant, that the pool is associated with" + defaultto "admin" + end + + newproperty(:pool_size) do + desc "Size of floating IP pool" + defaultto 1 + validate do |val| + if val.to_i < 0 + fail("Invalid size: '#{val}'") + end + end + munge do |val| + rv = val.to_i + end + end + + newparam(:ext_net) do + desc "Set an external network" + defaultto "net04_ext" + end + + autorequire(:package) do + ['python-quantumclient'] + end + +end diff --git a/deployment/puppet/quantum/manifests/network/predefined_netwoks.pp b/deployment/puppet/quantum/manifests/network/predefined_netwoks.pp index 9ce3424c2..71d90602b 100644 --- a/deployment/puppet/quantum/manifests/network/predefined_netwoks.pp +++ b/deployment/puppet/quantum/manifests/network/predefined_netwoks.pp @@ -6,5 +6,12 @@ Keystone_user_role<| title=="$auth_user@$auth_tenant"|> -> Quantum_net<| |> Service <| title == 'keystone' |> -> Quantum_net <| |> Anchor['quantum-plugin-ovs-done'] -> Quantum_net <| |> + + quantum_floatingip_pool{'admin': + pool_size => get_floatingip_pool_size_for_admin($quantum_config) + } + Quantum_net<||> -> Quantum_floatingip_pool<||> + Quantum_subnet<||> -> Quantum_floatingip_pool<||> + Quantum_router<||> -> Quantum_floatingip_pool<||> } # vim: set ts=2 sw=2 et : \ No newline at end of file diff --git a/deployment/puppet/quantum/spec/provider/quantum_floatingip_pool/quantum__spec.rb 
b/deployment/puppet/quantum/spec/provider/quantum_floatingip_pool/quantum__spec.rb new file mode 100644 index 000000000..820779224 --- /dev/null +++ b/deployment/puppet/quantum/spec/provider/quantum_floatingip_pool/quantum__spec.rb @@ -0,0 +1,146 @@ +require 'spec_helper' + +describe Puppet::Type.type(:quantum_floatingip_pool).provider(:quantum) do + + let(:resource) { Puppet::Type.type(:quantum_floatingip_pool).new(:name => 'admin', :provider => :quantum) } + let(:provider) { resource.provider } + + describe "#instances" do + before(:each) do + provider.class.stubs(:quantum).with( + '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', + ['floatingip-list', ['--format=csv', '--field=id', '--field=floating_ip_address']]).returns(''' +"id","floating_ip_address" +"17029d36-72c3-4ab4-9da2-cdecc689842f","10.20.3.132" +"324355f8-0992-4950-8d16-dea6d670b0fe","10.20.3.137" +"aaebec15-b59b-4e03-9b74-e17b49ffa528","10.20.3.133" +"bce4e408-03e3-421a-80c7-a5c96a835c4e","10.20.3.136" + ''') + provider.class.stubs(:quantum).with( + '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', + ['floatingip-show', ['--format', 'shell', '17029d36-72c3-4ab4-9da2-cdecc689842f']] + ).returns(''' +fixed_ip_address="" +floating_ip_address="10.20.3.132" +floating_network_id="47ce6a63-e967-42cc-8710-b6feca522ac7" +id="17029d36-72c3-4ab4-9da2-cdecc689842f" +port_id="" +router_id="" +tenant_id="70e116e152c34eac8966f3eaa7080e89" + ''') + provider.class.stubs(:quantum).with( + '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', + ['floatingip-show', ['--format', 'shell', '324355f8-0992-4950-8d16-dea6d670b0fe']] + ).returns(''' +fixed_ip_address="" +floating_ip_address="10.20.3.137" +floating_network_id="47ce6a63-e967-42cc-8710-b6feca522ac7" 
+id="324355f8-0992-4950-8d16-dea6d670b0fe" +port_id="" +router_id="" +tenant_id="315f150b76874b2bb07b9f03530fafc4" + ''') + provider.class.stubs(:quantum).with( + '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', + ['floatingip-show', ['--format', 'shell', 'aaebec15-b59b-4e03-9b74-e17b49ffa528']] + ).returns(''' +fixed_ip_address="" +floating_ip_address="10.20.3.133" +floating_network_id="47ce6a63-e967-42cc-8710-b6feca522ac7" +id="aaebec15-b59b-4e03-9b74-e17b49ffa528" +port_id="" +router_id="" +tenant_id="315f150b76874b2bb07b9f03530fafc4" + ''') + provider.class.stubs(:quantum).with( + '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', + ['floatingip-show', ['--format', 'shell', 'bce4e408-03e3-421a-80c7-a5c96a835c4e']] + ).returns(''' +fixed_ip_address="" +floating_ip_address="10.20.3.136" +floating_network_id="47ce6a63-e967-42cc-8710-b6feca522ac7" +id="bce4e408-03e3-421a-80c7-a5c96a835c4e" +port_id="" +router_id="" +tenant_id="315f150b76874b2bb07b9f03530fafc4" + ''') + provider.stubs(:keystone).with( + '--os-tenant-name', "admin", '--os-username', "admin", '--os-password', "admin", + '--os-auth-url', "http://10.20.1.2:5000/v2.0/", 'tenant-list' + ).returns(''' ++----------------------------------+----------+---------+ +| id | name | enabled | ++----------------------------------+----------+---------+ +| 70e116e152c34eac8966f3eaa7080e89 | admin | True | +| 719179116f3c47129cb2d2a96ed62177 | services | True | +| 315f150b76874b2bb07b9f03530fafc4 | ttt | True | ++----------------------------------+----------+---------+ + ''') + + # provider.class.stubs(:tenants_by_name => { + # 'admin' => '70e116e152c34eac8966f3eaa7080e89', + # 'services' => '719179116f3c47129cb2d2a96ed62177', + # 'ttt' => '315f150b76874b2bb07b9f03530fafc4' + # }) + # provider.class.stubs(:tenants_by_id =>{ + # '70e116e152c34eac8966f3eaa7080e89' 
=> 'admin', + # '719179116f3c47129cb2d2a96ed62177' => 'services', + # '315f150b76874b2bb07b9f03530fafc4' => 'ttt' + # }) + + provider.class.stubs(:quantum_credentials).returns({ + 'auth_url' => "http://10.20.1.2:5000/v2.0/", + 'admin_user' => "admin", + 'admin_password' => "admin", + 'admin_tenant_name' => "admin" + }) + # export OS_TENANT_NAME=admin + # export OS_USERNAME=admin + # export OS_PASSWORD=admin + # export OS_AUTH_URL="http://10.20.1.2:5000/v2.0/" + # export OS_AUTH_STRATEGY=keystone + # export SERVICE_TOKEN=jgi14qOR + # export SERVICE_ENDPOINT=http://10.20.1.2:35357/v2.0/ + end + + it "should find instances" do + +# provider.class.stubs(:floatingip_show).with(['--format', 'shell', '324355f8-0992-4950-8d16-dea6d670b0fe']).returns( +# ''' +# ''' +# ) + # out=File.open(File.dirname(__FILE__) + '/../../../../fixtures/cib/cib.xml') + # provider.class.stubs(:dump_cib).returns(out,nil) + resources = [] + provider.class.instances.each do |instance| + resources << instance.instance_eval{@property_hash} + end + + resources[0].should eql( + {:name=>:bar,:provided_by=>"pacemaker",:ensure=>:present,:parameters=>{},:primitive_class=>"ocf",:primitive_type=>"Dummy",:operations=>{"monitor"=>{"interval"=>"20"}},:metadata=>{},:ms_metadata=>{},:multistate_hash=>{},:provider=>:crm} + ) + end + end + + # describe "#create" do + + # before(:each) do + # provider.class.stubs(:exec_withenv).returns(0) + # end + + # it "should create resource with 5 floatings for admin tenant" do + # provider.class.stubs(:prefetch) + # resource[:tenant] = "admin" + # resource[:size] = 5 + # # tmpfile = StringIO.new() + # # Tempfile.stubs(:open).with("puppet_crm_update").yields(tmpfile) + # # tmpfile.stubs(:path) + # # tmpfile.expects(:write).with("primitive myresource ocf:pacemaker:Dummy op monitor interval=20 ") + # provider.class.prefetch({}) + # provider.create + # provider.flush + # end + + # end +end +# vim: set ts=2 sw=2 et : \ No newline at end of file From 
cc1508f439d2215741e97320b04ba3176cced3e2 Mon Sep 17 00:00:00 2001 From: Sergey Vasilenko Date: Mon, 21 Oct 2013 10:26:35 +0400 Subject: [PATCH 04/15] Parser function to calculate autogenerated floatingip pool size Returns: 10 for big allocation size 1/2 of pool size for middle allocation size 0 if allocation size too small --- .../get_floatingip_pool_size_for_admin.rb | 66 +++++++++++ ...et_floatingip_pool_size_for_admin__spec.rb | 103 ++++++++++++++++++ 2 files changed, 169 insertions(+) create mode 100644 deployment/puppet/quantum/lib/puppet/parser/functions/get_floatingip_pool_size_for_admin.rb create mode 100644 deployment/puppet/quantum/spec/functions/get_floatingip_pool_size_for_admin__spec.rb diff --git a/deployment/puppet/quantum/lib/puppet/parser/functions/get_floatingip_pool_size_for_admin.rb b/deployment/puppet/quantum/lib/puppet/parser/functions/get_floatingip_pool_size_for_admin.rb new file mode 100644 index 000000000..da28d0a11 --- /dev/null +++ b/deployment/puppet/quantum/lib/puppet/parser/functions/get_floatingip_pool_size_for_admin.rb @@ -0,0 +1,66 @@ +# require 'ipaddr' +# require 'yaml' +# require 'json' + +class MrntQuantumFA + def initialize(scope, cfg) + @scope = scope + @quantum_config = cfg + end + + #class method + def self.sanitize_array(aa) + aa.reduce([]) do |rv, v| + rv << case v.class + when Hash then sanitize_hash(v) + when Array then sanitize_array(v) + else v + end + end + end + + #class method + def self.sanitize_hash(hh) + rv = {} + hh.each do |k, v| + rv[k.to_sym] = case v.class.to_s + when "Hash" then sanitize_hash(v) + when "Array" then sanitize_array(v) + else v + end + end + return rv + end + + def get_pool_size() + floating_range = @quantum_config[:predefined_networks][:net04_ext][:L3][:floating] + Puppet::debug("Floating range is #{floating_range}") + borders = floating_range.split(':').map{|x| x.split('.')[-1].to_i} + rv = borders[1]-borders[0] + if rv <= 4 + return 0 + elsif rv > 10 + rv = 10 + else + rv = (rv / 2).to_i
- 1 + end + return rv + end +end + +module Puppet::Parser::Functions + newfunction(:get_floatingip_pool_size_for_admin, :type => :rvalue, :doc => <<-EOS + This function get Hash of Quantum configuration + and calculate autogenerated floating IPs pool size for admin tenant. + + Example call: + $pool_size = get_floatingip_pool_size_for_admin($quantum_settings_hash) + + EOS + ) do |argv| + #Puppet::Parser::Functions.autoloader.loadall + nr_conf = MrntQuantumFA.new(self, MrntQuantumFA.sanitize_hash(argv[0])) + nr_conf.get_pool_size() + end +end +# vim: set ts=2 sw=2 et : \ No newline at end of file diff --git a/deployment/puppet/quantum/spec/functions/get_floatingip_pool_size_for_admin__spec.rb b/deployment/puppet/quantum/spec/functions/get_floatingip_pool_size_for_admin__spec.rb new file mode 100644 index 000000000..e87e916b9 --- /dev/null +++ b/deployment/puppet/quantum/spec/functions/get_floatingip_pool_size_for_admin__spec.rb @@ -0,0 +1,103 @@ +require 'spec_helper' +require 'json' +require 'yaml' + +class QuantumNRConfig + def initialize(init_v) + @def_v = {} + @def_v.replace(init_v) + @def_config = { + 'predefined_networks' => { + 'net04_ext' => { + 'shared' => false, + 'L2' => { + 'router_ext' => true, + 'network_type' => 'flat', + 'physnet' => 'physnet1', + 'segment_id' => nil, + }, + 'L3' => { + 'subnet' => "10.100.100.0/24", + 'gateway' => "10.100.100.1", + 'nameservers' => [], + 'floating' => "10.100.100.130:10.100.100.254", + }, + }, + 'net04' => { + 'shared' => false, + 'L2' => { + 'router_ext' => false, + 'network_type' => 'gre', # or vlan + 'physnet' => 'physnet2', + 'segment_id' => nil, + }, + 'L3' => { + 'subnet' => "192.168.111.0/24", + 'gateway' => "192.168.111.1", + 'nameservers' => ["8.8.4.4", "8.8.8.8"], + 'floating' => nil, + }, + }, + }, + } + init_v.each() do |k,v| + @def_config[k.to_s()] = v + end + end + + def get_def_config() + return Marshal.load(Marshal.dump(@def_config)) + end + + def get_def(k) + return @def_v[k] + end + +end + +describe 
'create_floating_ips_for_admin' , :type => :puppet_function do + let(:scope) { PuppetlabsSpec::PuppetInternals.scope } + + before :each do + @qnr_config = QuantumNRConfig.new({ + :management_vip => '192.168.0.254', + :management_ip => '192.168.0.11' + }) + # Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with('management', 'ipaddr').returns(@q_config.get_def(:management_ip)) + @cfg = @qnr_config.get_def_config() + end + + it 'should exist' do + Puppet::Parser::Functions.function('get_floatingip_pool_size_for_admin').should == 'function_get_floatingip_pool_size_for_admin' + end + + it 'Must return 10' do + subject.call([@cfg, 'quantum_settings']).should == 10 + # [ + # '10.100.100.244', + # '10.100.100.245', + # '10.100.100.246', + # '10.100.100.247', + # '10.100.100.248', + # '10.100.100.249', + # '10.100.100.250', + # '10.100.100.251', + # '10.100.100.252', + # '10.100.100.253', + # '10.100.100.254' + # ] + end + + it 'Must return zero' do + @cfg['predefined_networks']['net04_ext']['L3']['floating'] = "10.100.100.250:10.100.100.254" + subject.call([@cfg, 'quantum_settings']).should == 0 #[] + end + + it 'Must return array of 3 ip address' do + @cfg['predefined_networks']['net04_ext']['L3']['floating'] = "10.100.100.247:10.100.100.254" + subject.call([@cfg, 'quantum_settings']).should == 3 #["10.100.100.252", "10.100.100.253", "10.100.100.254"] + end + +end + +# vim: set ts=2 sw=2 et : \ No newline at end of file From 3365d04d21e9981fd9a1b4db657a43c096255aee Mon Sep 17 00:00:00 2001 From: Sergey Vasilenko Date: Mon, 21 Oct 2013 15:54:37 +0400 Subject: [PATCH 05/15] Make router, net, subnet more ensurable.
--- .../puppet/provider/quantum_net/quantum.rb | 30 +++++++++++-------- .../puppet/provider/quantum_router/quantum.rb | 29 ++++++++++-------- .../puppet/provider/quantum_subnet/quantum.rb | 30 +++++++++++-------- 3 files changed, 51 insertions(+), 38 deletions(-) diff --git a/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb b/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb index 23c887827..42ea95638 100644 --- a/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb +++ b/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb @@ -18,10 +18,26 @@ def self.instances return [] if network_list.chomp.empty? network_list.split("\n")[3..-2].collect do |net| - new(:name => net.split[3]) + new( + :name => net.split[3], + :ensure => :present + ) end end + def self.prefetch(resources) + instances.each do |i| + res = resources[i.name.to_s] + if ! res.nil? + res.provider = i + end + end + end + + def exists? + @property_hash[:ensure] == :present + end + def self.tenant_id @tenant_id ||= get_tenants_id end @@ -58,18 +74,6 @@ def create ) end - def exists? - begin - network_list = auth_quantum("net-list") - return network_list.split("\n")[3..-2].detect do |net| - # n =~ /^(\S+)\s+(#{@resource[:network].split('/').first})/ - net.split[3] == @resource[:name] - end - rescue - return false - end - end - def destroy auth_quantum("net-delete", @resource[:name]) end diff --git a/deployment/puppet/quantum/lib/puppet/provider/quantum_router/quantum.rb b/deployment/puppet/quantum/lib/puppet/provider/quantum_router/quantum.rb index 5fbed3e50..41d9e42b7 100644 --- a/deployment/puppet/quantum/lib/puppet/provider/quantum_router/quantum.rb +++ b/deployment/puppet/quantum/lib/puppet/provider/quantum_router/quantum.rb @@ -18,10 +18,26 @@ def self.instances return [] if router_list.chomp.empty? 
router_list.split("\n")[3..-2].collect do |net| - new(:name => net.split[3]) + new( + :name => net.split[3], + :ensure => :present + ) end end + def self.prefetch(resources) + instances.each do |i| + res = resources[i.name.to_s] + if ! res.nil? + res.provider = i + end + end + end + + def exists? + @property_hash[:ensure] == :present + end + def self.tenant_id @tenant_id ||= get_tenants_id end @@ -70,17 +86,6 @@ def create end end - def exists? - begin - router_list = auth_quantum("router-list") - return router_list.split("\n")[3..-2].detect do |router| - router.split[3] == @resource[:name] - end - rescue - return false - end - end - def destroy auth_quantum("router-delete", @resource[:name]) end diff --git a/deployment/puppet/quantum/lib/puppet/provider/quantum_subnet/quantum.rb b/deployment/puppet/quantum/lib/puppet/provider/quantum_subnet/quantum.rb index 2d31977f3..e6b4761bc 100644 --- a/deployment/puppet/quantum/lib/puppet/provider/quantum_subnet/quantum.rb +++ b/deployment/puppet/quantum/lib/puppet/provider/quantum_subnet/quantum.rb @@ -18,10 +18,26 @@ def self.instances return [] if network_list.chomp.empty? network_list.split("\n")[3..-2].collect do |net| - new(:name => net.split[3]) + new( + :name => net.split[3], + :ensure => :present + ) end end + def self.prefetch(resources) + instances.each do |i| + res = resources[i.name.to_s] + if ! res.nil? + res.provider = i + end + end + end + + def exists? + @property_hash[:ensure] == :present + end + def self.tenant_id @tenant_id ||= get_tenants_id end @@ -71,18 +87,6 @@ def create ) end - def exists? 
- begin - network_list = auth_quantum("subnet-list") - return network_list.split("\n")[3..-2].detect do |net| - # n =~ /^(\S+)\s+(#{@resource[:network].split('/').first})/ - net.split[3] == @resource[:name] - end - rescue - return false - end - end - def destroy auth_quantum("subnet-delete", @resource[:name]) end From 65bae7ecae92c87cb27db825b4c43a8e6af1a0e1 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 22 Oct 2013 20:54:10 +0400 Subject: [PATCH 06/15] add ipsort function for RHOS deployment order --- .../nailgun/lib/puppet/parser/functions/ipsort.rb | 13 +++++++++++++ .../puppet/osnailyfacter/manifests/cluster_ha.pp | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 deployment/puppet/nailgun/lib/puppet/parser/functions/ipsort.rb diff --git a/deployment/puppet/nailgun/lib/puppet/parser/functions/ipsort.rb b/deployment/puppet/nailgun/lib/puppet/parser/functions/ipsort.rb new file mode 100644 index 000000000..e511ca81a --- /dev/null +++ b/deployment/puppet/nailgun/lib/puppet/parser/functions/ipsort.rb @@ -0,0 +1,13 @@ +module Puppet::Parser::Functions + newfunction(:ipsort, :type => :rvalue , :doc => <<-EOS +Returns list sorted of sorted IP addresses. 
+ EOS +) do |args| + require 'rubygems' + require 'ipaddr' + ips = args[0] + sorted_ips = ips.sort { |a,b| IPAddr.new( a ) <=> IPAddr.new( b ) } + sorted_ips + end +end + diff --git a/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp b/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp index 4679ace38..217c434bd 100644 --- a/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp +++ b/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp @@ -119,7 +119,7 @@ $controller_public_addresses = nodes_to_hash($controllers,'name','public_address') $controller_storage_addresses = nodes_to_hash($controllers,'name','storage_address') $controller_hostnames = keys($controller_internal_addresses) - $controller_nodes = sort(values($controller_internal_addresses)) + $controller_nodes = ipsort(values($controller_internal_addresses)) $controller_node_public = $::fuel_settings['public_vip'] $controller_node_address = $::fuel_settings['management_vip'] $mountpoints = filter_hash($mp_hash,'point') From 38d27400c516fec161d4e079b709aff9669a35ce Mon Sep 17 00:00:00 2001 From: Andrew Woodward Date: Wed, 9 Oct 2013 12:56:15 -0700 Subject: [PATCH 07/15] Fixup some un-needed string interpolations Fix horizon string interpolation Fix quantum string interpolation --- deployment/puppet/horizon/manifests/init.pp | 17 +++++++++++------ deployment/puppet/quantum/manifests/server.pp | 18 +++++++++++------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/deployment/puppet/horizon/manifests/init.pp b/deployment/puppet/horizon/manifests/init.pp index 52602fe04..8387c4cdf 100644 --- a/deployment/puppet/horizon/manifests/init.pp +++ b/deployment/puppet/horizon/manifests/init.pp @@ -51,7 +51,8 @@ $wsgi_user = $::horizon::params::apache_user $wsgi_group = $::horizon::params::apache_group - package { ["$::horizon::params::http_service", "$::horizon::params::http_modwsgi"]: + package { [$::horizon::params::http_service, + $::horizon::params::http_modwsgi]: ensure => present, } @@ 
-100,7 +101,8 @@ } if $generate_sslcert_names { - $sslcert_pair = regsubst([$::horizon::params::ssl_cert_file, $::horizon::params::ssl_key_file], + $sslcert_pair = regsubst([$::horizon::params::ssl_cert_file, + $::horizon::params::ssl_key_file], '(.+\/).+(\..+)', "\1${::domain}\2") $ssl_cert_file = $sslcert_pair[0] @@ -169,7 +171,8 @@ owner => root, group => root, content => "LoadModule wsgi_module modules/mod_wsgi.so\n", - require => Package["$::horizon::params::http_service", "$::horizon::params::http_modwsgi"], + require => Package[$::horizon::params::http_service, + $::horizon::params::http_modwsgi], before => Package['dashboard'], } # ensure there is a HTTP redirect from / to /dashboard @@ -180,7 +183,7 @@ } } - augeas { "remove_listen_directive": + augeas { 'remove_listen_directive': context => "/files/etc/httpd/conf/httpd.conf", changes => [ "rm directive[. = 'Listen']" @@ -217,8 +220,10 @@ name => $::horizon::params::http_service, ensure => 'running', enable => true, - require => Package["$::horizon::params::http_service", "$::horizon::params::http_modwsgi"], - subscribe => File["$::horizon::params::local_settings_path", "$::horizon::params::logdir"] + require => Package[$::horizon::params::http_service, + $::horizon::params::http_modwsgi], + subscribe => File[$::horizon::params::local_settings_path, + $::horizon::params::logdir], } if $cache_server_ip =~ /^127\.0\.0\.1/ { diff --git a/deployment/puppet/quantum/manifests/server.pp b/deployment/puppet/quantum/manifests/server.pp index 213eef194..b5d9b71ff 100644 --- a/deployment/puppet/quantum/manifests/server.pp +++ b/deployment/puppet/quantum/manifests/server.pp @@ -14,10 +14,10 @@ if $::operatingsystem == 'Ubuntu' { if $service_provider == 'pacemaker' { - file { "/etc/init/quantum-metadata-agent.override": - replace => "no", - ensure => "present", - content => "manual", + file { '/etc/init/quantum-metadata-agent.override': + replace => 'no', + ensure => 'present', + content => 'manual', mode => 644, 
before => Package['quantum-server'], } @@ -39,7 +39,7 @@ Package[$server_package] -> Quantum_api_config<||> if defined(Anchor['quantum-plugin-ovs']) { - Package["$server_package"] -> Anchor['quantum-plugin-ovs'] + Package[$server_package] -> Anchor['quantum-plugin-ovs'] } Quantum_config<||> ~> Service['quantum-server'] @@ -84,11 +84,15 @@ class { 'quantum::network::predefined_netwoks': quantum_config => $quantum_config, } -> Anchor['quantum-server-done'] - Service['quantum-server'] -> Class['quantum::network::predefined_netwoks'] + + Service['quantum-server'] -> + Class['quantum::network::predefined_netwoks'] } anchor {'quantum-server-done':} - Anchor['quantum-server'] -> Anchor['quantum-server-done'] + + Anchor['quantum-server'] -> + Anchor['quantum-server-done'] } # vim: set ts=2 sw=2 et : From ab8adbed2f7caf34d6ed9c2eb19938c402b7ca11 Mon Sep 17 00:00:00 2001 From: Andrew Woodward Date: Thu, 24 Oct 2013 11:15:59 -0700 Subject: [PATCH 08/15] Fix ceph module syntax erros --- deployment/puppet/ceph/manifests/mon.pp | 2 +- deployment/puppet/ceph/manifests/radosgw.pp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/deployment/puppet/ceph/manifests/mon.pp b/deployment/puppet/ceph/manifests/mon.pp index 99d5da122..fb6f4b00e 100644 --- a/deployment/puppet/ceph/manifests/mon.pp +++ b/deployment/puppet/ceph/manifests/mon.pp @@ -11,7 +11,7 @@ exec {'ceph-deploy mon create': command => "ceph-deploy mon create ${::hostname}:${::internal_address}", logoutput => true, - unless => 'ceph mon stat | grep ${::internal_address}', + unless => "ceph mon stat | grep ${::internal_address}", # TODO: need method to update mon_nodes in ceph.conf } diff --git a/deployment/puppet/ceph/manifests/radosgw.pp b/deployment/puppet/ceph/manifests/radosgw.pp index 60ad8fa0c..262a61bc8 100644 --- a/deployment/puppet/ceph/manifests/radosgw.pp +++ b/deployment/puppet/ceph/manifests/radosgw.pp @@ -11,6 +11,7 @@ $keyring_path = '/etc/ceph/keyring.radosgw.gateway', $radosgw_auth_key = 
'client.radosgw.gateway', $rgw_user = $::ceph::params::user_httpd, + $use_ssl = $::ceph::use_ssl, # RadosGW settings $rgw_host = $::ceph::rgw_host, From 4c7958764fc0a2e9a7ecd67764394e5cb0c7ffc8 Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Thu, 24 Oct 2013 12:16:52 +0300 Subject: [PATCH 09/15] Define CPU mode none for virtual, host-model for HW cases Fixes: #PRD-1950 Signed-off-by: Bogdan Dobrelya --- .../puppet/nova/manifests/compute/libvirt.pp | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/deployment/puppet/nova/manifests/compute/libvirt.pp b/deployment/puppet/nova/manifests/compute/libvirt.pp index d49653296..9eb192410 100644 --- a/deployment/puppet/nova/manifests/compute/libvirt.pp +++ b/deployment/puppet/nova/manifests/compute/libvirt.pp @@ -13,16 +13,16 @@ # priority => 10, # before => [Package['libvirt']] # }-> - + # package { 'qemu': # ensure => present, # } - - exec { 'symlink-qemu-kvm': + + exec { 'symlink-qemu-kvm': command => "/bin/ln -sf /usr/libexec/qemu-kvm /usr/bin/qemu-system-x86_64", - } - + } + stdlib::safe_package {'dnsmasq-utils':} package { 'avahi': @@ -95,4 +95,14 @@ 'DEFAULT/vncserver_listen': value => $vncserver_listen; 'DEFAULT/disk_cachemodes': value => '"file=writethrough"'; } + +if str2bool($::is_virtual) { + nova_config { + 'DEFAULT/libvirt_cpu_mode': value => 'none'; + } + } else { + nova_config { + 'DEFAULT/libvirt_cpu_mode': value => 'host-model'; + } + } } From e595da5a31809867671a1d57b119df744222fdee Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 25 Oct 2013 16:25:22 +0300 Subject: [PATCH 10/15] Fix isvirtual flag for logging class Use fact is_virtual to determine the $virtual value Signed-off-by: Bogdan Dobrelya --- deployment/puppet/nailgun/manifests/init.pp | 2 +- deployment/puppet/osnailyfacter/examples/site.pp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deployment/puppet/nailgun/manifests/init.pp b/deployment/puppet/nailgun/manifests/init.pp 
index 2112279b0..730b3b4c8 100644 --- a/deployment/puppet/nailgun/manifests/init.pp +++ b/deployment/puppet/nailgun/manifests/init.pp @@ -97,7 +97,7 @@ proto => 'udp', # use date-rfc3339 timestamps show_timezone => true, - virtual => true, + virtual => str2bool($::is_virtual), } class { "nailgun::user": diff --git a/deployment/puppet/osnailyfacter/examples/site.pp b/deployment/puppet/osnailyfacter/examples/site.pp index 22e26b16c..afbe55e14 100644 --- a/deployment/puppet/osnailyfacter/examples/site.pp +++ b/deployment/puppet/osnailyfacter/examples/site.pp @@ -178,7 +178,7 @@ # remote servers to send logs to rservers => $rservers, # should be true, if client is running at virtual node - virtual => true, + virtual => str2bool($::is_virtual), # facilities syslog_log_facility_glance => $syslog_log_facility_glance, syslog_log_facility_cinder => $syslog_log_facility_cinder, From 214d0c361f11f4ab6f4b133b749f7d46802c2dab Mon Sep 17 00:00:00 2001 From: Bogdan Dobrelya Date: Fri, 25 Oct 2013 16:32:26 +0300 Subject: [PATCH 11/15] Fix isvirtual case only for checksum_bootpc as well Signed-off-by: Bogdan Dobrelya --- deployment/puppet/nailgun/manifests/cobbler.pp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployment/puppet/nailgun/manifests/cobbler.pp b/deployment/puppet/nailgun/manifests/cobbler.pp index a145d280a..1e1b1542a 100644 --- a/deployment/puppet/nailgun/manifests/cobbler.pp +++ b/deployment/puppet/nailgun/manifests/cobbler.pp @@ -141,7 +141,7 @@ require => Cobbler_distro["bootstrap"], } - class { cobbler::checksum_bootpc: } + if str2bool($::is_virtual) { class { cobbler::checksum_bootpc: } } exec { "cobbler_system_add_default": command => "cobbler system add --name=default \ From 19c2324e6b9b00505be66b8465739149ee7470f2 Mon Sep 17 00:00:00 2001 From: Dmitry Borodaenko Date: Mon, 21 Oct 2013 13:46:49 -0700 Subject: [PATCH 12/15] always enable haproxy for swift or radosgw in HA Setup Swift HAProxy when Swift is enabled either for glance or for 
objects, not just when it's enabled for glance. Setup RadosGW HAProxy when Swift is disabled and RadosGW is enabled. Use active-passive balancing for RadosGW. --- .../puppet/openstack/manifests/controller_ha.pp | 17 ++++++++++++++--- .../osnailyfacter/manifests/cluster_ha.pp | 3 +++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/deployment/puppet/openstack/manifests/controller_ha.pp b/deployment/puppet/openstack/manifests/controller_ha.pp index b4be73953..0b6fc7f5c 100644 --- a/deployment/puppet/openstack/manifests/controller_ha.pp +++ b/deployment/puppet/openstack/manifests/controller_ha.pp @@ -52,6 +52,12 @@ $balancer_port = 5673 } + 'radosgw': { + $haproxy_config_options = { 'option' => ['httplog'], 'balance' => 'roundrobin' } + $balancermember_options = 'check' + $balancer_port = '6780' + } + default: { $haproxy_config_options = { 'option' => ['httplog'], 'balance' => 'roundrobin' } $balancermember_options = 'check' @@ -114,7 +120,7 @@ $auto_assign_floating_ip = false, $mysql_root_password, $admin_email, $admin_user = 'admin', $admin_password, $keystone_admin_tenant='admin', $keystone_db_password, $keystone_admin_token, $glance_db_password, $glance_user_password, $glance_image_cache_max_size, $nova_db_password, $nova_user_password, $queue_provider, $rabbit_password, $rabbit_user, $rabbit_nodes, - $qpid_password, $qpid_user, $qpid_nodes, $memcached_servers, $export_resources, $glance_backend='file', $swift_proxies=undef, + $qpid_password, $qpid_user, $qpid_nodes, $memcached_servers, $export_resources, $glance_backend='file', $swift_proxies=undef, $rgw_balancers=undef, $quantum = false, $quantum_config={}, $cinder = false, $cinder_iscsi_bind_addr = false, @@ -215,8 +221,13 @@ if $custom_mysql_setup_class == 'galera' { haproxy_service { 'mysqld': order => 95, port => 3306, virtual_ips => [$internal_virtual_ip], define_backend => true } } - if $glance_backend == 'swift' { - haproxy_service { 'swift': order => 96, port => 8080, virtual_ips => 
[$public_virtual_ip,$internal_virtual_ip], balancers => $swift_proxies } + + if $swift_proxies { + haproxy_service { 'swift': order => '96', port => '8080', virtual_ips => [$public_virtual_ip,$internal_virtual_ip], balancers => $swift_proxies } + } + + if $rgw_balancers { + haproxy_service { 'radosgw': order => '97', port => '8080', virtual_ips => [$public_virtual_ip,$internal_virtual_ip], balancers => $rgw_balancers, define_backend => true } } Haproxy_service<| |> ~> Exec['restart_haproxy'] diff --git a/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp b/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp index 217c434bd..9e419bcdb 100644 --- a/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp +++ b/deployment/puppet/osnailyfacter/manifests/cluster_ha.pp @@ -177,6 +177,8 @@ } else { $primary_proxy = false } + } elsif ($storage_hash['objects_ceph']) { + $rgw_balancers = $controller_storage_addresses } @@ -251,6 +253,7 @@ export_resources => false, glance_backend => $glance_backend, swift_proxies => $swift_proxies, + rgw_balancers => $rgw_balancers, quantum => $::use_quantum, quantum_config => $quantum_config, quantum_network_node => $::use_quantum, From c65c5bd6af8616943cccfac94145ef11b02e6070 Mon Sep 17 00:00:00 2001 From: Andrew Woodward Date: Sun, 20 Oct 2013 15:47:03 -0700 Subject: [PATCH 13/15] [PRD-2360] Radosgw wont start on multiple controllers refactor radosgw key creation so that its idempotent. 
--- deployment/puppet/ceph/manifests/radosgw.pp | 26 +++++++-------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/deployment/puppet/ceph/manifests/radosgw.pp b/deployment/puppet/ceph/manifests/radosgw.pp index 262a61bc8..18d34fcfb 100644 --- a/deployment/puppet/ceph/manifests/radosgw.pp +++ b/deployment/puppet/ceph/manifests/radosgw.pp @@ -185,24 +185,16 @@ mode => '0755', } - exec { "ceph-create-radosgw-keyring-on ${name}": - command => "ceph-authtool --create-keyring ${keyring_path}", - creates => $keyring_path, + exec { "ceph create ${radosgw_auth_key}": + command => "ceph auth get-or-create ${radosgw_auth_key} osd 'allow rwx' mon 'allow rw'", } - file { $keyring_path: mode => '0640', } - - exec { "ceph-generate-key-on ${name}": - command => "ceph-authtool ${keyring_path} -n ${radosgw_auth_key} --gen-key", - } - - exec { "ceph-add-capabilities-to-the-key-on ${name}": - command => "ceph-authtool -n ${radosgw_auth_key} --cap osd 'allow rwx' --cap mon 'allow rw' ${keyring_path}", + exec { "Populate ${radosgw_auth_key} keyring": + command => "ceph auth get-or-create ${radosgw_auth_key} > ${keyring_path}", + creates => $keyring_path } - exec { "ceph-add-to-ceph-keyring-entries-on ${name}": - command => "ceph -k /etc/ceph/ceph.client.admin.keyring auth add ${radosgw_auth_key} -i ${keyring_path}", - } + file { $keyring_path: mode => '0640', } Ceph_conf <||> -> Package[$::ceph::params::package_httpd] -> @@ -218,11 +210,9 @@ $dir_httpd_root, $rgw_nss_db_path, $rgw_log_file,]] -> - Exec["ceph-create-radosgw-keyring-on ${name}"] -> + Exec["ceph create ${radosgw_auth_key}"] -> + Exec["Populate ${radosgw_auth_key} keyring"] -> File[$keyring_path] -> - Exec["ceph-generate-key-on ${name}"] -> - Exec["ceph-add-capabilities-to-the-key-on ${name}"] -> - Exec["ceph-add-to-ceph-keyring-entries-on ${name}"] -> Firewall['012 RadosGW allow'] ~> Service ['httpd'] ~> Service['radosgw'] From 374c048dac336c92d189596dd292b660d033eb8c Mon Sep 17 00:00:00 2001 From: 
Igor Shishkin Date: Thu, 31 Oct 2013 14:55:02 +0400 Subject: [PATCH 14/15] Logging improvements Debug info logging according to wild IP catching --- .../functions/generate_network_config.rb | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/deployment/puppet/l23network/lib/puppet/parser/functions/generate_network_config.rb b/deployment/puppet/l23network/lib/puppet/parser/functions/generate_network_config.rb index fa72b876e..78b0724b0 100644 --- a/deployment/puppet/l23network/lib/puppet/parser/functions/generate_network_config.rb +++ b/deployment/puppet/l23network/lib/puppet/parser/functions/generate_network_config.rb @@ -122,6 +122,8 @@ def create_endpoint() raise(Puppet::ParseError, "get_network_role_property(...): You must call prepare_network_config(...) first!") end + Puppet.debug "stage1@generate_network_config:config_hash: #{config_hash.inspect}" + # define internal puppet parameters for creating resources res_factory = { :br => { :name_of_resource => 'l23network::l2::bridge' }, @@ -142,6 +144,8 @@ def create_endpoint() end end + Puppet.debug "stage2@generate_network_config:res_factory: #{res_factory.inspect}" + # collect interfaces and endpoints endpoints = {} born_ports = [] @@ -150,6 +154,9 @@ def create_endpoint() endpoints[int_name] = create_endpoint() born_ports.insert(-1, int_name) end + + Puppet.debug "stage3@generate_network_config:endpoints: #{endpoints.inspect}" + config_hash[:endpoints].each do |e_name, e_properties| e_name = e_name.to_sym() if not endpoints[e_name] @@ -177,6 +184,8 @@ def create_endpoint() end end + Puppet.debug "stage4@generate_network_config:endpoints: #{endpoints.inspect}" + # execute transformations # todo: if provider="lnx" execute transformations for LNX bridges transformation_success = [] @@ -189,6 +198,8 @@ def create_endpoint() action = t[:action].to_sym() end + Puppet.debug "stage5@generate_network_config:action: #{action.inspect}" + trans = L23network.sanitize_transformation(t) 
resource = res_factory[action][:resource] p_resource = Puppet::Parser::Resource.new( @@ -197,9 +208,15 @@ def create_endpoint() :scope => self, :source => resource ) + + Puppet.debug "stage6@generate_network_config:p_resource: #{p_resource.inspect}" + trans.select{|k,v| k != :action}.each do |k,v| p_resource.set_parameter(k,v) end + + Puppet.debug "stage7@generate_network_config:p_resource: #{p_resource.inspect}" + p_resource.set_parameter(:require, [previous]) if previous resource.instantiate_resource(self, p_resource) compiler.add_resource(self, p_resource) @@ -225,12 +242,20 @@ def create_endpoint() :scope => self, :source => resource ) + + Puppet.debug "stage8@generate_network_config:p_resource: #{p_resource.inspect}" + p_resource.set_parameter(:interface, endpoint_name) + + Puppet.debug "stage9@generate_network_config:p_resource: #{p_resource.inspect}" + # set ipaddresses if endpoint_body[:IP].empty? p_resource.set_parameter(:ipaddr, 'none') + Puppet.debug "stage10@generate_network_config:p_resource: #{p_resource.inspect}" elsif ['none','dhcp'].index(endpoint_body[:IP][0]) p_resource.set_parameter(:ipaddr, endpoint_body[:IP][0]) + Puppet.debug "stage11@generate_network_config:p_resource: #{p_resource.inspect}" else ipaddrs = [] endpoint_body[:IP].each do |i| @@ -241,18 +266,27 @@ def create_endpoint() end end p_resource.set_parameter(:ipaddr, ipaddrs) + Puppet.debug "stage12@generate_network_config:p_resource: #{p_resource.inspect}" end #set another (see L23network::l3::ifconfig DOC) parametres endpoint_body[:properties].each do |k,v| p_resource.set_parameter(k,v) end + + Puppet.debug "stage13@generate_network_config:p_resource: #{p_resource.inspect}" + p_resource.set_parameter(:require, [previous]) if previous resource.instantiate_resource(self, p_resource) compiler.add_resource(self, p_resource) transformation_success.insert(-1, "endpoint(#{endpoint_name})") + + Puppet.debug "stage14@generate_network_config:transformation_success: 
#{transformation_success.inspect}" + previous = p_resource.to_s end + Puppet.debug "stage15@generate_network_config:transformation_success: #{transformation_success.inspect}" + return transformation_success.join(" -> ") end -# vim: set ts=2 sw=2 et : \ No newline at end of file +# vim: set ts=2 sw=2 et : From faab27b65de4f1f4c61c0fb73d72ff0957e5e2e3 Mon Sep 17 00:00:00 2001 From: manashkin Date: Wed, 6 Nov 2013 17:53:09 +0400 Subject: [PATCH 15/15] Aligned formatting to check if Jenkins build fails on such commits --- .../puppet/osnailyfacter/manifests/cluster_simple.pp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp b/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp index b0aa2e825..5f2493bfe 100644 --- a/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp +++ b/deployment/puppet/osnailyfacter/manifests/cluster_simple.pp @@ -192,11 +192,11 @@ syslog_log_facility_quantum => $syslog_log_facility_quantum, syslog_log_facility_nova => $syslog_log_facility_nova, syslog_log_facility_keystone=> $syslog_log_facility_keystone, - cinder_rate_limits => $cinder_rate_limits, - horizon_use_ssl => $horizon_use_ssl, - nameservers => $::dns_nameservers, - primary_controller => true, - mysql_skip_name_resolve => true, + cinder_rate_limits => $cinder_rate_limits, + horizon_use_ssl => $horizon_use_ssl, + nameservers => $::dns_nameservers, + primary_controller => true, + mysql_skip_name_resolve => true, } nova_config { 'DEFAULT/start_guests_on_host_boot': value => $::fuel_settings['start_guests_on_host_boot'] } nova_config { 'DEFAULT/use_cow_images': value => $::fuel_settings['use_cow_images'] }