# vi: set ft=ruby :
#
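# Vagrant environment for exercising Gluster with Ansible: node-0 acts as the
# client (it gets Glusto and drives the Gluster deployment) and node-1 up to
# node-<node_count> are Gluster servers, each carrying extra data disks.
# Everything below targets the libvirt provider.
#
# Typical bring-up (assuming the vagrant-libvirt plugin is installed):
#   vagrant up --provider=libvirt
#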
node_data_disk_count = 5
driveletters = ('a'..'z').to_a # currently unused in this file
disk_size = 501 # GB per data disk
cpus = 1
memory = 950 # MB per VM
node_count = 6 # number of Gluster servers; node-0 is created in addition as the client.
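# With these defaults: 1 client (node-0) plus 6 servers (node-1..node-6) on
# 192.168.250.10-16, each server with 5 x 501G data disks, 950 MB RAM and
# 1 vCPU per VM (node-0 gets 2 vCPUs).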
Vagrant.configure(2) do |config|
puts "Creating #{node_count} nodes."
puts "Creating #{node_data_disk_count} data disks (#{disk_size}G) each."
require "fileutils"
f = File.open("dist/hosts.ini","w")
f.puts "node-0 ansible_host=192.168.250.10"
f.puts ""
f.puts "[gluster_servers]"
(1..node_count).each do |num|
f.puts "node-#{num} ansible_host=192.168.250.#{num+10}"
end
f.close
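  # The generated dist/hosts.ini looks like this (with the defaults above):
  #   node-0 ansible_host=192.168.250.10
  #
  #   [gluster_servers]
  #   node-1 ansible_host=192.168.250.11
  #   ...
  #   node-6 ansible_host=192.168.250.16
  #
  # Defining the VMs in reverse order means that, with sequential bring-up,
  # node-0 comes up last, once all of the Gluster servers it deploys against
  # already exist.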
  (0..node_count).reverse_each do |num|
    config.vm.define "node-#{num}" do |node|
      vm_ip = "192.168.250.#{num + 10}"
      node.vm.box = "centos/7"
      node.vm.synced_folder ".", "/vagrant", disabled: true
      node.vm.network :private_network,
        :ip => vm_ip,
        :libvirt__driver_queues => "#{cpus}"
      node.vm.post_up_message = "VM private ip: #{vm_ip}"
      node.vm.hostname = "node-#{num}"
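      # libvirt-specific tuning: system (not session) libvirt, host CPU
      # passthrough, headless console, and a qemu-guest-agent channel.
      # node-0 gets 2 vCPUs (it runs the Ansible deployment); the servers
      # additionally get their data disks attached below.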
      node.vm.provider "libvirt" do |lvt|
        lvt.qemu_use_session = false
        lvt.storage_pool_name = "default"
        lvt.memory = "#{memory}"
        if num == 0
          lvt.cpus = 2
        else
          lvt.cpus = "#{cpus}"
        end
        lvt.nested = false
        lvt.cpu_mode = "host-passthrough"
        lvt.volume_cache = "writeback"
        lvt.graphics_type = "none"
        lvt.video_type = "vga"
        lvt.video_vram = 1
        lvt.usb_controller :model => "none" # (requires vagrant-libvirt >= 0.44, which is in Fedora 30 and up?)
        lvt.random :model => 'random'
        lvt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio'
        # disk_config
        if num != 0
          (1..node_data_disk_count).each do |d|
            lvt.storage :file, :size => "#{disk_size}G", :discard => 'unmap', :serial => "#{d}"
          end # disk_config
        end
      end # libvirt
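      # node-0 only: rsync ./dist into /vagrant so the in-VM ansible-playbook
      # calls below can find hosts.ini plus, presumably, ansible.cfg and the
      # gluster playbooks they reference.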
      if num == 0
        node.vm.synced_folder "./dist", "/vagrant", type: "rsync", create: true, rsync__args: ["--verbose", "--archive", "--delete", "-z"]
        node.vm.post_up_message << "\nYou can now access the nodes (credential for root is 'foobar')"
      end
      # Prepare VMs and deploy Gluster packages on all of them.
      node.vm.provision "ansible" do |ansible|
        ansible.become = true
        ansible.playbook = "ansible/machine_config.yml"
        ansible.verbose = false
        ansible.inventory_path = "dist/hosts.ini"
        ansible.extra_vars = {
          node_count: "#{node_count}",
          servers: (1..node_count).map { |n| "node-#{n}" },
          clients: ["node-0"]
        }
      end
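      # node-0 only, in two stages: first a host-side Ansible run installs
      # Glusto on the client, then a shell provisioner runs ansible-playbook
      # from inside node-0 to deploy Gluster on the [gluster_servers] group
      # and to clean up any leftover volume before tests.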
      # Deploy Glusto and Gluster using Gluster-Ansible via node-0
      if num == 0
        node.vm.provision "ansible" do |ansible|
          ansible.become = true
          ansible.playbook = "ansible/glusto.yml"
          ansible.limit = "node-0"
          ansible.inventory_path = "dist/hosts.ini"
        end
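        # The actual Gluster deployment runs from inside node-0: ansible is
        # assumed to be available there already (machine_config.yml /
        # glusto.yml are expected to take care of that), and /vagrant is the
        # rsync'd ./dist directory containing hosts.ini, ansible.cfg and the
        # gluster playbooks.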
node.vm.provision "shell", inline: <<-SHELL
set -u
echo "Running Gluster Ansible on node-0 to deploy Gluster..."
PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_CONFIG='/vagrant/ansible.cfg' ansible-playbook --limit="gluster_servers" --inventory-file=/vagrant/hosts.ini --extra-vars "node_count=#{node_count}" /vagrant/gluster.yml
echo "Cleaning up created volume, before running tests..."
PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_CONFIG='/vagrant/ansible.cfg' ansible-playbook --limit="node-1" --inventory-file=/vagrant/hosts.ini /vagrant/gluster-cleanup.yml
SHELL
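        # The same deployment can in principle be re-run by hand later, e.g.
        # via "vagrant ssh node-0" and repeating the ansible-playbook
        # commands above from inside the VM.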
      end # if num == 0
    end # node
  end # reverse_each
end