-
Notifications
You must be signed in to change notification settings - Fork 24
Expand file tree
/
Copy pathVagrantfile
More file actions
100 lines (79 loc) · 2.89 KB
/
Vagrantfile
File metadata and controls
100 lines (79 loc) · 2.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Create single node Storage Scale cluster.
#
# Relies on ../../shared/Vagrantfile.common (loaded below) for shared
# settings, including $StorageScale_version used by the install script.

# Message printed after the VM is up and running (see post_up_message).
$message = <<EOT
--------------------------------------------------------------------------
Created virtual environment for IBM Storage Scale.
User Guide:
https://github.com/IBM/StorageScaleVagrant/blob/master/README.md
To logon on the management node execute:
vagrant ssh
To connect to the Storage Scale GUI, in a web browser:
https://localhost:8888
--------------------------------------------------------------------------
EOT

# Set provider — forwarded to the install/demo scripts below.
$StorageScaleVagrant_provider = 'libvirt'

# Load common settings. `load` (not require) is used so the shared file
# shares globals with this Vagrantfile.
load File.expand_path('../../shared/Vagrantfile.common', __FILE__)

# Customize configuration specific settings
Vagrant.configure("2") do |config|
  # Use the Vagrant box prepared for Storage Scale
  config.vm.box = "StorageScale_base"
  # config.vm.box_url = "./prep-box/StorageScale_base.box"

  # Customize resources of virtual machines
  config.vm.provider "libvirt" do |libvirt|
    libvirt.qemu_use_session = false
    libvirt.memory = 8192
    libvirt.cpus = 2
  end

  config.ssh.forward_agent = true
  config.ssh.insert_key = false
  #config.hostmanager.enabled = true

  #
  # The single node cluster comprises one node only
  #
  config.vm.define "m1", primary: true do |node|
    node.vm.hostname = "m1.example.com"

    # Management network
    node.vm.network "private_network", ip: "10.1.1.11"
    # Data transfer network
    node.vm.network "private_network", ip: "10.1.2.11"
    # CES network
    node.vm.network "private_network", ip: "192.168.56.11"
    # Forward the Storage Scale GUI (HTTPS) to https://localhost:8888
    node.vm.network "forwarded_port", guest: 443, host: 8888

    # Attach five small + two large disks for Storage Scale NSDs
    node.vm.provider "libvirt" do |libvirt|
      # 5 small disks
      # 3 disks will be allocated to fs1, system pool, single failure group
      # 2 disks will be allocated to cesShared, system pool, fg1
      small = 5
      small.times do
        libvirt.storage :file, :size => '4G', :cache => 'none'
      end
      # 2 large disks, will be allocated to fs1 capacity pool
      large = 2
      large.times do
        libvirt.storage :file, :size => '6G'
      end
    end

    # Set message to be printed after VMs are up and running
    # NOTE(review): set on `config` (global) rather than `node`; equivalent
    # here since there is only one machine — consider using `node` for
    # consistency with the other settings in this block.
    config.vm.post_up_message = $message

    # Sync Storage Scale install package to admin node
    # NOTE(review): also set on `config` (applies to all machines); fine
    # for a single-node setup.
    config.vm.synced_folder "../software", "/software", type: "rsync"

    # Install and configure single node Storage Scale cluster
    node.vm.provision "shell",
      name: "Install and configure single node Storage Scale cluster",
      inline: "
        /vagrant/install/script.sh #{$StorageScaleVagrant_provider} #{$StorageScale_version}
      "

    # Configure Storage Scale for demo purposes
    node.vm.provision "shell",
      name: "Configure Storage Scale for demo purposes",
      inline: "
        /vagrant/demo/script.sh #{$StorageScaleVagrant_provider}
      "
  end
end