6 changes: 6 additions & 0 deletions RDU-Scale/ffwd/README
@@ -0,0 +1,6 @@
Use:
https://infrared.readthedocs.io/en/stable/
Testing:
ir tempest ...
http://pyshaker.readthedocs.io/en/latest/installation.html
pip install psutil
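
A minimal sketch of how these pieces might be used together; the infrared plugin flags shown are assumptions, so verify them against `ir tempest --help` and the linked docs:

```
# Assumes infrared is installed and a workspace targets the deployed cloud.
ir tempest --tests=smoke --openstack-version=10    # hypothetical flag values
# shaker drives data-plane load tests; psutil is one of its dependencies.
pip install pyshaker psutil
```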
64 changes: 64 additions & 0 deletions RDU-Scale/ffwd/newton-latest/1029p-storage-environment.yaml
@@ -0,0 +1,64 @@
## A Heat environment file which can be used to set up storage
## backends. Defaults to Ceph used as a backend for Cinder, Glance and
## Nova ephemeral storage.
resource_registry:
  OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
  OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
  OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml

parameter_defaults:

  #### BACKEND SELECTION ####

  ## Whether to enable the iSCSI backend for Cinder.
  CinderEnableIscsiBackend: false
  ## Whether to enable the rbd (Ceph) backend for Cinder.
  CinderEnableRbdBackend: true
  ## Cinder Backup backend can be either 'ceph' or 'swift'.
  CinderBackupBackend: ceph
  ## Whether to enable the NFS backend for Cinder.
  # CinderEnableNfsBackend: false
  ## Whether to enable the rbd (Ceph) backend for Nova ephemeral storage.
  NovaEnableRbdBackend: true
  ## Glance backend can be either 'rbd' (Ceph), 'swift' or 'file'.
  GlanceBackend: rbd
  ## Gnocchi backend can be either 'rbd' (Ceph), 'swift' or 'file'.
  #GnocchiBackend: rbd
  ExtraConfig:
    #ceph::profile::params::fsid: eb2bb192-b1c9-11e6-9205-525400330667
    #ceph::profile::params::osd_pool_default_pg_num: 256
    #ceph::profile::params::osd_pool_default_pgp_num: 256
    ceph::profile::params::osd_pool_default_size: 3
    ceph::profile::params::osd_pool_default_min_size: 2
    ceph::profile::params::osd_recovery_max_active: 1
    ceph::profile::params::osd_max_backfills: 1
    ceph::profile::params::osd_recovery_op_priority: 1
  # OpenStack Ocata creates 8 Ceph OSD pools:
  # rbd, backups, images, manila_data, manila_metadata, metrics, vms, volumes
  CephPools:
    backups:
      pg_num: 8
      pgp_num: 8
    images:
      pg_num: 64
      pgp_num: 64
    manila_data:
      pg_num: 8
      pgp_num: 8
    manila_metadata:
      pg_num: 8
      pgp_num: 8
    metrics:
      pg_num: 8
      pgp_num: 8
    vms:
      pg_num: 64
      pgp_num: 64
    volumes:
      pg_num: 8
      pgp_num: 8

  CephStorageExtraConfig:
    # Just one OSD per 1029p.
    ceph::profile::params::osds:
      '/dev/nvme0n1': {}
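
As a sanity check on the pg_num choices above, the usual Ceph rule of thumb budgets roughly 100 placement groups per OSD divided by the replica size; a rough sketch of that arithmetic (the OSD count is a hypothetical for illustration):

```
# Rule of thumb: total PGs ~= (OSDs * 100) / replica_size, split across pools.
OSDS=6    # hypothetical: six 1029p storage nodes, one NVMe OSD each
SIZE=3    # matches osd_pool_default_size above
echo "PG budget: $(( OSDS * 100 / SIZE ))"   # 200
echo "Allocated: $(( 64 * 2 + 8 * 5 ))"      # 168 (images+vms at 64, five pools at 8)
```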
26 changes: 26 additions & 0 deletions RDU-Scale/ffwd/newton-latest/README.md
@@ -0,0 +1,26 @@
# Templates for deploying overcloud for Scale CI

These templates can be used for deploying an overcloud with the following deploy
command:
```
openstack overcloud deploy --templates \
  -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
  -e templates/network-environment.yaml \
  -e templates/deploy.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml \
  -r templates/roles_data.yaml \
  --ntp-server clock.redhat.com

```

It is assumed that the command is executed from /home/stack and the
configuration files in this directory are all present in /home/stack/templates
on the undercloud.

These templates were built on the assumption that undercloud provisioning
traffic runs over em2. Be sure to set *local_interface = em2* in
undercloud.conf when deploying the undercloud.
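
A quick way to set that option before installing the undercloud; editing the file by hand works equally well, `crudini` is just a convenience:

```
# Point the undercloud's provisioning (PXE) interface at em2.
crudini --set ~/undercloud.conf DEFAULT local_interface em2
grep local_interface ~/undercloud.conf   # expect: local_interface = em2
```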

## Hardware Details

The hardware consists of the following machine types:

* R620
* R630
* R720xd
* 1029U
* 1029P
5 changes: 5 additions & 0 deletions RDU-Scale/ffwd/newton-latest/debug.yaml
@@ -0,0 +1,5 @@
---
parameter_defaults:
  Debug: true
  ConfigDebug: true
  CephAnsiblePlaybookVerbosity: 2
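
debug.yaml is opt-in; appending it after the other environment files in the README's deploy command lets its parameter_defaults take precedence, e.g.:

```
# Later -e files override earlier ones, so append debug.yaml last.
openstack overcloud deploy --templates ... -e templates/debug.yaml
```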
3 changes: 3 additions & 0 deletions RDU-Scale/ffwd/newton-latest/environments/args.yaml
@@ -0,0 +1,3 @@
parameter_defaults:
  ComputeKernelArgs: "intel_iommu=on iommu=pt"
  ComputeHostnameFormat: "1029p"
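
These two parameters feed the firstboot script further down: nodes whose hostname contains the format string get the kernel args appended to GRUB and are rebooted. A quick post-deploy spot check on a 1029p compute node:

```
# After the firstboot reboot, the IOMMU args should be on the kernel command line.
grep -q 'intel_iommu=on' /proc/cmdline && echo "kernel args applied"
```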
8 changes: 8 additions & 0 deletions RDU-Scale/ffwd/newton-latest/environments/compute-params.yaml
@@ -0,0 +1,8 @@
parameter_defaults:
  NovaPCIPassthrough:
    - vendor_id: "144d"
      product_id: "a804"
      device_type: "type-PCI"
  # 1029PComputeExtraConfig:
  #   nova::api::pci_alias: '[{ name: "nvme", product_id: "a804", vendor_id: "144d", device_type: "type-PCI"}]'
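
The whitelist above exposes Samsung NVMe controllers (vendor 144d, product a804) for PCI passthrough. If the commented-out pci_alias were enabled on the controller side, a flavor could request one device per instance; a sketch using the alias name `nvme` from that commented config:

```
# Request one passthrough NVMe device per instance via the 'nvme' alias.
openstack flavor create --ram 8192 --disk 40 --vcpus 4 nvme.small
openstack flavor set nvme.small --property "pci_passthrough:alias"="nvme:1"
```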

@@ -0,0 +1,7 @@
parameter_defaults:
  HeatMaxResourcesPerStack: -1
  NovaSchedulerDefaultFilters: ['RetryFilter','AvailabilityZoneFilter','RamFilter','DiskFilter','ComputeFilter','ComputeCapabilitiesFilter','ImagePropertiesFilter','ServerGroupAntiAffinityFilter','ServerGroupAffinityFilter','PciPassthroughFilter']
  NovaSchedulerAvailableFilters: ['nova.scheduler.filters.all_filters']
  # ControllerExtraConfig:
  #   nova::api::pci_alias: '[{ name: "nvme", product_id: "a804", vendor_id: "144d", device_type: "type-PCI"}]'

2 changes: 2 additions & 0 deletions RDU-Scale/ffwd/newton-latest/environments/firstboot-env.yaml
@@ -0,0 +1,2 @@
resource_registry:
  OS::TripleO::NodeUserData: ../firstboot/first-boot.yaml
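
As with debug.yaml, this mapping only takes effect when the file is passed at deploy time; the relative path assumes environments/ and firstboot/ sit side by side under templates/:

```
# Wire the firstboot userdata (kernel args + disk wipe) into the deployment.
openstack overcloud deploy --templates ... -e templates/environments/firstboot-env.yaml
```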
83 changes: 83 additions & 0 deletions RDU-Scale/ffwd/newton-latest/firstboot/first-boot.yaml
@@ -0,0 +1,83 @@
heat_template_version: 2014-10-16

description: >
  This is an example showing how you can do firstboot configuration
  of the nodes via cloud-init. To enable this, replace the default
  mapping of OS::TripleO::NodeUserData in ../overcloud_resource_registry*

parameters:
  ComputeKernelArgs:
    description: >
      Space-separated list of kernel args to append to GRUB.
      The given args are appended to the existing args of
      GRUB_CMDLINE_LINUX in /etc/default/grub.
      Example: "intel_iommu=on"
    type: string
    default: ""
  ComputeHostnameFormat:
    type: string
    default: ""

resources:
  userdata:
    type: OS::Heat::MultipartMime
    properties:
      parts:
      - config: {get_resource: compute_kernel_args}
      - config: {get_resource: wipe_disk}

  # Verify via the logs in /var/log/cloud-init.log on the overcloud node
  compute_kernel_args:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        str_replace:
          template: |
            #!/bin/bash
            set -x
            FORMAT=$COMPUTE_HOSTNAME_FORMAT
            if [[ $(hostname) == *$FORMAT* ]] ; then
              sed "s/^\(GRUB_CMDLINE_LINUX=\".*\)\"/\1 $KERNEL_ARGS\"/g" -i /etc/default/grub
              grub2-mkconfig -o /etc/grub2.cfg

              # Sometimes the IP on the provisioning network has not yet been acquired
              # when cloud-init runs, which makes the metadata query fail; loop for up
              # to ~10 seconds until network connectivity is ready.
              i=0
              while [ $i -lt 5 ]; do
                NETWORK=$(curl -m 10 http://169.254.169.254/openstack/latest/network_data.json)
                if [ $? -eq 0 ]; then
                  # Rebooting without running os-net-config leaves the default ifcfg
                  # scripts, which bring up the interfaces in alphanumeric order; if
                  # DHCPDISCOVER fails on one interface, network.service fails and does
                  # not try the others. Here we identify the provisioning interface,
                  # keep its BOOTPROTO as dhcp, and set BOOTPROTO=none on all other
                  # interfaces, so network.service invokes DHCP only on the
                  # provisioning network. The provisioning interface is identified from
                  # the node's metadata, which provides its MAC address.
                  # NOTE: Only one provisioning network interface is supported.

                  MAC=$(echo $NETWORK | jq -r ".links[0].ethernet_mac_address")
                  IFACE=$(ip a | grep $MAC -B1 | awk 'NR==1{print $2;}' | cut -d ":" -f1)
                  find /etc/sysconfig/network-scripts/ -name 'ifcfg-*' ! -name 'ifcfg-'$IFACE -type f -exec sed 's/^BOOTPROTO=.*/BOOTPROTO=none/g' -i {} +

                  reboot
                  break
                fi
                sleep 2
                i=`expr $i + 1`
              done
            fi
          params:
            $KERNEL_ARGS: {get_param: ComputeKernelArgs}
            $COMPUTE_HOSTNAME_FORMAT: {get_param: ComputeHostnameFormat}

  wipe_disk:
    type: OS::Heat::SoftwareConfig
    properties:
      config: {get_file: ./wipe_disk.sh}

outputs:
  # This means get_resource from the parent template will get the userdata, see:
  # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
  # Note this is new for Kilo; an alternative is returning a value and then using
  # get_attr in the parent template instead.
  OS::stack_id:
    value: {get_resource: userdata}
26 changes: 26 additions & 0 deletions RDU-Scale/ffwd/newton-latest/firstboot/wipe_disk.sh
@@ -0,0 +1,26 @@
#!/bin/bash
set -x ; VLOG=/var/log/ospd/firstboot-wipe-disk.log ; exec &> >(tee -a "${VLOG}")

echo "Number of disks detected: $(lsblk -no NAME,TYPE,MOUNTPOINT | grep "disk" | awk '{print $1}' | wc -l)"
for DEVICE in `lsblk -no NAME,TYPE,MOUNTPOINT | grep "disk" | awk '{print $1}'`
do
ROOTFOUND=0
echo "Checking /dev/$DEVICE..."
echo "Number of partitions on /dev/$DEVICE: $(expr $(lsblk -n /dev/$DEVICE | awk '{print $7}' | wc -l) - 1)"
for MOUNTS in `lsblk -n /dev/$DEVICE | awk '{print $7}'`
do
if [ "$MOUNTS" = "/" ]
then
ROOTFOUND=1
fi
done
if [ $ROOTFOUND = 0 ]
then
echo "Root not found in /dev/${DEVICE}"
echo "Wiping disk /dev/${DEVICE}"
sgdisk -Z /dev/${DEVICE}
sgdisk -g /dev/${DEVICE}
else
echo "Root found in /dev/${DEVICE}"
fi
done
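
A minimal way to confirm the wipe behaved as intended on a node; /dev/nvme0n1 is the 1029p OSD disk from the storage environment above, so adjust per machine type:

```
lsblk -no NAME,TYPE,MOUNTPOINT               # root (/) should sit on exactly one disk
sgdisk -p /dev/nvme0n1                       # wiped disks print an empty GPT
tail /var/log/ospd/firstboot-wipe-disk.log   # per-disk decisions logged by this script
```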
59 changes: 59 additions & 0 deletions RDU-Scale/ffwd/newton-latest/network-environment.yaml
@@ -0,0 +1,59 @@
resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/r620-controller.yaml
  #OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/1029p-controller.yaml
  OS::TripleO::CephStorage::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/r620-cephstorage.yaml
  #OS::TripleO::CephStorage::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/1029p-cephstorage.yaml
  OS::TripleO::P1029Compute::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/1029p-compute.yaml
  OS::TripleO::P1029Compute::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
  OS::TripleO::P1029Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
  OS::TripleO::P1029Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
  OS::TripleO::P1029Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
  OS::TripleO::P1029Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
  OS::TripleO::R630Compute::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/r630-compute.yaml
  OS::TripleO::R630Compute::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
  OS::TripleO::R630Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
  OS::TripleO::R630Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
  OS::TripleO::R630Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
  OS::TripleO::R630Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml
  OS::TripleO::R620Compute::Net::SoftwareConfig: /home/stack/ffwd/newton/nic-configs/r620-compute.yaml
  OS::TripleO::R620Compute::Ports::ExternalPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
  OS::TripleO::R620Compute::Ports::InternalApiPort: /usr/share/openstack-tripleo-heat-templates/network/ports/internal_api.yaml
  OS::TripleO::R620Compute::Ports::StoragePort: /usr/share/openstack-tripleo-heat-templates/network/ports/storage.yaml
  OS::TripleO::R620Compute::Ports::StorageMgmtPort: /usr/share/openstack-tripleo-heat-templates/network/ports/noop.yaml
  OS::TripleO::R620Compute::Ports::TenantPort: /usr/share/openstack-tripleo-heat-templates/network/ports/tenant.yaml

parameter_defaults:
  NeutronBridgeMappings: "datacentre:br-ex"
  NeutronExternalNetworkBridge: ""
  InternalApiNetCidr: 172.26.0.0/16
  TenantNetCidr: 172.27.0.0/16
  StorageNetCidr: 172.28.0.0/16
  StorageMgmtNetCidr: 172.29.0.0/16
  ManagementNetCidr: 172.30.0.0/16
  ExternalNetCidr: 172.21.0.0/16
  ControlPlaneSubnetCidr: 16
  InternalApiAllocationPools: [{'start': '172.26.0.3', 'end': '172.26.255.254'}]
  TenantAllocationPools: [{'start': '172.27.0.3', 'end': '172.27.255.254'}]
  StorageAllocationPools: [{'start': '172.28.0.3', 'end': '172.28.255.254'}]
  StorageMgmtAllocationPools: [{'start': '172.29.0.3', 'end': '172.29.255.254'}]
  ManagementAllocationPools: [{'start': '172.30.0.3', 'end': '172.30.255.254'}]
  ExternalAllocationPools: [{'start': '172.21.0.3', 'end': '172.21.250.250'}]
  # Set to the router gateway on the external network
  ExternalInterfaceDefaultRoute: 172.21.0.1
  PublicVirtualFixedIPs: [{'ip_address':'172.21.0.10'}]
  # Gateway router for the provisioning network (or undercloud IP)
  ControlPlaneDefaultRoute: 172.31.0.1
  # The IP address of the EC2 metadata server, generally the undercloud IP
  EC2MetadataIp: 172.31.0.1
  # Define the DNS servers (maximum 2) for the overcloud nodes
  DnsServers: ["10.11.5.19"]
  InternalApiNetworkVlanID: 301
  StorageNetworkVlanID: 302
  StorageMgmtNetworkVlanID: 303
  TenantNetworkVlanID: 304
  ManagementNetworkVlanID: 305
  ExternalNetworkVlanID: 10
  NtpServer: ["clock.redhat.com"]
  # Set to "br-ex" if using floating IPs on native VLAN on bridge br-ex