Appendix C. Different interfaces on same compute node YAML files

This section provides sample YAML files as a reference for adding SR-IOV and DPDK interfaces on the same compute node.

Note

These templates come from a fully configured environment and include parameters unrelated to NFV that might not be relevant or appropriate for your deployment.

C.1. Sample SR-IOV and DPDK on the same compute node YAML files

This section provides sample DPDK and SR-IOV YAML files as a reference.

C.1.1. first-boot.yaml

heat_template_version: 2014-10-16

description: >
  This is an example showing how you can do firstboot configuration
  of the nodes via cloud-init.  To enable this, replace the default
  mapping of OS::TripleO::NodeUserData in ../overcloud_resource_registry*

parameters:
  ComputeKernelArgs:
    description: >
      Space separated list of kernel args to be added to grub.
      The given args will be appended to the existing args of GRUB_CMDLINE_LINUX in /etc/default/grub
      Example: "intel_iommu=on default_hugepagesz=1GB hugepagesz=1G hugepages=1"
    type: string
    default: ""
  ComputeHostnameFormat:
    type: string
    default: ""
  NeutronDpdkCoreList:
    description: >
      List of logical cores for the PMD threads. This is a mandatory parameter.
    type: string
  NeutronDpdkSocketMemory:
    description: Memory, in MB, allocated for each socket
    default: ""
    type: string
    constraints:
      - allowed_pattern: "'[0-9,]+'"
  NeutronVhostuserSocketDir:
    description: The vhost-user socket directory for OVS.
    default: ""
    type: string
  HostIsolatedCoreList:
    description: >
      A list or range of physical CPU cores to be tuned as isolated_cores.
      The given args will be appended to the tuned cpu-partitioning profile.
      Ex. HostIsolatedCoreList: '4-12' will tune cores from 4-12
    type: string
    default: ""
  HostCpusList:
    description: >
      List of logical cores to be used by ovs-dpdk processes (dpdk-lcore-mask)
    type: string
    constraints:
      - allowed_pattern: "'[0-9,]+'"

resources:
  userdata:
    type: OS::Heat::MultipartMime
    properties:
      parts:
      - config: {get_resource: set_dpdk_params}
      - config: {get_resource: install_tuned}
      - config: {get_resource: compute_kernel_args}

  # Verify the logs in /var/log/cloud-init.log on the overcloud node
  set_dpdk_params:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        str_replace:
          template: |
            #!/bin/bash
            set -x
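            # get_mask converts a comma-separated list of CPU cores into the
            # hexadecimal bitmask format that OVS expects. For example, the
            # core list "2,22,3,23" sets bits 2, 3, 22 and 23, which produces
            # the mask c0000c.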
            get_mask()
            {
              local list=$1
              local mask=0
              declare -a bm
              max_idx=0
              for core in $(echo $list | sed 's/,/ /g')
              do
                  index=$(($core/32))
                  bm[$index]=0
                  if [ $max_idx -lt $index ]; then
                     max_idx=$(($index))
                  fi
              done
              for ((i=$max_idx;i>=0;i--));
              do
                  bm[$i]=0
              done
              for core in $(echo $list | sed 's/,/ /g')
              do
                  index=$(($core/32))
                  temp=$((1<<$(($core % 32))))
                  bm[$index]=$((${bm[$index]} | $temp))
              done

              printf -v mask "%x" "${bm[$max_idx]}"
              for ((i=$max_idx-1;i>=0;i--));
              do
                  printf -v hex "%08x" "${bm[$i]}"
                  mask+=$hex
              done
              printf "%s" "$mask"
            }

            FORMAT=$COMPUTE_HOSTNAME_FORMAT
            if [[ -z $FORMAT ]] ; then
              FORMAT="compute" ;
            else
              # Assumption: only %index% and %stackname% are the variables in Host name format
              FORMAT=$(echo $FORMAT | sed  's/\%index\%//g' | sed 's/\%stackname\%//g') ;
            fi
            if [[ $(hostname) == *$FORMAT* ]] ; then
              # 42477 is the kolla hugetlbfs gid value.
              getent group hugetlbfs >/dev/null || \
                groupadd hugetlbfs -g 42477 && groupmod -g 42477 hugetlbfs

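              # Derive the hex CPU masks from the core lists above and apply
              # the DPDK settings to OVS before the service starts.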
              pmd_cpu_mask=$( get_mask $PMD_CORES )
              host_cpu_mask=$( get_mask $LCORE_LIST )
              socket_mem=$(echo $SOCKET_MEMORY | sed s/\'//g )
              ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
              ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem=$socket_mem
              ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask=$pmd_cpu_mask
              ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask=$host_cpu_mask
            fi
          params:
            $COMPUTE_HOSTNAME_FORMAT: {get_param: ComputeHostnameFormat}
            $LCORE_LIST: {get_param: HostCpusList}
            $PMD_CORES: {get_param: NeutronDpdkCoreList}
            $SOCKET_MEMORY: {get_param: NeutronDpdkSocketMemory}

  install_tuned:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        str_replace:
          template: |
            #!/bin/bash
            FORMAT=$COMPUTE_HOSTNAME_FORMAT
            if [[ -z $FORMAT ]] ; then
              FORMAT="compute" ;
            else
              # Assumption: only %index% and %stackname% are the variables in Host name format
              FORMAT=$(echo $FORMAT | sed  's/\%index\%//g' | sed 's/\%stackname\%//g') ;
            fi
            if [[ $(hostname) == *$FORMAT* ]] ; then
              # Install the tuned package
              yum install -y tuned-profiles-cpu-partitioning

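              # Add or update isolated_cores in the cpu-partitioning profile,
              # then activate the profile.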
              tuned_conf_path="/etc/tuned/cpu-partitioning-variables.conf"
              if [ -n "$TUNED_CORES" ]; then
                grep -q "^isolated_cores" $tuned_conf_path
                if [ "$?" -eq 0 ]; then
                  sed -i 's/^isolated_cores=.*/isolated_cores=$TUNED_CORES/' $tuned_conf_path
                else
                  echo "isolated_cores=$TUNED_CORES" >> $tuned_conf_path
                fi
                tuned-adm profile cpu-partitioning
              fi
            fi
          params:
            $COMPUTE_HOSTNAME_FORMAT: {get_param: ComputeHostnameFormat}
            $TUNED_CORES: {get_param: HostIsolatedCoreList}

  compute_kernel_args:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        str_replace:
          template: |
            #!/bin/bash
            FORMAT=$COMPUTE_HOSTNAME_FORMAT
            if [[ -z $FORMAT ]] ; then
              FORMAT="compute" ;
            else
              # Assumption: only %index% and %stackname% are the variables in Host name format
              FORMAT=$(echo $FORMAT | sed  's/\%index\%//g' | sed 's/\%stackname\%//g') ;
            fi
            if [[ $(hostname) == *$FORMAT* ]] ; then
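              # Append the hugepage/IOMMU kernel arguments and the isolated cores
              # to GRUB_CMDLINE_LINUX, rebuild the grub config, and reboot to apply.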
              sed 's/^\(GRUB_CMDLINE_LINUX=".*\)"/\1 $KERNEL_ARGS isolcpus=$TUNED_CORES"/g' -i /etc/default/grub ;
              grub2-mkconfig -o /etc/grub2.cfg
              reboot
            fi
          params:
            $KERNEL_ARGS: {get_param: ComputeKernelArgs}
            $COMPUTE_HOSTNAME_FORMAT: {get_param: ComputeHostnameFormat}
            $TUNED_CORES: {get_param: HostIsolatedCoreList}

outputs:
  # This means get_resource from the parent template will get the userdata, see:
  # http://docs.openstack.org/developer/heat/template_guide/composition.html#making-your-template-resource-more-transparent
  # Note this is new-for-kilo, an alternative is returning a value then using
  # get_attr in the parent template instead.
  OS::stack_id:
    value: {get_resource: userdata}

C.1.2. network-environment.yaml

resource_registry:
  # Specify the relative/absolute path to the config files you want to use to override the defaults.
  OS::TripleO::Compute::Net::SoftwareConfig: nic-configs/compute.yaml
  OS::TripleO::Controller::Net::SoftwareConfig: nic-configs/controller.yaml
  OS::TripleO::NodeUserData: first-boot.yaml

parameter_defaults:
  # Customize all these values to match the local environment
  InternalApiNetCidr: 10.10.10.0/24
  TenantNetCidr: 10.10.2.0/24
  StorageNetCidr: 10.10.3.0/24
  StorageMgmtNetCidr: 10.10.4.0/24
  ExternalNetCidr: 172.20.12.112/28
  # CIDR subnet mask length for provisioning network
  ControlPlaneSubnetCidr: '24'
  InternalApiAllocationPools: [{'start': '10.10.10.10', 'end': '10.10.10.200'}]
  TenantAllocationPools: [{'start': '10.10.2.100', 'end': '10.10.2.200'}]
  StorageAllocationPools: [{'start': '10.10.3.100', 'end': '10.10.3.200'}]
  StorageMgmtAllocationPools: [{'start': '10.10.4.100', 'end': '10.10.4.200'}]
  # Use an External allocation pool which will leave room for floating IPs
  ExternalAllocationPools: [{'start': '172.20.12.114', 'end': '172.20.12.125'}]
  # Set to the router gateway on the external network
  ExternalInterfaceDefaultRoute: 172.20.12.126
  # Gateway router for the provisioning network (or Undercloud IP)
  ControlPlaneDefaultRoute: 192.168.24.1
  # Generally the IP of the Undercloud
  EC2MetadataIp: 192.168.24.1
  InternalApiNetworkVlanID: 10
  TenantNetworkVlanID: 11
  StorageNetworkVlanID: 12
  StorageMgmtNetworkVlanID: 13
  ExternalNetworkVlanID: 14
  # Define the DNS servers (maximum 2) for the overcloud nodes
  DnsServers: ["8.8.8.8","8.8.4.4"]
  # May be set to "br-ex" if using floating IPs only on the native VLAN on bridge br-ex
  NeutronExternalNetworkBridge: "''"
  # The tunnel type for the tenant network (vxlan or gre). Set to '' to disable tunneling.
  NeutronTunnelTypes: 'vxlan'
  # The tenant network type for Neutron (vlan or vxlan).
  NeutronNetworkType: 'vlan'
  # The OVS logical->physical bridge mappings to use.
  NeutronBridgeMappings: 'dpdk_mgmt:br-link0,tenant:br-link1'
  # The Neutron ML2 and OpenVSwitch vlan mapping range to support.
  NeutronNetworkVLANRanges: 'tenant:22:22,tenant:25:25'
  # Nova flavor to use.
  OvercloudControlFlavor: controller
  OvercloudComputeFlavor: compute
  # Number of nodes to deploy.
  ControllerCount: 1
  ComputeCount: 1
  # NTP server configuration.
  NtpServer: clock.redhat.com

  # Sets overcloud nodes custom names
  # http://docs.openstack.org/developer/tripleo-docs/advanced_deployment/node_placement.html#custom-hostnames
  ControllerHostnameFormat: 'controller-%index%'
  ComputeHostnameFormat: 'compute-%index%'
  CephStorageHostnameFormat: 'ceph-%index%'
  ObjectStorageHostnameFormat: 'swift-%index%'

  ########################
  # OVS DPDK configuration
  ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
  ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
  # List of cores to be used for DPDK Poll Mode Driver
  NeutronDpdkCoreList: "'2,22,3,23'"
  # Number of memory channels to be used for DPDK
  NeutronDpdkMemoryChannels: "4"
  # Memory, in MB, to allocate to DPDK per NUMA socket
  NeutronDpdkSocketMemory: "'3072,1024'"
  # DPDK driver type used to bind the interfaces (vfio-pci)
  NeutronDpdkDriverType: "vfio-pci"
  # The vhost-user socket directory for OVS
  NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"

  ########################
  # Additional settings
  ########################
  # Reserved RAM for host processes
  NovaReservedHostMemory: 4096
  # A list or range of physical CPU cores to reserve for virtual machine processes.
  NovaVcpuPinSet: "4-19,24-39"
  # An array of filters used by Nova to filter a node. These filters will be applied in the order they are listed,
  # so place your most restrictive filters first to make the filtering process more efficient.
  NovaSchedulerDefaultFilters:
    - "RetryFilter"
    - "AvailabilityZoneFilter"
    - "RamFilter"
    - "ComputeFilter"
    - "ComputeCapabilitiesFilter"
    - "ImagePropertiesFilter"
    - "ServerGroupAntiAffinityFilter"
    - "ServerGroupAffinityFilter"
    - "PciPassthroughFilter"
    - "NUMATopologyFilter"
    - "AggregateInstanceExtraSpecsFilter"
  # Kernel arguments for Compute node
  ComputeKernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32 iommu=pt intel_iommu=on"
  # A list or range of physical CPU cores to be tuned.
  # The given args will be appended to the tuned cpu-partitioning profile.
  HostIsolatedCoreList: "2-19,22-39"
  # List of logical cores to be used by ovs-dpdk processes (dpdk-lcore-mask)
  HostCpusList: "'0,20,1,21'"
  NovaLibvirtRxQueueSize: 1024
  NovaLibvirtTxQueueSize: 1024

  # List of supported pci vendor devices in the format VendorID:ProductID.
  # Not merged into RHOSP 10; see BZ 1448919
  NeutronSupportedPCIVendorDevs: ['8086:154c', '8086:154d', '8086:10ed']
  NovaPCIPassthrough:
    - devname: "p5p2"
      physical_network: "tenant"

  NeutronPhysicalDevMappings: "tenant:p5p2"
  NeutronSriovNumVFs: "p5p2:5"
  # Global MTU.
  NeutronGlobalPhysnetMtu: 9000
  # Configure the classname of the firewall driver to use for implementing security groups.
  NeutronOVSFirewallDriver: openvswitch

  SshServerOptions:
    UseDns: 'no'
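
With the values above, the first-boot script computes pmd-cpu-mask c0000c from NeutronDpdkCoreList ('2,22,3,23') and dpdk-lcore-mask 300003 from HostCpusList ('0,20,1,21'). As a rough check (hypothetical output, assuming the defaults shown here), the resulting OVS configuration on a deployed compute node looks similar to:

ovs-vsctl get Open_vSwitch . other_config
{dpdk-init="true", dpdk-lcore-mask="300003", dpdk-socket-mem="3072,1024", pmd-cpu-mask="c0000c"}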

C.1.3. controller.yaml

heat_template_version: 2015-04-30

description: >
  Software Config to drive os-net-config to configure VLANs for the
  controller role.

parameters:
  ControlPlaneIp:
    default: ''
    description: IP address/subnet on the ctlplane network
    type: string
  ExternalIpSubnet:
    default: ''
    description: IP address/subnet on the external network
    type: string
  InternalApiIpSubnet:
    default: ''
    description: IP address/subnet on the internal API network
    type: string
  StorageIpSubnet:
    default: ''
    description: IP address/subnet on the storage network
    type: string
  StorageMgmtIpSubnet:
    default: ''
    description: IP address/subnet on the storage mgmt network
    type: string
  TenantIpSubnet:
    default: ''
    description: IP address/subnet on the tenant network
    type: string
  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
    default: ''
    description: IP address/subnet on the management network
    type: string
  ExternalNetworkVlanID:
    default: ''
    description: Vlan ID for the external network traffic.
    type: number
  InternalApiNetworkVlanID:
    default: ''
    description: Vlan ID for the internal_api network traffic.
    type: number
  StorageNetworkVlanID:
    default: 30
    description: Vlan ID for the storage network traffic.
    type: number
  StorageMgmtNetworkVlanID:
    default: 40
    description: Vlan ID for the storage mgmt network traffic.
    type: number
  TenantNetworkVlanID:
    default: ''
    description: Vlan ID for the tenant network traffic.
    type: number
  ManagementNetworkVlanID:
    default: 23
    description: Vlan ID for the management network traffic.
    type: number
  ExternalInterfaceDefaultRoute:
    default: ''
    description: default route for the external network
    type: string
  ControlPlaneSubnetCidr: # Override this via parameter_defaults
    default: '24'
    description: The subnet CIDR of the control plane network.
    type: string
  DnsServers: # Override this via parameter_defaults
    default: []
    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
    type: comma_delimited_list
  EC2MetadataIp: # Override this via parameter_defaults
    description: The IP address of the EC2 metadata server.
    type: string

resources:
  OsNetConfigImpl:
    type: OS::Heat::StructuredConfig
    properties:
      group: os-apply-config
      config:
        os_net_config:
          network_config:
            -
              type: interface
              name: nic1
              use_dhcp: false
              defroute: false
            -
              type: interface
              name: nic2
              addresses:
                -
                  ip_netmask:
                    list_join:
                      - '/'
                      - - {get_param: ControlPlaneIp}
                        - {get_param: ControlPlaneSubnetCidr}
              routes:
                -
                  ip_netmask: 169.254.169.254/32
                  next_hop: {get_param: EC2MetadataIp}
            -
              type: linux_bond
              name: bond_api
              bonding_options: "mode=active-backup"
              use_dhcp: false
              dns_servers: {get_param: DnsServers}
              members:
                -
                  type: interface
                  name: nic3
                  primary: true
                -
                  type: interface
                  name: nic4
            -
              type: vlan
              vlan_id: {get_param: InternalApiNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: InternalApiIpSubnet}
            -
              type: vlan
              vlan_id: {get_param: TenantNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: TenantIpSubnet}
            -
              type: vlan
              vlan_id: {get_param: StorageNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: StorageIpSubnet}
            -
              type: vlan
              vlan_id: {get_param: StorageMgmtNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: StorageMgmtIpSubnet}
            -
              type: vlan
              vlan_id: {get_param: ExternalNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: ExternalIpSubnet}
              routes:
                -
                  default: true
                  next_hop: {get_param: ExternalInterfaceDefaultRoute}
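            # Provider bridges referenced by NeutronBridgeMappings in network-environment.yaml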
            -
              type: ovs_bridge
              name: br-link0
              use_dhcp: false
              members:
                -
                  type: interface
                  name: nic7
                  mtu: 9000
                -
                  type: vlan
                  vlan_id: {get_param: TenantNetworkVlanID}
                  mtu: 9000
                  addresses:
                    -
                      ip_netmask: {get_param: TenantIpSubnet}
            -
              type: ovs_bridge
              name: br-link1
              use_dhcp: false
              members:
                -
                  type: interface
                  name: nic8
                  mtu: 9000

outputs:
  OS::stack_id:
    description: The OsNetConfigImpl resource.
    value: {get_resource: OsNetConfigImpl}

C.1.4. compute.yaml

heat_template_version: 2015-04-30

description: >
  Software Config to drive os-net-config to configure VLANs for the
  compute role.

parameters:
  ControlPlaneIp:
    default: ''
    description: IP address/subnet on the ctlplane network
    type: string
  ExternalIpSubnet:
    default: ''
    description: IP address/subnet on the external network
    type: string
  InternalApiIpSubnet:
    default: ''
    description: IP address/subnet on the internal API network
    type: string
  TenantIpSubnet:
    default: ''
    description: IP address/subnet on the tenant network
    type: string
  StorageNetworkVlanID:
    default: 30
    description: Vlan ID for the storage network traffic.
    type: number
  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
    default: ''
    description: IP address/subnet on the management network
    type: string
  InternalApiNetworkVlanID:
    default: ''
    description: Vlan ID for the internal_api network traffic.
    type: number
  TenantNetworkVlanID:
    default: ''
    description: Vlan ID for the tenant network traffic.
    type: number
  ManagementNetworkVlanID:
    default: 23
    description: Vlan ID for the management network traffic.
    type: number
  StorageIpSubnet:
    default: ''
    description: IP address/subnet on the storage network
    type: string
  StorageMgmtIpSubnet:
    default: ''
    description: IP address/subnet on the storage mgmt network
    type: string
  ControlPlaneSubnetCidr: # Override this via parameter_defaults
    default: '24'
    description: The subnet CIDR of the control plane network.
    type: string
  ControlPlaneDefaultRoute: # Override this via parameter_defaults
    description: The default route of the control plane network.
    type: string
  DnsServers: # Override this via parameter_defaults
    default: []
    description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
    type: comma_delimited_list
  EC2MetadataIp: # Override this via parameter_defaults
    description: The IP address of the EC2 metadata server.
    type: string
  ExternalInterfaceDefaultRoute:
    default: ''
    description: default route for the external network
    type: string

resources:
  OsNetConfigImpl:
    type: OS::Heat::StructuredConfig
    properties:
      group: os-apply-config
      config:
        os_net_config:
          network_config:
            -
              type: interface
              name: nic1
              use_dhcp: false
              defroute: false
            -
              type: interface
              name: nic2
              use_dhcp: false
              addresses:
               -
                 ip_netmask:
                   list_join:
                     - '/'
                     - - {get_param: ControlPlaneIp}
                       - {get_param: ControlPlaneSubnetCidr}
              routes:
               -
                 ip_netmask: 169.254.169.254/32
                 next_hop: {get_param: EC2MetadataIp}
               -
                 default: true
                 next_hop: {get_param: ControlPlaneDefaultRoute}
            -
              type: linux_bond
              name: bond_api
              bonding_options: "mode=active-backup"
              use_dhcp: false
              dns_servers: {get_param: DnsServers}
              members:
                -
                  type: interface
                  name: nic3
                  primary: true
                -
                  type: interface
                  name: nic4
            -
              type: vlan
              vlan_id: {get_param: InternalApiNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: InternalApiIpSubnet}
            -
              type: vlan
              vlan_id: {get_param: StorageNetworkVlanID}
              device: bond_api
              addresses:
                -
                  ip_netmask: {get_param: StorageIpSubnet}
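            # OVS-DPDK user-space bridge; dpdk0 is bound to the PCI device backing nic7
            # using the driver set by NeutronDpdkDriverType (vfio-pci)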
            -
              type: ovs_user_bridge
              name: br-link0
              ovs_extra:
                -
                  str_replace:
                    template: set port br-link0 tag=_VLAN_TAG_
                    params:
                      _VLAN_TAG_: {get_param: TenantNetworkVlanID}
              addresses:
                -
                  ip_netmask: {get_param: TenantIpSubnet}
              use_dhcp: false
              members:
                -
                  type: ovs_dpdk_port
                  name: dpdk0
                  mtu: 9000
                  ovs_extra:
                  - set interface $DEVICE mtu_request=$MTU
                  - set interface $DEVICE options:n_rxq=2
                  members:
                    -
                      type: interface
                      name: nic7
                      primary: true

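            # Physical function interface intended for the SR-IOV virtual functions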
            - type: interface
              name: p7p2
              mtu: 9000
              use_dhcp: false
              defroute: false
              nm_controlled: true
              hotplug: true

outputs:
  OS::stack_id:
    description: The OsNetConfigImpl resource.
    value: {get_resource: OsNetConfigImpl}
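
Once deployed, instances consume these interfaces through Neutron ports. As a minimal sketch (the network name sriov-net is hypothetical), an SR-IOV port is requested with the direct VNIC type, while ports on the DPDK (vhost-user) datapath need no special VNIC type:

openstack port create --network sriov-net --vnic-type direct sriov-port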

C.1.5. overcloud_deploy.sh

#!/bin/bash

openstack overcloud deploy \
--templates \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/neutron-ovs-dpdk.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/neutron-sriov.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/ovs-dpdk-permissions.yaml \
-e /home/stack/ospd-10-vxlan-vlan-dpdk-sriov-ctlplane-bonding/network-environment.yaml \
--log-file overcloud_install.log &> overcloud_install.log