All variables

This page contains an automatically generated list of all default variables for each role. Unless labeled mandatory, there are reasonable defaults for each variable as documented below. In most cases, overriding the defaults is not necessary.

role-controller-content/defaults/main.yml

---
# all variables prefixed with "vault_" should not be declared here, but in a vault.yml file
#
# this is the password of the admin user for logging into controller
# NOTE: this password is also used as a default in several surveys and machine credentials, but can be overridden when launching the job template
# MANDATORY
# NOTE(review): the default references the variable itself; in Ansible this
# raises a "recursive loop" error unless the variable is supplied at a higher
# precedence (extra vars, inventory, vault) — presumably a deliberate pattern
# to enforce the MANDATORY contract above. Confirm before changing.
controller_admin_password: "{{ controller_admin_password }}"

#
# automation controller
#

# these are settings to configure the automation controller

# the password for Postgres
# MANDATORY
controller_pg_password: "{{ controller_pg_password }}"

# All controller jobs will use the account "ansible" to log into the target machine.
# therefore controller needs to store the private key for this user
# note the key has to be provided in one line!
# MANDATORY
controller_ansible_private_key: "{{ controller_ansible_private_key }}"

# the password to decrypt and open vault safes (e.g. this file)
controller_vault_password: "{{ controller_vault_password }}"

# configure the Slack authentication token to send notifications to emeaansiblessa.slack.com
# controller_slack_token: "{{ vault_controller_slack_token }}"

#
# private automation hub
#

# enable automation hub support, if set to true, also specify token
controller_automation_hub: false
# create an API token on console.redhat.com to synchronize the Red Hat automation hub with your private automation hub
# controller_automation_hub_token: "{{ vault_controller_automation_hub_token }}"

# FQDN for automation hub
controller_ah_fqdn: "{{ controller_ah_instance_name }}.{{ dns_suffix }}.ansible-labs.de"

#
# Use Cases
#

# All of them are optional, having them disabled will slightly improve provisioning time

# Install Job Templates to deploy and manage Satellite
use_case_satellite: false
# when enabling this use case, which Red Hat repositories do you want to sync?
# NOTE: Both settings are optional and will increase provisioning time significantly since the role will wait for the initial lazy sync to complete
# RHEL 8: AppStream and BaseOS
satellite_sync_rhel8: false
# RHEL 7: base OS
satellite_sync_rhel7: false

# Windows use case default variables
use_case_windows: false

# Enable Workflow LAMP multi tier use case
use_case_workflow: false

# Enable Multi Cloud demo use case
use_case_multicloud: false

# Enable Workflow IdM use case
use_case_idm: false

# Enable Tomcat Demo use case
use_case_tomcat: false

# Enable job templates for an Ansible LearnFest
use_case_learnfest: false

# Enable job templates for EDA demos
controller_eda_enable: false

#
# NOTE: Enable at least one provider below and set type variable accordingly
#

# set type to
# "ec2" to deploy on Amazon AWS
# "azure" to deploy on Microsoft AZure
# "gcp" to deploy on Google Cloud
# "vmware" to deploy on VMware
type: ec2

#
# Amazon AWS
#

# create AWS credentials
# NOTE: if type is set to "ec2", this setting is ignored and the credentials are always created
controller_enable_provider_ec2: false

# AWS credentials are read from environment variables
controller_aws_secret_key: "{{ lookup('env', 'AWS_SECRET_ACCESS_KEY') }}"
controller_aws_access_key: "{{ lookup('env', 'AWS_ACCESS_KEY_ID') }}"

#
# Azure
#

# create Azure credentials
# NOTE: if type is set to "azure", this setting is ignored and the credentials are always created
controller_enable_provider_azure: false

# public key to inject into the linux instance for SSH
# MANDATORY
azure_ssh_public_key: "{{ azure_ssh_public_key }}"

#
# VMware
#

# create VMware credentials
# NOTE: if type is set to "vmware", this setting is ignored and the credentials are always created
controller_enable_provider_vmware: false

# setting VMware credentials
vmware_password: "{{ lookup('env', 'VMWARE_PASSWORD') }}"
vmware_username: "{{ lookup('env', 'VMWARE_USER') }}"
vmware_hostname: "{{ lookup('env', 'VMWARE_HOST') }}"

#
# Google Cloud
#

# create GCP credentials
# NOTE: if type is set to "gcp", this setting is ignored and the credentials are always created
controller_enable_provider_gcp: false

# the password is in fact the SSH key
# it is highly recommended to create a service account in Google and not user/password
gcp_password: "{{ gcp_password }}"
gcp_username: "{{ gcp_username }}"
# the project in which all objects will be created
gcp_project: "{{ gcp_project }}"
# the region used to deploy the instance, network, etc.
# note that controller does not support all regions from GCP! Verify first or controller will give an error message.
gcp_region: "{{ gcp_region }}"

#
# Let's encrypt
#

# by default we use let's encrypt production and therefore validate the certificates
letsencrypt_staging: false

role-controller-setup/defaults/main.yml

---
# all variables prefixed with "vault_" should not be declared here, but in a vault.yml file
#

# this is the password of the admin user for logging into automation controller
controller_admin_password: "{{ vault_controller_admin_password }}"

# the password used by Postgres
controller_pg_password: "{{ controller_admin_password }}"

# enable private Automation Hub
controller_ah_enable: false
controller_ah_fqdn: "{{ controller_ah_instance_name }}.{{ dns_suffix }}.ansible-labs.de"
controller_fqdn: "{{ controller_instance_name }}.{{ dns_suffix }}.ansible-labs.de"

# enable EDA controller
controller_eda_enable: false
controller_eda_fqdn: "{{ controller_eda_instance_name }}.{{ dns_suffix }}.ansible-labs.de"

# Whether to use the staging servers of Let's Encrypt (false will use Production)
letsencrypt_staging: false
# set this to True if you don't want let's encrypt to be setup
letsencrypt_skip: false

# disable Dynamic DNS by default
dns_update: false

role-epel/defaults/main.yml

---
# defaults file for epel

role-insights-registration/defaults/main.yml

---
# defaults file for insights-registration
#
# automatically apply configuration on registration
# NOTE: booleans written as lowercase `true`/`false` — `TRUE` only parses as a
# boolean via YAML 1.1 implicit typing and is flagged by yamllint's `truthy` rule
auto_config: true
# authentication method passed to the registration (value taken as-is)
authmethod: BASIC
# human-readable name shown for this host; omitted when inventory_hostname is empty
display_name: "{{ inventory_hostname | default(omit, true) }}"
# optional proxy; falls back to null when insights_proxy is unset or empty
proxy: "{{ insights_proxy | default(none, true) }}"
# enable the malware detection collection
malware_detection: true

role-instance/defaults/main.yml

---
# all variables prefixed with "vault_" should not be declared here, but in a vault.yml file
#

# the name of the instance or virtual machine to create
# MANDATORY
# instance_name: example

# add the instance to the specified group
# default = all_hosts
# instance_group: webapp

# wait for instance to be reachable via SSH
# MANDATORY
instance_wait_for_connection: false

# set instance type to linux or windows depending on OS
# MANDATORY
instance_type: linux

# set remove to true to delete instances and all associated resources
remove: false

# the flavor of this instance
# for GCP use n2-standard-2 for a RHAAP node
# for EC2 use t3.large for a RHAAP node
# for Azure use Standard_DS2 for a RHAAP node
# MANDATORY
# except with type: vmware
instance_flavor: t3.large
# GCP allows to set individual disk size for first disk
# value is in GB
# MANDATORY on GCP, optional on AWS, ignored on all other providers
instance_disk_size: 20

# Additional ports to open for in security groups, outside of the default ones, which are:
# Linux: 22, 80, 443
# Windows: 80, 443, 3389, 5986
# instance_additional_port: "3000"

# in which platform to create the instance or vm
# currently either azure, ec2, gcp, or vmware
# MANDATORY
type: ec2

#
# Amazon AWS
#

# set type to "ec2" to use these settings

# which EC2 region to use
# MANDATORY
ec2_region: "eu-central-1"

# supports raw public key data for use with ansible-navigator
# ec2_key_pair: "ssh-rsa AAAAABBCCCDDDEEEEFFFGGGG"

# OPTIONAL (does not work with ansible-navigator)
# ec2_key_pair: "/path/to/file"

# which AMI to use as a template
# If not defined, the AMI will be searched based on the filter below (ec2_image_name)
# ami-0f58468b80db2db66 for RHEL 7 in eu-central-1
# MANDATORY: ec2_ami_id or ec2_image_name (see below)
ec2_ami_id: ""

# which AMI's to search from AWS
# RHEL 7: "RHEL-7.9_HVM_GA*x86_64*"
# RHEL 8: "RHEL-8.3_HVM*x86_64*"
# Windows Server 2019 full: "Windows_Server-2019-English-Full-Base*"
# Windows Server 2019 core: "Windows_Server-2019-English-Core-Base*"
# Windows Server 2016 full: "Windows_Server-2016-English-Full-Base*"
# Windows Server 2016 core: "Windows_Server-2016-English-Core-Base*"
# MANDATORY: ec2_ami_id or ec2_image_name
# use aws CLI to get the latest AMI ID
# aws ec2 describe-images --filter "Name=name,Values=RHEL-8.6.0_HVM-*x86_64*"
ec2_image_name: "RHEL-9.4.0_HVM-*x86_64*"

# VPC objects will inherit the instance name if not explicitly overridden
# MANDATORY, default is {{ instance_name }}
# override this variable to add the new instance to an existing VPC
ec2_vpc_name: "{{ instance_name }}"

# EBS Device information (device name, size) to use for the instance and its volume size, to override AWS defaults.
# Root disk for CentOS/RHEL/Windows is /dev/sda1, any other value will create an additional disk.
ec2_ebs_device_name: /dev/sda1

#
# Azure
#

# set type to "azure" to use these settings

# Azure resource group name
# override this value if you use Azure Open Environment credentials from RHPDS
# MANDATORY
azure_resource_group: "{{ instance_name }}"

# use a specific private network for all instances
azure_private_network: "{{ azure_resource_group }}"

# public key to inject into the Linux instance for SSH
# MANDATORY
azure_ssh_public_key: "{{ azure_ssh_public_key }}"

# standard flavor is 2 CPU, 8 GB RAM
# instance_flavor: Standard_DS2
# for 4 CPU , 16 GB RAM
# instance_flavor: Standard_DS4
# for 1 CPU, 1 GB RAM
# instance_flavor: Standard_B1s
# get list of images with the following command: (might require you to do az login first)
# az vm image list-skus --output table --offer RHEL --location westeurope --publisher RedHat
# MANDATORY
azure_image_offer: RHEL
azure_image_publisher: RedHat
azure_image_sku: 9-lvm-gen2

# set disk type for this instance
# MANDATORY
azure_disk_type: "StandardSSD_LRS"

#
# Digital Ocean
#

# OAuth Authentication token
# MANDATORY
do_oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}"

# Which OS image to use
# MANDATORY
do_image_id: centos-stream-9-x64

# In which region to deploy
# MANDATORY
do_region_id: fra1

# The name of the project
do_project_name: "{{ instance_name }}"

# The name of the firewall
do_firewall_name: "{{ instance_name }}"

# Set this to false to not create firewall for this instance
do_create_firewall: true

# The name of the VPC to connect to
do_vpc_name: "{{ instance_name }}"

#
# Google Cloud
#

# set type to "gcp" to use these settings

# which template to use
# RHEL 7: projects/rhel-cloud/global/images/family/rhel-7
# CentOS 7: projects/centos-cloud/global/images/family/centos-7
# RHEL 8: projects/rhel-cloud/global/images/family/rhel-8
# CentOS 8: projects/centos-cloud/global/images/family/centos-8
# Windows Server 2019: projects/windows-cloud/global/images/family/windows-2019
# MANDATORY
gcp_template: projects/rhel-cloud/global/images/family/rhel-9

# which GCP Region and Zone to use for the new VM
# MANDATORY
gcp_region: europe-west2

# GCP Zone within that region
# MANDATORY
gcp_zone: europe-west2-a

# create resources in a specific project
# MANDATORY
gcp_project: rhaap

# use the specified network name for connecting instance
# this can be helpful if multiple instances are deployed to avoid hitting the quota
gcp_network_name: "{{ instance_name }}"

#
# VMware
#

# set type to "vmware" to use these settings

vcenter_validate_certs: false

# vmware_datacenter: '<vmware datacenter>'
# vmware_cluster: '<vmware cluster>'
# vmware_folder: '<vmware datacenter>/vm/<folder>'
# vmware_datastore: '<vmw datastore name>'
# vmware_network: '<vmware network>'
# vmware_template: '<existing RHEL 8 template>'
#
# individual instance definition - default values SMALL
instance_parameters:
  vmware_instance_diskgb: '10'
  vmware_instance_memmb: '8192'
  vmware_instance_cpus: '2'

#
# DNS
#

# update DNS records
# MANDATORY
dns_update: false

# the DNS sub domain <dns_suffix>.ansible-labs.de
# MANDATORY if dns_update is true
dns_suffix: example

# the key used to interact with the DNS server
# NOTE: This is the content of the file, not the path to the file
# MANDATORY if dns_update is true
dns_key: "{{ dns_key }}"

# the private key to interact with the DNS server
# NOTE: This is the content of the file, not the path to the file
# MANDATORY if dns_update is true
dns_private: "{{ dns_key_private }}"

role-nsupdate/defaults/main.yml

---
# the short hostname of the DNS record (e.g. controller)
# MANDATORY
# shortname:

# the IP address for the DNS record (A record)
# MANDATORY
# ipaddress:

# the DNS key as a string
# generated by the DNS admin
# MANDATORY
# key:

# the private DNS key as a string
# generated by the DNS admin
# MANDATORY
# private:

# DNS sub domain below ansible-labs.de or custom DNS zone
# MANDATORY
# dns_suffix:

role-package-downloader/defaults/main.yml

---
# defaults file for role-package-downloader
#
# no usable defaults exist for these — presumably the caller must supply all
# three (verify against the role's tasks). A bare `key:` parses as YAML null;
# written explicitly below to avoid the ambiguity flagged by yamllint's
# `empty-values` rule (behavior is identical).
rh_api_offline_token: null
rh_product_path: null
rh_product_filename: null

role-rhc-install/defaults/main.yml

---
# defaults file for role

role-rhsm-registration/defaults/main.yml

---
# defaults file for rhsm-registration
rhsm_user: ""
rhsm_password: ""
rhsm_poolid: ""
activationkey: ""
org_id: ""

role-satellite-content/defaults/main.yml

---
# admin password for Satellite login
satellite_admin_password: "{{ vault_satellite_admin_password }}"
# Satellite manifest to import
satellite_manifest_file: ~/satellite-manifest.zip
# synchronize RHEL 8 content
satellite_sync_rhel8: false
# synchronize RHEL 7 content
satellite_sync_rhel7: false

role-satellite-setup/defaults/main.yml

---
# specify your admin password; it's recommended to store it in a vault
satellite_admin_password: "{{ vault_satellite_admin_password }}"
# if you have problems with the installer checks (e.g. DNS lookup checks), set this to true
satellite_skip_checks: false