diff --git a/.github/workflows/build-ee-pr.yml b/.github/workflows/build-ee-pr.yml
index 2127f062a67..eded9e2030a 100644
--- a/.github/workflows/build-ee-pr.yml
+++ b/.github/workflows/build-ee-pr.yml
@@ -19,7 +19,7 @@ jobs:
with:
tag: pr-${{ github.event.number }}
labels: |-
- quay.expires-after=1d
+ quay.expires-after=7d
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.revision=${{ github.sha }}
diff --git a/.gitlab-ci.yaml b/.gitlab-ci.yaml
new file mode 100644
index 00000000000..1b7c830dd75
--- /dev/null
+++ b/.gitlab-ci.yaml
@@ -0,0 +1,46 @@
+---
+stages:
+ - static-analysis
+
+.static-analysis:
+ stage: static-analysis
+ interruptible: true
+ needs: []
+
+sonarqube-check:
+ extends: .static-analysis
+ image: images.paas.redhat.com/alm/sonar-scanner-alpine:latest
+ variables:
+ LANG: "en_US.UTF-8"
+ GIT_DEPTH: "0" # Tells git to fetch all the branches of the project, required by the analysis task
+ SONAR_HOST_URL: https://sonarqube.corp.redhat.com
+ SONAR_SCM_PROVIDER: git
+ SONAR_SCANNER_OPTS: "-Xmx512m"
+ SONAR_USER_HOME: /tmp/.sonar
+ KUBERNETES_MEMORY_REQUEST: "512Mi"
+ KUBERNETES_MEMORY_LIMIT: "4Gi"
+ KUBERNETES_EPHEMERAL_STORAGE_REQUEST: "512Mi"
+ KUBERNETES_EPHEMERAL_STORAGE_LIMIT: "1Gi"
+ cache:
+ key: "${CI_JOB_NAME}"
+ paths:
+ - "${SONAR_USER_HOME}/cache"
+ script:
+ - >
+ set -x; sonar-scanner -Dsonar.python.version="3.7, 3.8, 3.9"
+ ${SONAR_SETTINGS:+-Dproject.settings="$SONAR_SETTINGS"}
+ ${SONAR_QUALITY_GATE_WAIT:+-Dsonar.qualitygate.wait="$SONAR_QUALITY_GATE_WAIT"}
+ ${SONAR_SOURCE_ENCODING:+-Dsonar.sourceEncoding="$SONAR_SOURCE_ENCODING"}
+ ${SONAR_PROJECT_KEY:+-Dsonar.projectKey="$SONAR_PROJECT_KEY"}
+ ${SONAR_PROJECT_NAME:+-Dsonar.projectName="$SONAR_PROJECT_NAME"}
+ ${SONAR_PROJECT_VERSION:+-Dsonar.projectVersion="$SONAR_PROJECT_VERSION"}
+ ${SONAR_SOURCES:+-Dsonar.sources="$SONAR_SOURCES"}
+ ${SONAR_EXCLUSIONS:+-Dsonar.exclusions="$SONAR_EXCLUSIONS"}
+ ${SONAR_SCM_PROVIDER:+-Dsonar.scm.provider="$SONAR_SCM_PROVIDER"}
+ ${CI_MERGE_REQUEST_IID:+-Dsonar.pullrequest.key="$CI_MERGE_REQUEST_IID"}
+ ${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:+-Dsonar.pullrequest.branch="$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME"}
+ ${CI_MERGE_REQUEST_TARGET_BRANCH_NAME:+-Dsonar.pullrequest.base="$CI_MERGE_REQUEST_TARGET_BRANCH_NAME"}
+ timeout: 15 minutes
+ allow_failure: true
+ tags:
+ - shared
diff --git a/ansible/cloud_providers/osp_infrastructure_deployment.yml b/ansible/cloud_providers/osp_infrastructure_deployment.yml
index 7cd57418653..6d881556e09 100644
--- a/ansible/cloud_providers/osp_infrastructure_deployment.yml
+++ b/ansible/cloud_providers/osp_infrastructure_deployment.yml
@@ -49,17 +49,20 @@
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
tasks:
- name: Gather instance facts
- os_server_info:
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ openstack.cloud.server_info:
+ all_projects: false
server: "*"
filters:
metadata:
guid: "{{ guid }}"
env_type: "{{ env_type }}"
- register: r_osp_facts
+ register: r_osp_server_facts
- name: debug osp_facts
debug:
- var: r_osp_facts
+ var: r_osp_server_facts
verbosity: 2
- name: Run infra-osp-dns Role
diff --git a/ansible/configs/ansible-bu-workshop/README.adoc b/ansible/configs/ansible-bu-workshop/README.adoc
new file mode 100644
index 00000000000..678cd6b72c7
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/README.adoc
@@ -0,0 +1,3 @@
+== Overview
+
+*ansible-bu-workshop*
diff --git a/ansible/configs/ansible-bu-workshop/default_vars.yml b/ansible/configs/ansible-bu-workshop/default_vars.yml
new file mode 100644
index 00000000000..31ac9cd6356
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/default_vars.yml
@@ -0,0 +1,79 @@
+---
+# -------------------------------------------------
+# Infra variables
+# -------------------------------------------------
+env_type: ansible-bu-workshop
+output_dir: /tmp/workdir # Writable working scratch directory
+email: "{{ env_type }}@opentlc.com"
+guid: "{{ env_type }}"
+uuid: "{{ guid }}"
+cloud_provider: ec2
+#[ Login Settings ]
+install_ipa_client: false
+#[ Run a full yum update ]
+update_packages: false
+#[ This var is used to identify stack (cloudformation, azure resourcegroup, ...) ]
+project_tag: "{{ env_type }}-{{ guid }}"
+#[ Variables you should ***NOT*** Configure for your deployment ]
+#[ You can, but you usually wouldn't need to. ]
+admin_user: opentlc-mgr
+ansible_user: ec2-user
+remote_user: ec2-user
+#[ Is this running from Red Hat Ansible Tower ]
+tower_run: false
+software_to_deploy: none
+
+# -------------------------------------------------
+# FTL Settings
+# -------------------------------------------------
+install_ftl: false
+ftl_use_python3: true
+
+# -------------------------------------------------
+# Role: set_env_authorized_key
+# -------------------------------------------------
+set_env_authorized_key: true
+key_name: opentlc_admin_backdoor.pem
+deploy_local_ssh_config_location: "{{output_dir}}/"
+env_authorized_key: "{{guid}}key"
+ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem # NOTE(review): key_name above already ends in ".pem" — confirm this doesn't resolve to ".pem.pem"
+
+# -------------------------------------------------
+# Role: control-user
+# -------------------------------------------------
+install_control_user: true
+student_name: student1
+student_password: "{{ common_password }}"
+control_user_name: "{{ student_name }}"
+control_user_password: "{{ common_password }}"
+control_user_private_group: "{{ omit }}"
+control_user_ssh_config: ./files/ssh_config.j2
+
+# -------------------------------------------------
+# Role: bastion-lite
+# -------------------------------------------------
+install_bastion_lite: true
+
+# -------------------------------------------------
+# Role: set-repositories
+# -------------------------------------------------
+repo_method: satellite
+use_content_view: true
+
+# -------------------------------------------------
+# Role: common
+# -------------------------------------------------
+install_common: true
+common_packages_el8:
+ - python3
+ - unzip
+ - bash-completion
+ - tmux
+ - bind-utils
+ - wget
+ - nano
+ - git
+ - vim-enhanced
+ - httpd-tools
+ - python3-pip
+ - tree
diff --git a/ansible/configs/ansible-bu-workshop/default_vars_ec2.yml b/ansible/configs/ansible-bu-workshop/default_vars_ec2.yml
new file mode 100644
index 00000000000..b74a92e336f
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/default_vars_ec2.yml
@@ -0,0 +1,130 @@
+---
+# -------------------------------------------------
+# AWS Network settings
+# -------------------------------------------------
+subdomain_base_short: "{{ guid }}"
+subdomain_base_suffix: ".example.opentlc.com"
+subdomain_base: "{{ subdomain_base_short }}{{ subdomain_base_suffix }}"
+aws_dns_zone_private: "example.com."
+aws_dns_zone_private_chomped: "example.com"
+
+# -------------------------------------------------
+# AWS EC2 Environment Sizing
+# -------------------------------------------------
+default_instance_image: "RHEL87GOLD-latest"
+default_rootfs_size: 50
+#[ Instance type ]
+default_instance_type: "t3a.medium"
+
+# -------------------------------------------------
+# AWS EC2 Security Groups
+# -------------------------------------------------
+security_groups:
+ - name: BastionSG
+ rules:
+ - name: BastionHTTPSPorts
+ description: "HTTPS Public"
+ from_port: 443
+ to_port: 443
+ protocol: tcp
+ cidr: "0.0.0.0/0"
+ rule_type: Ingress
+
+ - name: BastionHTTPPorts
+ description: "HTTP Public"
+ from_port: 80
+ to_port: 80
+ protocol: tcp
+ cidr: "0.0.0.0/0"
+ rule_type: Ingress
+
+ - name: BastionSSHPorts
+ description: "Bastion ssh"
+ from_port: 22
+ to_port: 22
+ protocol: tcp
+ cidr: "0.0.0.0/0"
+ rule_type: Ingress
+
+ - name: NodeSG
+ rules:
+ - name: FromNodeSGtcp
+      description: "Allow everything from NodeSG nodes"
+ from_port: 0
+ to_port: 65535
+ protocol: tcp
+ from_group: NodeSG
+ rule_type: Ingress
+
+ - name: FromNodeSGudp
+      description: "Allow everything from NodeSG nodes"
+ from_port: 0
+ to_port: 65535
+ protocol: udp
+ from_group: NodeSG
+ rule_type: Ingress
+
+ - name: FromBastionTCP
+ description: "Allow everything from Bastion"
+ from_port: 0
+ to_port: 65535
+ protocol: tcp
+ from_group: BastionSG
+ rule_type: Ingress
+
+ - name: FromBastionUDP
+ description: "Allow everything from Bastion"
+ from_port: 0
+ to_port: 65535
+ protocol: udp
+ from_group: BastionSG
+ rule_type: Ingress
+
+# -------------------------------------------------
+# AWS EC2 Instances
+# -------------------------------------------------
+instances:
+ - name: "{{ bastion_instance_name | default('ansible-1') }}"
+ count: 1
+ unique: true
+ public_dns: true
+ dns_loadbalancer: true
+ floating_ip: true
+ image: "{{ bastion_instance_image | default(default_instance_image) }}"
+ flavor:
+ ec2: "{{bastion_instance_type | default(default_instance_type) }}"
+ tags:
+ - key: "AnsibleGroup"
+ value: "bastions"
+ - key: "ostype"
+ value: "linux"
+ - key: "instance_filter"
+ value: "{{ env_type }}-{{ email }}"
+ rootfs_size: "{{ default_rootfs_size }}"
+ security_groups:
+ - BastionSG
+ - DefaultSG
+
+ - name: "node"
+ count: 3
+ public_dns: false
+ image: "{{ node_instance_image | default(default_instance_image) }}"
+ flavor:
+ ec2: "{{node_instance_type | default(default_instance_type) }}"
+ tags:
+ - key: "AnsibleGroup"
+ value: "nodes"
+ - key: "ostype"
+ value: "rhel"
+ - key: "instance_filter"
+ value: "{{ env_type }}-{{ email }}"
+ rootfs_size: "{{ default_rootfs_size }}"
+ security_groups:
+ - DefaultSG
+ - NodeSG
+
+# -------------------------------------------------
+# Ansible hosts_template.j2 inventory groups
+# -------------------------------------------------
+inventory_groups:
+ - nodes
diff --git a/ansible/configs/ansible-bu-workshop/destroy_env.yml b/ansible/configs/ansible-bu-workshop/destroy_env.yml
new file mode 100644
index 00000000000..a1dfde08bfe
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/destroy_env.yml
@@ -0,0 +1,18 @@
+---
+- name: Import default destroy playbook
+ import_playbook: ../../cloud_providers/{{cloud_provider}}_destroy_env.yml
+
+- name: Destroy Bookbag
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tasks:
+
+ - name: Include role to destroy Bookbag
+ when:
+ - bookbag_git_repo is defined
+ include_role:
+ name: bookbag
+ vars:
+ ACTION: destroy
diff --git a/ansible/configs/ansible-bu-workshop/files/hosts_template.j2 b/ansible/configs/ansible-bu-workshop/files/hosts_template.j2
new file mode 100644
index 00000000000..db0b18939a4
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/files/hosts_template.j2
@@ -0,0 +1,18 @@
+
+{% if groups.nodes is defined %}
+[web]
+{% for host in groups['nodes'] %}
+{{ host.split('.')[0] }} ansible_host={{ host }}
+{% endfor %}
+{% endif %}
+
+
+[control]
+ansible-1 ansible_host={{ groups['bastions'][0] }}
+
+[all:vars]
+timeout=60
+ansible_user={{ remote_user }}
+ansible_ssh_private_key_file="~/.ssh/{{ guid }}key.pem"
+ansible_ssh_common_args="-o StrictHostKeyChecking=no"
+ansible_become=true
diff --git a/ansible/configs/ansible-bu-workshop/files/ssh_config.j2 b/ansible/configs/ansible-bu-workshop/files/ssh_config.j2
new file mode 100644
index 00000000000..60f3da720fa
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/files/ssh_config.j2
@@ -0,0 +1,10 @@
+Host *
+ User ec2-user
+ IdentityFile ~/.ssh/{{ guid }}key.pem
+ ForwardAgent yes
+ StrictHostKeyChecking no
+ ConnectTimeout 600
+ ConnectionAttempts 10
+ ControlMaster auto
+ ControlPath /tmp/%h-%r
+ ControlPersist 5m
diff --git a/ansible/configs/ansible-bu-workshop/post_infra.yml b/ansible/configs/ansible-bu-workshop/post_infra.yml
new file mode 100644
index 00000000000..8171fbb38ce
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/post_infra.yml
@@ -0,0 +1,13 @@
+- name: Step 002 Post Infrastructure
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step002
+ - post_infrastructure
+ tasks:
+ - debug:
+ msg: "Step 002 Post Infrastructure"
+
+
diff --git a/ansible/configs/ansible-bu-workshop/post_software.yml b/ansible/configs/ansible-bu-workshop/post_software.yml
new file mode 100644
index 00000000000..b44475c1af5
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/post_software.yml
@@ -0,0 +1,90 @@
+---
+
+- name: Step 005 post software
+ hosts: localhost
+ gather_facts: false
+ become: true
+ tags:
+ - step005_01
+ - post_software
+ tasks:
+ - debug:
+ msg: "Post-Software tasks Started"
+
+
+- name: Install Post Software workloads for bastion
+ hosts: bastions
+ become: true
+ tasks:
+ - name: Deploy Post Software workloads
+ when: post_software_workloads_for_bastion | default("") | length > 0
+ include_role:
+ name: "{{ _post_bastion }}"
+ loop: "{{ post_software_workloads_for_bastion }}"
+ loop_control:
+ loop_var: _post_bastion
+
+
+- name: Install Post Software workloads for gitlab
+ hosts: gitlab
+ become: true
+ tasks:
+ - name: Deploy Post Software workloads
+ when: post_software_workloads_for_gitlab | default("") | length > 0
+ include_role:
+ name: "{{ _post_gitlab }}"
+ loop: "{{ post_software_workloads_for_gitlab }}"
+ loop_control:
+ loop_var: _post_gitlab
+
+
+- name: Install Post Software workloads for nodes
+ hosts: nodes
+ become: true
+ tasks:
+ - name: Deploy Post Software workloads
+ when: post_software_workloads_for_nodes | default("") | length > 0
+ include_role:
+ name: "{{ _post_nodes }}"
+ loop: "{{ post_software_workloads_for_nodes }}"
+ loop_control:
+ loop_var: _post_nodes
+
+
+- name: Deploy user setup
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ tags:
+ - step005_02
+ - post_software
+ tasks:
+ - name: print out user.info
+ agnosticd_user_info:
+ msg: |
+ SSH Host: ssh {{ student_name }}@{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}
+ SSH Password: {{ student_password }}
+
+ - name: Save user data
+ agnosticd_user_info:
+ data:
+ ssh_command: "ssh {{ student_name }}@{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}"
+ ssh_password: "{{ student_password }}"
+ ssh_username: "{{ student_name }}"
+ cloud_provider: "{{ cloud_provider }}"
+ hostname: "{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}"
+ subdomain_base: "{{ subdomain_base }}"
+ subdomain_internal: "{{ aws_dns_zone_private_chomped | default('') }}"
+
+
+- name: PostSoftware flight-check
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tags:
+ - step005_03
+ - post_software
+ tasks:
+ - debug:
+ msg: "Post-Software checks completed successfully"
diff --git a/ansible/configs/multi-cloud-capsule/pre_infra.yml b/ansible/configs/ansible-bu-workshop/pre_infra.yml
similarity index 59%
rename from ansible/configs/multi-cloud-capsule/pre_infra.yml
rename to ansible/configs/ansible-bu-workshop/pre_infra.yml
index e6d3b50ab93..908d8ecf07d 100644
--- a/ansible/configs/multi-cloud-capsule/pre_infra.yml
+++ b/ansible/configs/ansible-bu-workshop/pre_infra.yml
@@ -1,13 +1,10 @@
- name: Step 000 Pre Infrastructure
hosts: localhost
connection: local
- become: false
gather_facts: false
+ become: false
tags:
- step001
- - pre_infrastructure
tasks:
- - name: Pre-Infra
- debug:
- msg: "Pre-Infra work is done"
-
+ - debug:
+ msg: "Step 000 Pre Infrastructure"
diff --git a/ansible/configs/ansible-bu-workshop/pre_software.yml b/ansible/configs/ansible-bu-workshop/pre_software.yml
new file mode 100644
index 00000000000..3f3903d3e44
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/pre_software.yml
@@ -0,0 +1,97 @@
+---
+- name: Step 003 Pre Software
+ hosts: localhost
+ gather_facts: false
+ become: false
+ tags:
+ - step003_1
+ - pre_software
+ tasks:
+ - debug:
+ msg: "Step 003 Pre Software"
+
+ - import_role:
+ name: infra-local-create-ssh_key
+ when: set_env_authorized_key | bool
+
+
+- name: Configure all hosts with Repositories, Common Files and Set environment key
+ hosts: all:!windows
+ become: true
+  gather_facts: false
+ tags:
+ - step003_2
+ - pre_software
+ roles:
+ - when: repo_method is defined
+ role: set-repositories
+
+ - when: install_common | bool
+ role: common
+
+ - when: set_env_authorized_key | bool
+ role: set_env_authorized_key
+
+- name: Configuring Bastion Hosts
+ hosts: bastions
+ become: true
+ tags:
+ - step003_3
+ - pre_software
+
+ roles:
+ - when: install_bastion_lite | bool
+ role: bastion-lite
+
+ - when: install_control_user | bool
+ role: control-user
+
+
+- name: Install Pre Software workloads
+ hosts: bastions
+ become: true
+ tasks:
+ - name: Deploy Pre Software workloads
+ when: pre_software_workloads_for_bastion | default("") | length > 0
+ include_role:
+ name: "{{ _pre_bastion }}"
+ loop: "{{ pre_software_workloads_for_bastion }}"
+ loop_control:
+ loop_var: _pre_bastion
+
+- name: Install Pre Software workloads for gitlab
+ hosts: gitlab
+ become: true
+ tasks:
+ - name: Deploy Pre Software workloads
+ when: pre_software_workloads_for_gitlab | default("") | length > 0
+ include_role:
+ name: "{{ _pre_gitlab }}"
+ loop: "{{ pre_software_workloads_for_gitlab }}"
+ loop_control:
+ loop_var: _pre_gitlab
+
+- name: Install Pre Software workloads for nodes
+ hosts: nodes
+ become: true
+ tasks:
+ - name: Deploy Pre Software workloads
+ when: pre_software_workloads_for_nodes | default("") | length > 0
+ include_role:
+ name: "{{ _pre_nodes }}"
+ loop: "{{ pre_software_workloads_for_nodes }}"
+ loop_control:
+ loop_var: _pre_nodes
+
+
+- name: PreSoftware flight-check
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tags:
+ - step003_4
+ - pre_software
+ tasks:
+ - debug:
+ msg: "Pre-Software checks completed successfully"
diff --git a/ansible/configs/ansible-bu-workshop/requirements.yml b/ansible/configs/ansible-bu-workshop/requirements.yml
new file mode 100644
index 00000000000..d381e0f8d50
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/requirements.yml
@@ -0,0 +1,7 @@
+collections:
+ - name: ansible.posix
+ version: 1.3.0
+ - name: infra.controller_configuration
+ version: 2.2.5
+ - name: ansible.workshops
+ version: 1.0.18
\ No newline at end of file
diff --git a/ansible/configs/ansible-bu-workshop/software.yml b/ansible/configs/ansible-bu-workshop/software.yml
new file mode 100644
index 00000000000..6993d842ea4
--- /dev/null
+++ b/ansible/configs/ansible-bu-workshop/software.yml
@@ -0,0 +1,60 @@
+---
+- name: Step 004 software
+ hosts: localhost
+ gather_facts: false
+ become: false
+ tags:
+ - step004_01
+ - software
+ tasks:
+ - debug:
+ msg: "Software tasks Started"
+
+- name: Install Software workloads
+ hosts: bastions
+ become: true
+ tasks:
+ - name: Deploy Software workloads
+ when: software_workloads_for_bastion | default("") | length > 0
+ include_role:
+ name: "{{ _software_bastion }}"
+ loop: "{{ software_workloads_for_bastion }}"
+ loop_control:
+ loop_var: _software_bastion
+
+- name: Install Software workloads for gitlab
+ hosts: gitlab
+ become: true
+ tasks:
+ - name: Deploy Software workloads
+ when: software_workloads_for_gitlab | default("") | length > 0
+ include_role:
+ name: "{{ _software_gitlab }}"
+ loop: "{{ software_workloads_for_gitlab }}"
+ loop_control:
+ loop_var: _software_gitlab
+
+- name: Install Software workloads for nodes
+ hosts: nodes
+ become: true
+ tasks:
+ - name: Deploy Software workloads
+ when: software_workloads_for_nodes | default("") | length > 0
+ include_role:
+ name: "{{ _software_nodes }}"
+ loop: "{{ software_workloads_for_nodes }}"
+ loop_control:
+ loop_var: _software_nodes
+
+
+- name: Software flight-check
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tags:
+ - step004_03
+ - software
+ tasks:
+ - debug:
+ msg: "Software checks completed successfully"
diff --git a/ansible/configs/ansible-lightspeed/default_vars_ec2.yml b/ansible/configs/ansible-lightspeed/default_vars_ec2.yml
index 998575aefbe..963ab015a24 100644
--- a/ansible/configs/ansible-lightspeed/default_vars_ec2.yml
+++ b/ansible/configs/ansible-lightspeed/default_vars_ec2.yml
@@ -14,6 +14,7 @@ aws_dns_zone_private_chomped: "example.com"
# -------------------------------------------------
default_instance_type: "t3a.medium"
default_instance_image: "RHEL91GOLD-latest"
+default_instance_name: "codeserver"
default_rootfs_size_node: 30
# -------------------------------------------------
@@ -58,7 +59,7 @@ security_groups:
# AWS EC2 Instances
# -------------------------------------------------
instances:
- - name: codeserver
+ - name: "{{ default_instance_name }}"
count: 1
unique: true
public_dns: true
diff --git a/ansible/configs/ansible-lightspeed/post_software.yml b/ansible/configs/ansible-lightspeed/post_software.yml
index a1239a5a8df..542220ebb42 100644
--- a/ansible/configs/ansible-lightspeed/post_software.yml
+++ b/ansible/configs/ansible-lightspeed/post_software.yml
@@ -37,16 +37,12 @@
- name: print out user.info
agnosticd_user_info:
msg: |
- noVNC Web URL: https://{{ code_server_hostname }}:6080/vnc.html?host={{ code_server_hostname }}&port=6080&autoconnect=true&resize=remote
- noVNC Password: {{ student_password }}
SSH Host: ssh {{ student_name }}@{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}
SSH Password: {{ student_password }}
- name: Save user data
agnosticd_user_info:
data:
- novnc_web_url: "https://{{ code_server_hostname }}:6080/vnc.html?host={{ code_server_hostname }}&port=6080&autoconnect=true&resize=remote"
- novnc_user_password: "{{ student_password }}"
ssh_command: "ssh {{ student_name }}@{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}"
ssh_password: "{{ student_password }}"
ssh_username: "{{ student_name }}"
@@ -55,7 +51,6 @@
subdomain_base: "{{ subdomain_base }}"
subdomain_internal: "{{ aws_dns_zone_private_chomped | default('') }}"
-
- name: PostSoftware flight-check
hosts: localhost
connection: local
diff --git a/ansible/configs/ansible-lightspeed/software.yml b/ansible/configs/ansible-lightspeed/software.yml
index 44d1305abba..88991151436 100644
--- a/ansible/configs/ansible-lightspeed/software.yml
+++ b/ansible/configs/ansible-lightspeed/software.yml
@@ -17,173 +17,56 @@
tags:
- step004
- bastion_tasks
-
tasks:
- - name: Install pip3
- ansible.builtin.package:
- name: python3-pip
-
- - name: Install certbot
- ansible.builtin.pip:
- name: certbot
- state: present
-
- - name: Generate letsencrypt certificate
- ansible.builtin.command: >-
- /usr/local/bin/certbot certonly
- --standalone
- -d {{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}
- -m rhpds-admins@redhat.com
- --agree-tos
- -n
-
- - name: Download noVNC
- ansible.builtin.get_url:
- url: https://github.com/novnc/noVNC/archive/refs/tags/v{{ novnc_version }}.tar.gz
- dest: /usr/local/src/v{{ novnc_version }}.tar.gz
- mode: '644'
-
- - name: Unarchive noVNC
- ansible.builtin.unarchive:
- src: /usr/local/src/v{{ novnc_version }}.tar.gz
- dest: /usr/local/src/
- remote_src: true
-
- - name: Copy novnc.service
- ansible.builtin.template:
- src: novnc.service
- dest: /etc/systemd/system/novnc.service
- mode: '644'
-
- - name: Enable and start service
- ansible.builtin.service:
- name: novnc
- state: started
- enabled: true
-
- - name: Git clone ansible lightspeed repo
- become_user: "{{ student_name }}"
- ansible.builtin.git:
- repo: https://github.com/redhat-gpte-devopsautomation/ansible-lightspeed.git
- dest: "/home/{{ student_name }}/ansible-lightspeed"
- version: main
-
- - name: Remove .git file
- ansible.builtin.file:
- path: "/home/{{ student_name }}/ansible-lightspeed/.git"
- state: absent
-
- - name: Set vscode repository
- ansible.builtin.yum_repository:
- name: code
- description: Visual Studio Code
- file: vscode
- baseurl: https://packages.microsoft.com/yumrepos/vscode
- enabled: true
- gpgkey: https://packages.microsoft.com/keys/microsoft.asc
- gpgcheck: true
-
- - name: Update rhel host
- ansible.builtin.package:
- name: '*'
- state: latest
-
- - name: Install code package
- ansible.builtin.package:
- name:
- - code
- - firefox
- - ansible-core
- state: present
-
- - name: Install ansible-lint
- ansible.builtin.pip:
- name: ansible-lint
- state: present
-
- - name: Create directory ~/.config/autostart
- ansible.builtin.file:
- path: "/home/{{ student_name }}/.config/autostart"
- state: directory
- mode: '755'
- owner: "{{ student_name }}"
- group: "{{ student_name }}"
-
- - name: Copy code.desktop to autostart
- ansible.builtin.copy:
- src: /usr/share/applications/code.desktop
- dest: "/home/{{ student_name }}/.config/autostart/code.desktop"
- remote_src: true
- mode: "644"
- owner: "{{ student_name }}"
- group: "{{ student_name }}"
-
- - name: Add --password-store=basic option to code.desktop
- ansible.builtin.lineinfile:
- path: "/home/{{ student_name }}/.config/autostart/code.desktop"
- regexp: "^Exec="
- firstmatch: true
- line: "Exec=/usr/share/code/code --unity-launch %F --password-store=basic"
-
- - name: Create extensions directory
- ansible.builtin.file:
- path: /tmp/extensions
- state: directory
- mode: '0755'
- owner: "{{ student_name }}"
- group: "{{ student_name }}"
-
- - name: Download vscode extensions
- ansible.builtin.get_url:
- url: "{{ item }}"
- dest: "/tmp/extensions/"
- validate_certs: false
- mode: '644'
- owner: "{{ student_name }}"
- group: "{{ student_name }}"
- loop: "{{ vscode_server_extension_urls }}"
-
- - name: Install vscode extensions in given order
- become_user: "{{ student_name }}"
- ansible.builtin.command: >-
- /usr/bin/code
- --install-extension
- /tmp/extensions/{{ item }}
- loop: "{{ vscode_server_extension_urls | map('urlsplit', 'path') | map('basename') | list }}"
-
- - name: VScode copy default settings
- ansible.builtin.template:
- src: ./files/settings.json.j2
- dest: "/home/{{ student_name }}/.config/Code/User/settings.json"
- mode: '644'
- owner: "{{ student_name }}"
- group: "{{ student_name }}"
-
- - name: Include rhel-graphical role
+ - name: Deploy Software workloads
+ when: software_workloads_for_bastion | default("") | length > 0
+ include_role:
+ name: "{{ _software_bastion }}"
+ loop: "{{ software_workloads_for_bastion }}"
+ loop_control:
+ loop_var: _software_bastion
+
+ # Ansible Lightspeed Workshop
+ - name: Setup ansible lightspeed demo block
+ when: ansible_lightspeed_setup_demo_repo | bool
+ block:
+ - name: Git clone ansible lightspeed repo
+ become_user: "{{ student_name }}"
+ ansible.builtin.git:
+ repo: https://github.com/redhat-gpte-devopsautomation/ansible-lightspeed.git
+ dest: "/home/{{ student_name }}/ansible-lightspeed"
+ version: main
+
+ - name: Remove .git file
+ ansible.builtin.file:
+ path: "/home/{{ student_name }}/ansible-lightspeed/.git"
+ state: absent
+
+ # RHEL Graphics
+ - name: RHEL X11 block
when: install_rhel_graphical | bool
- ansible.builtin.include_role:
- name: rhel-graphical
-
- - name: Stop and disable firewalld
- ansible.builtin.service:
- name: firewalld
- state: stopped
- enabled: false
-
- - name: Create /etc/dconf/db/local.d directory
- ansible.builtin.file:
- path: /etc/dconf/db/local.d
- state: directory
- mode: '755'
-
- - name: Create /etc/dconf/db/local.d/00-logout
- ansible.builtin.copy:
- src: 00-logout
- dest: /etc/dconf/db/local.d/00-logout
- mode: '644'
+ block:
+ - name: Stop and disable firewalld
+ ansible.builtin.service:
+ name: firewalld
+ state: stopped
+ enabled: false
+
+ - name: Create /etc/dconf/db/local.d directory
+ ansible.builtin.file:
+ path: /etc/dconf/db/local.d
+ state: directory
+ mode: '755'
+
+ - name: Create /etc/dconf/db/local.d/00-logout
+ ansible.builtin.copy:
+ src: 00-logout
+ dest: /etc/dconf/db/local.d/00-logout
+ mode: '644'
+
+ - name: Update dconfig
+ ansible.builtin.command: dconf update
- - name: Update dconfig
- ansible.builtin.command: dconf update
- name: Software flight-check
hosts: localhost
diff --git a/ansible/configs/aro/htpasswd.yml b/ansible/configs/aro/htpasswd.yml
index 63fadfd815e..7bcf6335543 100644
--- a/ansible/configs/aro/htpasswd.yml
+++ b/ansible/configs/aro/htpasswd.yml
@@ -90,7 +90,7 @@
- oauth-htpasswd.yaml
- name: Retrieve API server configuration (for API endpoint)
- k8s_facts:
+ k8s_info:
host: "{{ az_aro4_public_api_fixed }}"
api_key: "{{ az_aro4_auth_results.k8s_auth.api_key }}"
api_version: config.openshift.io/v1
diff --git a/ansible/configs/base-infra/post_software.yml b/ansible/configs/base-infra/post_software.yml
index b793ce17238..9c06a9c66ba 100644
--- a/ansible/configs/base-infra/post_software.yml
+++ b/ansible/configs/base-infra/post_software.yml
@@ -68,7 +68,6 @@
- name: Deploy nookbag Web Interface
when:
- nookbag_git_repo is defined
- - showroom_git_repo is not defined
ansible.builtin.include_role:
name: nookbag
diff --git a/ansible/configs/base-rosa/.yamllint b/ansible/configs/base-rosa/.yamllint
new file mode 100644
index 00000000000..3f0b53e73a4
--- /dev/null
+++ b/ansible/configs/base-rosa/.yamllint
@@ -0,0 +1,13 @@
+---
+extends: default
+
+rules:
+ comments:
+ require-starting-space: false
+ min-spaces-from-content: 1
+ comments-indentation: disable
+ indentation:
+ indent-sequences: consistent
+ line-length:
+ max: 200
+ allow-non-breakable-inline-mappings: true
diff --git a/ansible/configs/base-rosa/README.adoc b/ansible/configs/base-rosa/README.adoc
new file mode 100644
index 00000000000..816df23112f
--- /dev/null
+++ b/ansible/configs/base-rosa/README.adoc
@@ -0,0 +1,62 @@
+= Base ROSA
+== Config Description
+
+The following config includes:
+
+* One bastion host for ROSA installation
+* SSH access setup
+* Base ROSA config gives option to select number of worker nodes and instance type.
+
+== Review the `default_vars.yml` variable file
+
+* This file link:./default_vars.yml[./default_vars.yml] contains all the variables you need to define to control the deployment of your environment. These are the defaults.
+
+* Override the defaults for your environment by creating your own myenvironment-variables.yml file, as below.
+
+* To update worker node machine type check `rosa_machine_type: "m5.2xlarge"` variable.
+
+== AWS Prereqs for ROSA
+
+Please see https://docs.openshift.com/rosa/rosa_getting_started/rosa-aws-prereqs.html for a list of pre-reqs for the target AWS account.
+
+== Secrets
+
+You will need to define the `rosa_token` variable in order to deploy this config. Add this variable to your secret file.
+
+This token can be created and downloaded from https://cloud.redhat.com/openshift/token/rosa
+
+It should look like:
+
+[source,yaml]
+----
+rosa_token: "eyJ<..REDACTED..>dz8"
+----
+
+== Running Ansible Playbook
+
+=== Running Playbook With Environment and Secrets files
+
+You can create yaml files of your desired configs and secrets and execute them:
+
+`ansible-playbook ansible/main.yaml -e @myenvironment-variables.yml -e@my-secrets.yml`
+
+=== To Delete an environment
+
+Run the `destroy_env.yml` playbook.
+
+Ex: `ansible-playbook ansible/configs/rosa/destroy_env.yml -e @myenvironment-variables.yml -e@my-secrets.yml`
+
+The teardown process is roughly as follows:
+* Delete sandbox
+
+== Software stages in config provide
+
+* Install AWS CLI on bastion
+* Install ROSA CLI on bastion
+* Optionally run ROSA installer (default is to run installer)
+
+== Developer
+* Tyrell Reddy
+* Mitesh Sharma
+* Ritesh Shah
+* Prakhar Srivastava
\ No newline at end of file
diff --git a/ansible/configs/base-rosa/default_vars.yml b/ansible/configs/base-rosa/default_vars.yml
new file mode 100644
index 00000000000..16488befb94
--- /dev/null
+++ b/ansible/configs/base-rosa/default_vars.yml
@@ -0,0 +1,58 @@
+---
+###### VARIABLES YOU SHOULD CONFIGURE FOR YOUR DEPLOYMENT
+###### OR PASS as "-e" args to ansible-playbook command
+
+env_type: base-rosa
+
+## guid is the deployment unique identifier, it will be appended to all tags,
+## files and anything that identifies this environment from another "just like it"
+guid: defaultguid
+
+# Project Tag for all generated resources
+project_tag: "{{ env_type }}-{{ guid }}"
+
+# Do you want to run a full yum update
+update_packages: false
+
+# Install FTL
+# requirements.yml should have the right version of FTL injector
+install_ftl: false
+
+# To be added as an additional tag on resources
+purpose: development
+
+# Tags to be added to VMs
+cloud_tags:
+ env_type: "{{ env_type }}"
+ guid: "{{ guid }}"
+ course_name: "{{ course_name | default( 'unknown' ) }}"
+ platform: "{{ platform | default( 'unknown' ) }}"
+
+bastion_user_name: rosa
+bastion_user_enable_sudo: false
+bastion_user_use_password: false
+bastion_user_password: ""
+bastion_user_password_length: 12
+
+# ROSA Cluster Name
+rosa_cluster_name: "rosa-{{ guid }}"
+
+# ROSA worker node machine type recommended in the official documentation
+rosa_machine_type: "m6a.2xlarge"
+
+# ROSA number of worker nodes recommended in the official documentation
+rosa_wroker_nodes: 2
+
+# ROSA Version
+rosa_version: latest
+
+# Where to download the ROSA installer from
+rosa_installer_url: "https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/rosa/{{ rosa_version }}/rosa-linux.tar.gz"
+
+# This should come from a secret and is only used if the rosa_token does not come from the dialog
+gpte_rosa_token: ""
+# This should come from a dialog, if not, gpte_rosa_token is used from secret
+rosa_token: ""
+
+demo_instance_name: "demo-rhods-on-rosa"
+demo_name: "OCP4 Workshop RHODS on ROSA"
diff --git a/ansible/configs/base-rosa/default_vars_ec2.yml b/ansible/configs/base-rosa/default_vars_ec2.yml
new file mode 100644
index 00000000000..fde97158a12
--- /dev/null
+++ b/ansible/configs/base-rosa/default_vars_ec2.yml
@@ -0,0 +1,54 @@
+---
+### AWS EC2 Environment settings
+
+# The region to be used, if not specified by -e in the command line
+aws_region: us-east-1
+
+# The key that is used to
+key_name: "default_key_name"
+
+## Networking (AWS)
+
+### Route 53 Zone ID (AWS)
+# This is the Route53 HostedZoneId where you will create your Public DNS entries
+#
+# HostedZoneId needs to come from the account that is being used. It also needs to match
+# subdomain_base_suffix
+HostedZoneId: Z3IHLWJZOU9SRT
+
+subdomain_base_short: "{{ guid }}"
+subdomain_base_suffix: ".example.opentlc.com"
+subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
+
+## Environment Sizing
+
+bastion_instance_type: "t2.small"
+bastion_instance_image: RHEL84GOLD-latest
+bastion_rootfs_size: 30
+
+###### VARIABLES YOU SHOULD ***NOT*** CONFIGURE FOR YOUR DEPLOYMENT
+
+# This is the user that Ansible will use to connect to the nodes it is
+# configuring from the admin/control host
+ansible_user: ec2-user
+
+sandbox_enable_ui: true
+
+# The instance definition for the bastion
+instances:
+- name: "bastion"
+ count: 1
+ unique: true
+ public_dns: true
+ floating_ip: true
+ image: "{{ bastion_instance_image }}"
+ flavor:
+ ec2: "{{ bastion_instance_type }}"
+ tags:
+ - key: "AnsibleGroup"
+ value: "bastions"
+ - key: "ostype"
+ value: "linux"
+ rootfs_size: "{{ bastion_rootfs_size }}"
+ security_groups:
+ - BastionSG
diff --git a/ansible/configs/base-rosa/destroy_env.yml b/ansible/configs/base-rosa/destroy_env.yml
new file mode 100644
index 00000000000..a8bdb5b457d
--- /dev/null
+++ b/ansible/configs/base-rosa/destroy_env.yml
@@ -0,0 +1,84 @@
+---
+- name: Destroy environment on AWS
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ environment:
+ AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
+ AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
+ AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
+ tasks:
+ - name: Run infra-ec2-create-inventory role
+ include_role:
+ name: infra-ec2-create-inventory
+ - name: SSH config setup
+ when:
+ - groups["bastions"] is defined
+ - groups["bastions"] | length > 0
+ include_role:
+ name: infra-common-ssh-config-generate
+
+- name: Set ssh extra args for all hosts, use ssh_config just created
+ hosts: all
+ gather_facts: false
+ any_errors_fatal: true
+ ignore_errors: false
+ tasks:
+ - name: add -F option ansible_ssh_extra_args
+ set_fact:
+ ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{ hostvars['localhost'].ansible_ssh_config }}"
+
+- name: Start all EC2 instances if they are stopped
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ environment:
+ AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
+ AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
+ AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
+ tasks:
+ - include_tasks: ec2_instances_start.yaml
+
+- name: Destroy ROSA
+ hosts: bastions
+ gather_facts: false
+ become: false
+ environment:
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ tasks:
+ - name: Check for ROSA binary
+ stat:
+ path: /usr/local/bin/rosa
+ register: rosa_check
+ ignore_errors: true
+
+ - name: Try to gracefully uninstall ROSA if binary is installed, otherwise just nuke the sandbox
+ when: rosa_check.stat.exists
+ block:
+ - set_fact:
+ rosa_cluster_name: "rosa-{{ guid }}"
+
+ - name: Destroy ROSA Cluster
+ command: "/usr/local/bin/rosa delete cluster -y --cluster={{ rosa_cluster_name }}"
+ register: r_rosa_delete
+ failed_when: >-
+ r_rosa_delete.rc != 0
+ and 'ERR: There is no cluster with identifier or name' not in r_rosa_delete.stderr
+
+ - name: Wait for ROSA deletion to complete
+ command: "/usr/local/bin/rosa describe cluster -c {{ rosa_cluster_name }}"
+ register: rosa_cluster_status
+ ignore_errors: true
+ until: rosa_cluster_status.rc != 0
+ retries: 60
+ delay: 60
+
+ - name: Make sure ROSA cluster is gone
+ fail:
+ msg: "The ROSA cluster still exists after one hour of trying to delete. Please look at it manually."
+ when: rosa_cluster_status.rc == 0
+
+- name: Import cloud provider specific destroy playbook
+ import_playbook: "../../cloud_providers/{{ cloud_provider }}_destroy_env.yml"
diff --git a/ansible/configs/base-rosa/ec2_instances_start.yaml b/ansible/configs/base-rosa/ec2_instances_start.yaml
new file mode 100644
index 00000000000..3969c2b0e5e
--- /dev/null
+++ b/ansible/configs/base-rosa/ec2_instances_start.yaml
@@ -0,0 +1,32 @@
+---
+- name: Get all EC2 instances
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: stopped
+ register: r_stopped_instances
+
+# Wk: Don't wait for instances to be running. Otherwise this is
+# a very sequential task. Just start the instances.
+# The next task will wait until all instances are running - but
+# this happens now in parallel instead of sequentially.
+- name: Ensure EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ item.instance_id }}"
+ state: started
+ wait: false
+ loop: "{{ r_stopped_instances.instances }}"
+
+- name: Wait until all EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: running
+ register: r_running_instances
+ until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
+ delay: 10
+ retries: 60
diff --git a/ansible/configs/multi-cloud-capsule/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/base-rosa/files/cloud_providers/ec2_cloud_template.j2
similarity index 68%
rename from ansible/configs/multi-cloud-capsule/files/cloud_providers/ec2_cloud_template.j2
rename to ansible/configs/base-rosa/files/cloud_providers/ec2_cloud_template.j2
index d42f0f54419..f7055cc370f 100644
--- a/ansible/configs/multi-cloud-capsule/files/cloud_providers/ec2_cloud_template.j2
+++ b/ansible/configs/base-rosa/files/cloud_providers/ec2_cloud_template.j2
@@ -73,7 +73,7 @@ Resources:
SubnetId:
Ref: PublicSubnet
-{% for security_group in security_groups|list %}
+{% for security_group in security_groups|list + default_security_groups|list %}
{{security_group['name']}}:
Type: "AWS::EC2::SecurityGroup"
Properties:
@@ -85,7 +85,8 @@ Resources:
Value: "{{security_group['name']}}"
{% endfor %}
-{% for security_group in security_groups|list %}
+{% for security_group in default_security_groups|list + security_groups|list
+ if security_group.name in used_security_groups %}
{% for rule in security_group.rules %}
{{security_group['name']}}{{rule['name']}}:
Type: "AWS::EC2::SecurityGroup{{rule['rule_type']}}"
@@ -122,7 +123,9 @@ Resources:
HostedZoneConfig:
Comment: "{{ aws_comment }}"
-
+{% if secondary_stack is not defined
+ and aws_dns_create_public_zone | bool
+%}
DnsZonePublic:
Type: "AWS::Route53::HostedZone"
Properties:
@@ -135,11 +138,11 @@ Resources:
DependsOn:
- DnsZonePublic
Properties:
- {% if HostedZoneId is defined %}
+{% if HostedZoneId is defined %}
HostedZoneId: "{{ HostedZoneId }}"
- {% else %}
+{% else %}
HostedZoneName: "{{ aws_dns_zone_root }}"
- {% endif %}
+{% endif %}
RecordSets:
- Name: "{{ aws_dns_zone_public }}"
Type: NS
@@ -148,112 +151,119 @@ Resources:
"Fn::GetAtt":
- DnsZonePublic
- NameServers
-
+{% endif %}
{% for instance in instances %}
-{% if instance['dns_loadbalancer'] | d(false) | bool
- and not instance['unique'] | d(false) | bool %}
+{% if instance['dns_loadbalancer'] | default(false) | bool
+ and not instance['unique'] | default(false) | bool %}
{{instance['name']}}DnsLoadBalancer:
Type: "AWS::Route53::RecordSetGroup"
DependsOn:
- {% for c in range(1, (instance['count']|int)+1) %}
+{% for c in range(1, (instance['count']|int)+1) %}
- {{instance['name']}}{{c}}
- {% if instance['public_dns'] %}
+{% if instance['public_dns'] %}
- {{instance['name']}}{{c}}EIP
- {% endif %}
- {% endfor %}
+{% endif %}
+{% endfor %}
Properties:
+{% if aws_dns_create_public_zone | bool %}
+{% if secondary_stack is defined %}
+ HostedZoneName: "{{ aws_dns_zone_public }}"
+{% else %}
HostedZoneId:
Ref: DnsZonePublic
+{% endif %}
+{% else %}
+ HostedZoneName: "{{ aws_dns_zone_root }}"
+{% endif %}
RecordSets:
- Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
Type: A
TTL: {{ aws_dns_ttl_public }}
ResourceRecords:
-{% for c in range(1,(instance['count'] |int)+1) %}
+{% for c in range(1,(instance['count'] |int)+1) %}
- "Fn::GetAtt":
- {{instance['name']}}{{c}}
- PublicIp
-{% endfor %}
-{% endif %}
+{% endfor %}
+{% endif %}
-{% for c in range(1,(instance['count'] |int)+1) %}
+{% for c in range(1,(instance['count'] |int)+1) %}
{{instance['name']}}{{loop.index}}:
Type: "AWS::EC2::Instance"
Properties:
-{% if instance.name in agnosticd_images | default({}) %}
+{% if instance.name in agnosticd_images | default({}) %}
ImageId: {{ agnosticd_images[instance.name].image_id }}
-{% elif custom_image is defined %}
+{% elif custom_image is defined %}
ImageId: {{ custom_image.image_id }}
-{% else %}
+{% else %}
ImageId:
Fn::FindInMap:
- RegionMapping
- Ref: AWS::Region
- {{ instance.image | default(aws_default_image) }}
-{% endif %}
+{% endif %}
InstanceType: "{{instance['flavor'][cloud_provider]}}"
- KeyName: "{{instance.key_name | default(key_name)}}"
- {% if instance['UserData'] is defined %}
+ KeyName: "{{instance.key_name | default(ssh_provision_key_name) | default(key_name)}}"
+{% if instance['UserData'] is defined %}
{{instance['UserData']}}
- {% endif %}
+{% endif %}
- {% if instance['security_groups'] is defined %}
+{% if instance['security_groups'] is defined %}
SecurityGroupIds:
- {% for sg in instance.security_groups %}
+{% for sg in instance.security_groups %}
- Ref: {{ sg }}
- {% endfor %}
- {% else %}
+{% endfor %}
+{% else %}
SecurityGroupIds:
- Ref: DefaultSG
- {% endif %}
+{% endif %}
SubnetId:
Ref: PublicSubnet
Tags:
- {% if instance['unique'] | d(false) | bool %}
+{% if instance['unique'] | d(false) | bool %}
- Key: Name
Value: {{instance['name']}}
- Key: internaldns
- Value: {{instance['name']}}.{{aws_dns_zone_private_chomped}}
- - Key: publicname
- Value: {{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{subdomain_base }}
- {% else %}
+ Value: {{instance['name']}}.{{aws_dns_zone_private_chomped}}
+{% else %}
- Key: Name
Value: {{instance['name']}}{{loop.index}}
- Key: internaldns
Value: {{instance['name']}}{{loop.index}}.{{aws_dns_zone_private_chomped}}
- - Key: publicname
- Value: {{instance['name']}}{{loop.index}}.{{aws_dns_zone_public_prefix|d('')}}{{ subdomain_base}}
- {% endif %}
+{% endif %}
- Key: "owner"
Value: "{{ email | default('unknownuser') }}"
- Key: "Project"
Value: "{{project_tag}}"
- Key: "{{project_tag}}"
Value: "{{ instance['name'] }}"
- {% for tag in instance['tags'] %}
+{% for tag in instance['tags'] %}
- Key: {{tag['key']}}
Value: {{tag['value']}}
- {% endfor %}
+{% endfor %}
BlockDeviceMappings:
- {% if '/dev/sda1' not in instance.volumes|d([])|json_query('[].device_name')
- and '/dev/sda1' not in instance.volumes|d([])|json_query('[].name')
+{% if '/dev/sda1' not in instance.volumes | default([]) | json_query('[].device_name')
+ and '/dev/sda1' not in instance.volumes | default([]) | json_query('[].name')
%}
- DeviceName: "/dev/sda1"
Ebs:
VolumeSize: "{{ instance['rootfs_size'] | default(aws_default_rootfs_size) }}"
VolumeType: "{{ aws_default_volume_type }}"
- {% endif %}
- {% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
+{% endif %}
+{% for vol in instance.volumes|default([]) if vol.enable|d(true) %}
- DeviceName: "{{ vol.name | default(vol.device_name) }}"
Ebs:
- {% if cloud_provider in vol and 'type' in vol.ec2 %}
+{% if cloud_provider in vol and 'type' in vol.ec2 %}
VolumeType: "{{ vol[cloud_provider].type }}"
- {% else %}
+{% else %}
VolumeType: "{{ aws_default_volume_type }}"
- {% endif %}
+{% endif %}
+{% if vol.snapshot_id is defined %}
+ SnapshotId: "{{ vol.snapshot_id}}"
+{% endif %}
VolumeSize: "{{ vol.size }}"
- {% endfor %}
+{% endfor %}
{{instance['name']}}{{loop.index}}InternalDns:
Type: "AWS::Route53::RecordSetGroup"
@@ -261,11 +271,11 @@ Resources:
HostedZoneId:
Ref: DnsZonePrivate
RecordSets:
- {% if instance['unique'] | d(false) | bool %}
+{% if instance['unique'] | d(false) | bool %}
- Name: "{{instance['name']}}.{{aws_dns_zone_private}}"
- {% else %}
+{% else %}
- Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_private}}"
- {% endif %}
+{% endif %}
Type: A
TTL: {{ aws_dns_ttl_private }}
ResourceRecords:
@@ -273,7 +283,7 @@ Resources:
- {{instance['name']}}{{loop.index}}
- PrivateIp
-{% if instance['public_dns'] %}
+{% if instance['public_dns'] %}
{{instance['name']}}{{loop.index}}EIP:
Type: "AWS::EC2::EIP"
DependsOn:
@@ -287,29 +297,33 @@ Resources:
DependsOn:
- {{instance['name']}}{{loop.index}}EIP
Properties:
- {% if secondary_stack is defined %}
+{% if aws_dns_create_public_zone | bool %}
+{% if secondary_stack is defined %}
HostedZoneName: "{{ aws_dns_zone_public }}"
- {% else %}
+{% else %}
HostedZoneId:
Ref: DnsZonePublic
- {% endif %}
+{% endif %}
+{% else %}
+ HostedZoneName: "{{ aws_dns_zone_root }}"
+{% endif %}
RecordSets:
- {% if instance['unique'] | d(false) | bool %}
+{% if instance['unique'] | d(false) | bool %}
- Name: "{{instance['name']}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
- {% else %}
+{% else %}
- Name: "{{instance['name']}}{{loop.index}}.{{aws_dns_zone_public_prefix|d('')}}{{ aws_dns_zone_public }}"
- {% endif %}
+{% endif %}
Type: A
TTL: {{ aws_dns_ttl_public }}
ResourceRecords:
- "Fn::GetAtt":
- {{instance['name']}}{{loop.index}}
- PublicIp
-{% endif %}
-{% endfor %}
+{% endif %}
+{% endfor %}
{% endfor %}
-
+{% if secondary_stack is not defined %}
Route53User:
Type: AWS::IAM::User
Properties:
@@ -330,11 +344,15 @@ Resources:
- route53:ChangeResourceRecordSets
- route53:ListResourceRecordSets
- route53:GetHostedZone
+{% if aws_dns_create_public_zone %}
Resource:
Fn::Join:
- ""
- - "arn:aws:route53:::hostedzone/"
- Ref: DnsZonePublic
+{% else %}
+ Resource: "arn:aws:route53:::hostedzone/{{ HostedZoneId }}"
+{% endif %}
- Effect: Allow
Action: route53:GetChange
@@ -346,13 +364,45 @@ Resources:
Properties:
UserName:
Ref: Route53User
+{% endif %}
+
+ StudentUser:
+ Type: AWS::IAM::User
+ Properties:
+ UserName: "{{ email | default(owner) }}-{{ guid }}"
+ Policies:
+ - PolicyName: AccessAll
+ PolicyDocument:
+ Statement:
+ - Effect: Allow
+ Action: "*"
+ Resource: "*"
+ {% if sandbox_enable_ui | default(true) | bool %}
+ LoginProfile:
+ Password: {{ rosa_console_password | to_json }}
+ PasswordResetRequired: False
+ {% endif %}
+      # NOTE(review): removed a duplicate 'Policies' key — StudentUser already
+      # declares an identical 'Policies' block above (PolicyName: AccessAll,
+      # Allow */*). Duplicate mapping keys are invalid YAML and most parsers
+      # silently keep only the last occurrence, so this second copy was
+      # redundant at best and ambiguous at worst.
+      # (original duplicated block intentionally left commented out to keep
+      # the patch hunk line count unchanged)
+ StudentUserAccessKey:
+ DependsOn: StudentUser
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName:
+ Ref: StudentUser
Outputs:
Route53internalzoneOutput:
Description: The ID of the internal route 53 zone
Value:
Ref: DnsZonePrivate
+{% if secondary_stack is not defined %}
Route53User:
Value:
Ref: Route53User
@@ -367,3 +417,18 @@ Outputs:
- Route53UserAccessKey
- SecretAccessKey
Description: IAM User for Route53 (Let's Encrypt)
+{% endif %}
+ StudentUser:
+ Value:
+ Ref: StudentUser
+ Description: IAM User for Student
+ StudentUserAccessKey:
+ Value:
+ Ref: StudentUserAccessKey
+    Description: Access key ID for the Student IAM user
+ StudentUserSecretAccessKey:
+ Value:
+ Fn::GetAtt:
+ - StudentUserAccessKey
+ - SecretAccessKey
+    Description: Secret access key for the Student IAM user
diff --git a/ansible/configs/base-rosa/files/requirements_k8s.txt b/ansible/configs/base-rosa/files/requirements_k8s.txt
new file mode 100644
index 00000000000..657e5c2e584
--- /dev/null
+++ b/ansible/configs/base-rosa/files/requirements_k8s.txt
@@ -0,0 +1,188 @@
+adal==1.2.7
+ansible==2.9.27
+appdirs==1.4.4
+applicationinsights==0.11.10
+argcomplete==1.12.3
+asciitree==0.3.3
+attrs==21.2.0
+autopage==0.4.0
+awscli==1.22.17
+azure-cli-core==2.35.0
+azure-cli-nspkg==3.0.4
+azure-cli-telemetry==1.0.6
+azure-common==1.1.11
+azure-core==1.17.0
+azure-graphrbac==0.61.1
+azure-identity==1.9.0
+azure-keyvault==1.0.0a1
+azure-keyvault-certificates==4.3.0
+azure-keyvault-keys==4.4.0
+azure-keyvault-secrets==4.3.0
+azure-mgmt-apimanagement==0.2.0
+azure-mgmt-authorization==0.51.1
+azure-mgmt-automation==0.1.1
+azure-mgmt-compute==10.0.0
+azure-mgmt-containerinstance==1.4.0
+azure-mgmt-containerregistry==8.2.0
+azure-mgmt-containerservice==9.1.0
+azure-mgmt-core==1.2.0
+azure-mgmt-cosmosdb==0.5.2
+azure-mgmt-datalake-nspkg==2.0.0
+azure-mgmt-datalake-store==0.5.0
+azure-mgmt-devtestlabs==3.0.0
+azure-mgmt-dns==2.1.0
+azure-mgmt-eventhub==2.0.0
+azure-mgmt-hdinsight==0.1.0
+azure-mgmt-iothub==0.7.0
+azure-mgmt-keyvault==1.1.0
+azure-mgmt-loganalytics==1.0.0
+azure-mgmt-managedservices==1.0.0
+azure-mgmt-managementgroups==0.2.0
+azure-mgmt-marketplaceordering==0.1.0
+azure-mgmt-monitor==0.5.2
+azure-mgmt-network==12.0.0
+azure-mgmt-notificationhubs==2.0.0
+azure-mgmt-nspkg==2.0.0
+azure-mgmt-privatedns==0.1.0
+azure-mgmt-rdbms==1.9.0
+azure-mgmt-recoveryservices==0.4.0
+azure-mgmt-recoveryservicesbackup==0.6.0
+azure-mgmt-redis==5.0.0
+azure-mgmt-resource==10.2.0
+azure-mgmt-search==3.0.0
+azure-mgmt-servicebus==0.5.3
+azure-mgmt-sql==0.10.0
+azure-mgmt-storage==11.1.0
+azure-mgmt-trafficmanager==0.50.0
+azure-mgmt-web==0.41.0
+azure-nspkg==2.0.0
+azure-storage==0.35.1
+Babel==2.9.1
+bcrypt==3.2.0
+boto==2.49.0
+boto3==1.20.16
+botocore==1.23.17
+cachetools==4.2.2
+certifi==2021.5.30
+cffi==1.14.6
+chardet==4.0.0
+click==8.0.1
+cliff==3.9.0
+cmd2==2.1.2
+colorama==0.4.3
+configparser==5.0.2
+cryptography==3.3.2
+debtcollector==2.2.0
+decorator==5.0.9
+distro==1.6.0
+dnspython==2.1.0
+docutils==0.15.2
+dogpile.cache==1.1.3
+fabric==2.6.0
+google-auth==2.0.2
+humanfriendly==10.0
+idna==2.10
+importlib-metadata==4.8.1
+importlib-resources==5.2.2
+iniconfig==1.1.1
+invoke==1.6.0
+ipaddress==1.0.23
+iso8601==0.1.16
+isodate==0.6.0
+Jinja2==3.0.1
+jmespath==0.10.0
+jsonpatch==1.32
+jsonpointer==2.1
+jsonschema==3.2.0
+keepercommander==16.1.8
+keystoneauth1==4.3.1
+knack==0.9.0
+kubernetes==12.0.1
+libkeepass==0.3.1.post1
+lxml==4.6.3
+MarkupSafe==2.0.1
+msal==1.17.0
+msal-extensions==0.3.1
+msgpack==1.0.2
+msrest==0.6.21
+msrestazure==0.6.4
+munch==2.5.0
+netaddr==0.8.0
+netifaces==0.11.0
+oauthlib==3.1.1
+openshift==0.12.1
+openstacksdk==0.59.0
+os-client-config==2.1.0
+os-service-types==1.7.0
+osc-lib==2.4.2
+oslo.config==8.7.1
+oslo.context==3.3.1
+oslo.i18n==5.0.1
+oslo.log==4.6.0
+oslo.serialization==4.2.0
+oslo.utils==4.10.0
+packaging==21.0
+paramiko==2.7.2
+pathlib2==2.3.6
+pathspec==0.9.0
+pbr==5.6.0
+pkginfo==1.7.1
+pluggy==1.0.0
+portalocker==1.7.1
+prettytable==0.7.2
+prompt-toolkit==2.0.10
+protobuf==3.17.3
+psutil==5.9.0
+py==1.10.0
+pyasn1==0.4.8
+pyasn1-modules==0.2.8
+pycparser==2.20
+pycryptodome==3.10.1
+pycryptodomex==3.10.1
+Pygments==2.10.0
+pyinotify==0.9.6
+PyJWT==2.3.0
+PyNaCl==1.4.0
+pyOpenSSL==20.0.1
+pyparsing==2.4.7
+pyperclip==1.8.2
+pyrsistent==0.18.0
+PySocks==1.7.1
+pytest==6.2.5
+python-cinderclient==8.0.0
+python-dateutil==2.8.2
+python-glanceclient==3.5.0
+python-heatclient==2.3.0
+python-keystoneclient==4.2.0
+python-logstash==0.4.6
+python-neutronclient==7.5.0
+python-novaclient==17.5.0
+python-openstackclient==5.6.0
+python-string-utils==1.0.0
+python-swiftclient==3.12.0
+pytz==2021.1
+PyYAML==5.4.1
+requests==2.25.1
+requests-oauthlib==1.3.0
+requestsexceptions==1.4.0
+rfc3986==1.5.0
+rsa==4.7.2
+ruamel.yaml==0.17.16
+ruamel.yaml.clib==0.2.6
+s3transfer==0.5.0
+selinux==0.2.1
+simplejson==3.17.5
+six==1.16.0
+stevedore==3.4.0
+tabulate==0.8.9
+toml==0.10.2
+typing-extensions==3.10.0.2
+unicodecsv==0.14.1
+urllib3==1.26.6
+warlock==1.3.3
+wcwidth==0.2.5
+websocket-client==1.2.1
+wrapt==1.12.1
+xmltodict==0.12.0
+yamllint==1.26.3
+zipp==3.5.0
\ No newline at end of file
diff --git a/ansible/configs/base-rosa/post_infra.yml b/ansible/configs/base-rosa/post_infra.yml
new file mode 100644
index 00000000000..981991affc6
--- /dev/null
+++ b/ansible/configs/base-rosa/post_infra.yml
@@ -0,0 +1,17 @@
+---
+- name: Step 002 - Post Infrastructure
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step002
+ - post_infrastructure
+ tasks:
+ - name: get aws user credentials from stack outputs
+ when:
+ - cloudformation_out_final is defined
+ set_fact:
+ rosa_access_key_id: "{{ cloudformation_out_final.stack_outputs.StudentUserAccessKey }}"
+ rosa_secret_access_key: "{{ cloudformation_out_final.stack_outputs.StudentUserSecretAccessKey }}"
+ rosa_console_user_name: "{{ cloudformation_out_final.stack_outputs.StudentUser }}"
diff --git a/ansible/configs/base-rosa/post_software.yml b/ansible/configs/base-rosa/post_software.yml
new file mode 100644
index 00000000000..8a71b98cbf0
--- /dev/null
+++ b/ansible/configs/base-rosa/post_software.yml
@@ -0,0 +1,24 @@
+---
+- name: Step 005 Post Software
+ hosts: bastions
+ become: true
+ gather_facts: false
+ tasks:
+ - debug:
+ msg: "Post-Software Steps starting"
+
+# Deploy Workloads
+- name: Deploy Infra and Student Workloads
+ import_playbook: workloads.yml
+
+
+- name: PostSoftware flight-check
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tags:
+ - post_flight_check
+ tasks:
+ - debug:
+ msg: "Post-Software checks completed successfully"
diff --git a/ansible/configs/base-rosa/pre_infra.yml b/ansible/configs/base-rosa/pre_infra.yml
new file mode 100644
index 00000000000..d16040f60c5
--- /dev/null
+++ b/ansible/configs/base-rosa/pre_infra.yml
@@ -0,0 +1,14 @@
+---
+- name: Step 000 Pre Infrastructure
+ hosts:
+ - localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step001
+ - pre_infrastructure
+ tasks:
+ - debug:
+ msg: "Step 000 Pre Infrastructure - Starting"
+ - include_tasks: pre_infra_ec2.yml
diff --git a/ansible/configs/base-rosa/pre_infra_ec2.yml b/ansible/configs/base-rosa/pre_infra_ec2.yml
new file mode 100644
index 00000000000..126f0eb5929
--- /dev/null
+++ b/ansible/configs/base-rosa/pre_infra_ec2.yml
@@ -0,0 +1,19 @@
+---
+- name: Set rosa console password
+ set_fact:
+ rosa_console_password: >-
+ {{ lookup('community.general.random_string',
+ length=12, min_lower=1, min_upper=1, special=false,
+ min_numeric=1) }}
+
+- name: Get the current caller identity information
+ environment:
+ AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
+ AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
+ AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
+ aws_caller_info:
+ register: _caller_info
+
+- name: Set account ID
+ set_fact:
+ sandbox_account_id: "{{ _caller_info.account }}"
diff --git a/ansible/configs/base-rosa/pre_software.yml b/ansible/configs/base-rosa/pre_software.yml
new file mode 100644
index 00000000000..23585a5a2dc
--- /dev/null
+++ b/ansible/configs/base-rosa/pre_software.yml
@@ -0,0 +1,49 @@
+---
+# Cloudformation or Heat template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
+- name: Configure all hosts with Repositories, Common Files and Set environment key
+ hosts: all
+ become: true
+ gather_facts: false
+ tags:
+ - step004
+ - common_tasks
+ roles:
+ - role: common
+ when: install_common | default( true ) | bool
+ tasks:
+ - name: Add GUID to /etc/skel/.bashrc
+ lineinfile:
+ path: "/etc/skel/.bashrc"
+ regexp: "^export GUID"
+ line: "export GUID={{ guid }}"
+
+- name: Create a Python3 VirtualEnv for use in the k8s Ansible tasks
+ hosts: bastions
+ gather_facts: false
+ become: true
+ tasks:
+ - name: Setup k8s virtualenv
+ include_role:
+ name: host_virtualenv
+ vars:
+ host_virtualenv_path: /opt/virtualenvs/k8s
+ # Merge base k8s requirements with cloud provider specific requirements
+ host_virtualenv_requirements:
+ - requirements_k8s.txt
+
+ - name: Install git
+ package:
+ state: present
+ name:
+ - git
+
+- name: PreSoftware flight-check
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tags:
+ - flight_check
+ tasks:
+ - debug:
+ msg: "Pre-Software checks completed successfully"
diff --git a/ansible/configs/base-rosa/requirements.yml b/ansible/configs/base-rosa/requirements.yml
new file mode 100644
index 00000000000..762c4fe0d0e
--- /dev/null
+++ b/ansible/configs/base-rosa/requirements.yml
@@ -0,0 +1,12 @@
+---
+collections:
+- name: kubernetes.core
+ version: 2.3.0
+- name: amazon.aws
+ version: 2.2.0
+- name: community.general
+ version: 4.6.1
+- name: ansible.posix
+ version: 1.3.0
+- name: community.okd
+ version: 2.3.0
diff --git a/ansible/configs/base-rosa/software.yml b/ansible/configs/base-rosa/software.yml
new file mode 100644
index 00000000000..93c671171dd
--- /dev/null
+++ b/ansible/configs/base-rosa/software.yml
@@ -0,0 +1,279 @@
+---
+- name: Set up bastion
+ hosts: bastions
+ gather_facts: false
+ become: true
+ tasks:
+ - name: Generate user password if not defined
+ set_fact:
+ rosa_user_password: >-
+        {{ lookup('password', '/dev/null length=' ~ bastion_user_password_length ~ ' chars=ascii_letters,digits') }}
+
+ - name: Create user with password
+ become: true
+ user:
+ state: present
+ name: "{{ bastion_user_name }}"
+ password: "{{ rosa_user_password | password_hash( 'sha512' ) }}"
+ password_lock: false
+ comment: ROSA User
+ group: users
+ groups: "{{ 'wheel' if bastion_user_enable_sudo | bool else '' }}"
+ shell: /bin/bash
+
+ - name: Enable password authentication
+ become: true
+ lineinfile:
+ line: PasswordAuthentication yes
+ regexp: '^ *PasswordAuthentication'
+ path: /etc/ssh/sshd_config
+
+ - name: Restart sshd
+ become: true
+ service:
+ name: sshd
+ state: restarted
+
+- name: Step 00xxxxx software
+ hosts: bastions
+ gather_facts: false
+ become: false
+ environment:
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ tasks:
+ - tags:
+ - install_studentvm_role
+ block:
+ - name: Run student role
+ when: studentvm_roles | default( "" ) | length > 0
+ include_role:
+ name: "{{ studentvm_role }}"
+ loop: "{{ studentvm_roles }}"
+ loop_control:
+ loop_var: studentvm_role
+
+ - tags:
+ - install_awscli
+ block:
+ - name: Get awscli bundle
+ get_url:
+ url: https://s3.amazonaws.com/aws-cli/awscli-bundle-1.18.200.zip
+ dest: /tmp/awscli-bundle.zip
+ - name: Unzip awscli-bundle.zip
+ unarchive:
+ src: /tmp/awscli-bundle.zip
+ dest: /tmp/
+ remote_src: true
+ - name: Install awscli
+ command: /tmp/awscli-bundle/install -i /usr/local/aws -b /bin/aws
+ args:
+ creates: /usr/local/aws
+ become: true
+ - name: cleanup archive and tmp files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /tmp/awscli-bundle
+ - /tmp/awscli-bundle.zip
+
+ - tags:
+ - create_aws_dir
+ block:
+ - name: Create .aws directory
+ file:
+ path: ~/.aws
+ state: directory
+
+ - tags:
+ - create_aws_creds
+ block:
+ - name: Add aws credentials
+ blockinfile:
+ path: ~/.aws/credentials
+ create: true
+ mode: 0600
+ block: |-
+ [default]
+ aws_access_key_id={{ hostvars.localhost.rosa_access_key_id }}
+ aws_secret_access_key={{ hostvars.localhost.rosa_secret_access_key }}
+
+ - tags:
+ - create_aws_config
+ block:
+ - name: Add aws config
+ blockinfile:
+ path: ~/.aws/config
+ create: true
+ mode: 0600
+ block: |-
+ [default]
+ region={{ aws_region }}
+
+ - tags:
+ - install_rosacli
+ block:
+ - name: Get ROSA CLI
+ get_url:
+ url: "{{ rosa_installer_url }}"
+ dest: /tmp/rosa-linux.tar.gz
+ - name: Unzip rosa-linux.tar.gz
+ unarchive:
+ src: /tmp/rosa-linux.tar.gz
+ dest: /usr/local/bin/
+ remote_src: true
+ become: true
+ - name: cleanup archive file
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /tmp/rosa-linux.tar.gz
+
+ - tags:
+ - verify_rosa_installer
+ block:
+ - set_fact:
+ rosa_token: "{{ gpte_rosa_token }}"
+ when: rosa_token == ""
+ - name: Log into ROSA
+ command: "/usr/local/bin/rosa login --token {{ rosa_token }}"
+ - name: Init AWS account for ROSA
+ command: "/usr/local/bin/rosa init"
+ - name: Verify permissions for ROSA
+ command: "/usr/local/bin/rosa verify permissions"
+ - name: Verify quota for ROSA
+ command: "/usr/local/bin/rosa verify quota"
+
+ - tags:
+ - run_rosa_installer
+ block:
+ - name: Create ROSA Cluster
+ command: "/usr/local/bin/rosa create cluster --cluster-name={{ rosa_cluster_name }} --compute-machine-type={{ rosa_machine_type }} --compute-nodes={{ rosa_wroker_nodes }}"
+ register: _r_create_cluster
+ until: _r_create_cluster.rc == 0
+ retries: 5
+ delay: 30
+
+ - tags:
+ - wait_rosa_installer
+ block:
+ - name: Wait 5 minutes for the ROSA installer to start
+ pause:
+ minutes: 5
+ - name: Check for ROSA installer completion
+ shell: "/usr/local/bin/rosa describe cluster -c {{ rosa_cluster_name }} |grep ^State:|awk '{print $2}'"
+ register: rosa_installer_status
+ until: rosa_installer_status.stdout.find("ready") != -1
+ retries: 120
+ delay: 60
+
+ - tags:
+ - get_rosa_console_url
+ block:
+ - name: Get ROSA Console URL
+ shell: "/usr/local/bin/rosa describe cluster -c {{ rosa_cluster_name }} |grep '^Console URL:'|awk '{print $3}'"
+ register: rosa_console_url
+
+ - tags:
+ - create_rosa_admin
+ block:
+ - name: Create ROSA admin user
+ shell: "/usr/local/bin/rosa create admin --cluster={{ rosa_cluster_name }} |grep 'oc login' | awk '{print $7}'"
+ register: rosa_admin_result
+ - name: Create .config/ocm directory in rosa user homedir
+ become: true
+ file:
+ path: "~{{ bastion_user_name }}/.config/ocm"
+ owner: "{{ bastion_user_name }}"
+ state: directory
+ - name: Copy ROSA token to ec2 user dir
+ become: true
+ ansible.builtin.copy:
+ src: /home/ec2-user/.config/ocm/ocm.json
+ dest: "~{{ bastion_user_name }}/.config/ocm/ocm.json"
+ owner: "{{ bastion_user_name }}"
+ mode: '0600'
+ remote_src: true
+ - name: Create .aws directory in rosa user homedir
+ become: true
+ file:
+ path: "~{{ bastion_user_name }}/.aws"
+ owner: "{{ bastion_user_name }}"
+ state: directory
+ - name: Copy AWS credentials to rosa user dir
+ become: true
+ ansible.builtin.copy:
+ src: /home/ec2-user/.aws/credentials
+ dest: "~{{ bastion_user_name }}/.aws/credentials"
+ owner: "{{ bastion_user_name }}"
+ remote_src: true
+ - name: Copy AWS config to rosa user dir
+ become: true
+ ansible.builtin.copy:
+ src: /home/ec2-user/.aws/config
+ dest: "~{{ bastion_user_name }}/.aws/config"
+ owner: "{{ bastion_user_name }}"
+ remote_src: true
+
+ - when:
+ - rosa_admin_result is defined
+ - rosa_console_url is defined
+ block:
+ - name: Set ROSA token warning boolean true
+ when: rosa_token == gpte_rosa_token
+ set_fact:
+ rosa_token_warning: true
+
+ - name: Set ROSA token warning boolean false
+ when: rosa_token != gpte_rosa_token
+ set_fact:
+ rosa_token_warning: false
+
+ - name: Save ansible vars to user_info data
+ agnosticd_user_info:
+ data:
+ rosa_sandbox_account_id: "{{ sandbox_account_id }}"
+ rosa_console_user_name: "{{ hostvars.localhost.rosa_console_user_name }}"
+ rosa_console_password: "{{ hostvars.localhost.rosa_console_password }}"
+ rosa_bastion_user_name: "{{ bastion_user_name }}"
+ rosa_subdomain_base: "{{ subdomain_base }}"
+ rosa_user_password: "{{ rosa_user_password }}"
+ rosa_console_url: "{{ rosa_console_url.stdout }}"
+ rosa_admin_password: "{{ rosa_admin_result.stdout }}"
+ rosa_token_warning: "{{ rosa_token_warning }}"
+
+ - name: Print ROSA admin credentials as user.info
+ agnosticd_user_info:
+ msg: |
+
+ *NOTE:* With great power comes great responsibility. We monitor usage.
+
+ == AWS web console access:
+ * URL: https://{{ sandbox_account_id }}.signin.aws.amazon.com/console
+ * User: {{ hostvars.localhost.rosa_console_user_name }}
+ * Password: {{ hostvars.localhost.rosa_console_password }}
+
+ *IMPORTANT:* Please be very careful to not expose AWS credentials in GIT repos or anywhere else that could be public!
+ If your credentials are compromised, your environment will be deleted without warning.
+
+ == Bastion SSH access:
+ * ssh {{ bastion_user_name }}@bastion.{{ subdomain_base }}
+ * Password: {{ rosa_user_password }}
+ * Your AWS credentials are preconfigured in `~/.aws/credentials` on the bastion host.
+ * The ROSA CLI is preinstalled on the bastion host in `/usr/local/bin`. There is no need to use root.
+
+ == OpenShift console access:
+ * URL: {{ rosa_console_url.stdout }}
+
+ - name: Print ROSA token warning
+ when: rosa_token_warning
+ agnosticd_user_info:
+ msg: |
+
+ *IMPORTANT:* You did not provide a ROSA token.
+
+        This is fine as long as you do not need to access the management console at
+ https://console.redhat.com/openshift. It is recommended that you generate and provide your own ROSA token when deploying
+ this catalog item so that you have full functionality and control of your cluster. You can generate a rosa token from
+ your Red Hat console account here: https://console.redhat.com/openshift/token/rosa
diff --git a/ansible/configs/base-rosa/templates/agnosticd_user_info_upload.yaml.j2 b/ansible/configs/base-rosa/templates/agnosticd_user_info_upload.yaml.j2
new file mode 100644
index 00000000000..9ea1416800a
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/agnosticd_user_info_upload.yaml.j2
@@ -0,0 +1,26 @@
+{% if _userdata is defined %}
+- name: Update user data
+ agnosticd_user_info:
+ data:
+ {{ _userdata | to_nice_yaml(indent=2) | indent(6) }}
+{% else %}
+
+- name: Debug
+ debug:
+ msg: _userdata does not exist
+{% endif %}
+
+{% if _userinfo is defined %}
+- name: Print Access information
+ agnosticd_user_info:
+ msg: "{% raw %}{{ item }}{% endraw %}"
+
+ with_items:
+ {{ _userinfo | indent(4) }}
+
+{% else %}
+
+- name: Debug
+ debug:
+ msg: "_userinfo does not exist"
+{% endif %}
\ No newline at end of file
diff --git a/ansible/configs/base-rosa/templates/bastion_ssh_config.j2 b/ansible/configs/base-rosa/templates/bastion_ssh_config.j2
new file mode 100644
index 00000000000..a1ecb913db4
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/bastion_ssh_config.j2
@@ -0,0 +1,11 @@
+{% if cloud_provider == 'ec2' %}
+Host ec2* *.internal
+{% elif cloud_provider == 'osp' %}
+Host *.example.com
+{% endif %}
+ User {{ ansible_user }}
+ IdentityFile ~/.ssh/{{ env_authorized_key }}.pem
+ ForwardAgent yes
+ StrictHostKeyChecking no
+ ConnectTimeout 60
+ ConnectionAttempts 10
diff --git a/ansible/configs/base-rosa/templates/demo-operator-catalog-source.yaml b/ansible/configs/base-rosa/templates/demo-operator-catalog-source.yaml
new file mode 100644
index 00000000000..ac386b8fb73
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/demo-operator-catalog-source.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: CatalogSource
+metadata:
+ name: demo-provisioner-catalog
+ namespace: demo-provisioner-operator-system
+spec:
+ displayName: RHDP Demo Provisioner
+ publisher: Red Hat
+ sourceType: grpc
+ image: quay.io/redhat-gpte/demo-operator-catalog:2.0.0
+ updateStrategy:
+ registryPoll:
+ interval: 10m
diff --git a/ansible/configs/base-rosa/templates/demo-operator-namespace.yaml b/ansible/configs/base-rosa/templates/demo-operator-namespace.yaml
new file mode 100644
index 00000000000..ac769f0b443
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/demo-operator-namespace.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ control-plane: controller-manager
+ app.kubernetes.io/name: namespace
+ app.kubernetes.io/instance: system
+ app.kubernetes.io/component: manager
+ app.kubernetes.io/created-by: demo-provisioner-operator
+ app.kubernetes.io/part-of: demo-provisioner-operator
+ app.kubernetes.io/managed-by: kustomize
+ name: demo-provisioner-operator-system
diff --git a/ansible/configs/base-rosa/templates/demo-operator-operator-group.yaml b/ansible/configs/base-rosa/templates/demo-operator-operator-group.yaml
new file mode 100644
index 00000000000..7abb1672bed
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/demo-operator-operator-group.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: demo-provisioner-og
+ namespace: demo-provisioner-operator-system
+spec:
+ targetNamespaces:
+ - demo-provisioner-operator-system
diff --git a/ansible/configs/base-rosa/templates/demo-operator-subscription.yaml b/ansible/configs/base-rosa/templates/demo-operator-subscription.yaml
new file mode 100644
index 00000000000..b64bf08430f
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/demo-operator-subscription.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: demo-provisioner-subscription
+ namespace: demo-provisioner-operator-system
+spec:
+ channel: "alpha"
+ installPlanApproval: Automatic
+ name: demo-provisioner-operator
+ source: demo-provisioner-catalog
+ sourceNamespace: demo-provisioner-operator-system
+ startingCSV: demo-provisioner-operator.v0.0.1
diff --git a/ansible/configs/base-rosa/templates/demo-workshop-install.yaml.j2 b/ansible/configs/base-rosa/templates/demo-workshop-install.yaml.j2
new file mode 100644
index 00000000000..20a6bb4feb2
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/demo-workshop-install.yaml.j2
@@ -0,0 +1,30 @@
+---
+apiVersion: demos.redhat.com/v1
+kind: Demo
+metadata:
+ labels:
+ app.kubernetes.io/created-by: demo-provisioner-operator
+ app.kubernetes.io/instance: demo-sample
+ app.kubernetes.io/managed-by: kustomize
+ app.kubernetes.io/name: demo
+ app.kubernetes.io/part-of: demo-provisioner-operator
+ name: "{{ demo_instance_name }}"
+ namespace: demo-provisioner-operator-system
+spec:
+ agnosticD:
+ branch: "{{ scm_ref }}"
+ repo: 'https://github.com/redhat-cop/agnosticd.git'
+ name: "{{ demo_name }}"
+ extraVars:
+ output_dir: "/tmp"
+ num_users: {{ num_users }}
+ ocp4_workload_authentication_rosa_admin_user: admin
+ ocp4_workload_authentication_rosa_admin_password: Openshift@1
+ ocp4_workload_generate_kubeconfig_openshift_username: cluster-admin
+ ocp4_workload_generate_kubeconfig_openshift_password: "{{ rosa_admin_result.stdout }}"
+ ocp4_workload_generate_kubeconfig_openshift_api_url: "{{ rosa_api_server_url }}"
+ guid: "{{ guid | default(omit) }}"
+ ocp4_workload_authentication_rosa_aws_access_key_id: {{ aws_access_key_id }}
+ ocp4_workload_authentication_rosa_aws_region: {{ aws_region }}
+ ocp4_workload_authentication_rosa_aws_secret_access_key: {{ aws_secret_access_key }}
+ ocp4_workload_authentication_rosa_token: {{ gpte_rosa_token | default(omit) }}
diff --git a/ansible/configs/base-rosa/templates/kubeconfig.j2 b/ansible/configs/base-rosa/templates/kubeconfig.j2
new file mode 100644
index 00000000000..a2e751ac5f0
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/kubeconfig.j2
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Config
+
+clusters:
+- cluster:
+ server: {{ _r_kube_auth.k8s_auth.host }}
+ name: rosa
+
+contexts:
+- context:
+ cluster: rosa
+ user: {{ _r_kube_auth.k8s_auth.username }}
+ name: admin
+
+current-context: admin
+
+users:
+- name: {{ _r_kube_auth.k8s_auth.username }}
+ user:
+ token: {{ _r_kube_auth.k8s_auth.api_key }}
\ No newline at end of file
diff --git a/ansible/configs/base-rosa/templates/project-request-template.yaml b/ansible/configs/base-rosa/templates/project-request-template.yaml
new file mode 100644
index 00000000000..c138594909e
--- /dev/null
+++ b/ansible/configs/base-rosa/templates/project-request-template.yaml
@@ -0,0 +1,122 @@
+---
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+ name: project-request
+ namespace: openshift-config
+objects:
+- apiVersion: networking.k8s.io/v1
+ kind: NetworkPolicy
+ metadata:
+ name: allow-from-all-namespaces
+ spec:
+ ingress:
+ - from:
+ - namespaceSelector: {}
+ podSelector: {}
+- apiVersion: networking.k8s.io/v1
+ kind: NetworkPolicy
+ metadata:
+ name: allow-from-default-namespace
+ spec:
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ name: default
+ podSelector: null
+- apiVersion: v1
+ kind: LimitRange
+ metadata:
+ name: ${PROJECT_NAME}-core-resource-limits
+ spec:
+ limits:
+ - default:
+ cpu: 500m
+ memory: 1.5Gi
+ defaultRequest:
+ cpu: 50m
+ memory: 256Mi
+ max:
+ memory: 16Gi
+ min:
+ memory: 6Mi
+ type: Container
+ - max:
+ memory: 32Gi
+ min:
+ memory: 6Mi
+ type: Pod
+- apiVersion: v1
+ kind: Project
+ metadata:
+ annotations:
+ openshift.io/description: ${PROJECT_DESCRIPTION}
+ openshift.io/display-name: ${PROJECT_DISPLAYNAME}
+ openshift.io/requester: ${PROJECT_REQUESTING_USER}
+ creationTimestamp: null
+ name: ${PROJECT_NAME}
+ spec: {}
+ status: {}
+- apiVersion: v1
+ groupNames:
+ - system:serviceaccounts:${PROJECT_NAME}
+ kind: RoleBinding
+ metadata:
+ creationTimestamp: null
+ name: system:image-pullers
+ namespace: ${PROJECT_NAME}
+ roleRef:
+ name: system:image-puller
+ subjects:
+ - kind: SystemGroup
+ name: system:serviceaccounts:${PROJECT_NAME}
+ userNames: null
+- apiVersion: v1
+ groupNames: null
+ kind: RoleBinding
+ metadata:
+ creationTimestamp: null
+ name: system:image-builders
+ namespace: ${PROJECT_NAME}
+ roleRef:
+ name: system:image-builder
+ subjects:
+ - kind: ServiceAccount
+ name: builder
+ userNames:
+ - system:serviceaccount:${PROJECT_NAME}:builder
+- apiVersion: v1
+ groupNames: null
+ kind: RoleBinding
+ metadata:
+ creationTimestamp: null
+ name: system:deployers
+ namespace: ${PROJECT_NAME}
+ roleRef:
+ name: system:deployer
+ subjects:
+ - kind: ServiceAccount
+ name: deployer
+ userNames:
+ - system:serviceaccount:${PROJECT_NAME}:deployer
+- apiVersion: v1
+ groupNames: null
+ kind: RoleBinding
+ metadata:
+ creationTimestamp: null
+ name: admin
+ namespace: ${PROJECT_NAME}
+ roleRef:
+ name: admin
+ subjects:
+ - kind: User
+ name: ${PROJECT_ADMIN_USER}
+ userNames:
+ - ${PROJECT_ADMIN_USER}
+parameters:
+- name: PROJECT_NAME
+- name: PROJECT_DISPLAYNAME
+- name: PROJECT_DESCRIPTION
+- name: PROJECT_ADMIN_USER
+- name: PROJECT_REQUESTING_USER
diff --git a/ansible/configs/base-rosa/workloads.yml b/ansible/configs/base-rosa/workloads.yml
new file mode 100644
index 00000000000..ee2d6435192
--- /dev/null
+++ b/ansible/configs/base-rosa/workloads.yml
@@ -0,0 +1,116 @@
+---
+# Workloads are being run on bastion.
+# This enables using the k8s module in the workload.
+# openshift python module is installed for Python3
+- name: Install workloads
+ hosts: bastions
+ gather_facts: false
+ run_once: true
+ become: false
+ tasks:
+ - name: Set Ansible Python interpreter to k8s virtualenv
+ set_fact:
+ ansible_python_interpreter: /opt/virtualenvs/k8s/bin/python
+
+ - name: Generate cluster api
+ set_fact:
+ rosa_api_server_url: "https://api{{ rosa_console_url.stdout | regex_search('(?<=\\.apps).*') }}:6443"
+
+ - name: Run authentication
+ community.okd.openshift_auth:
+ validate_certs: false
+ host: "{{ rosa_api_server_url }}"
+ username: cluster-admin
+ password: "{{ rosa_admin_result.stdout }}"
+ register: _r_kube_auth
+ retries: 30
+ delay: 120
+ until:
+ - _r_kube_auth is defined
+ - _r_kube_auth.k8s_auth is defined
+ - _r_kube_auth.k8s_auth.api_key is defined
+
+ - name: Create a directory if it does not exist
+ ansible.builtin.file:
+ path: ~/.kube
+ state: directory
+ mode: '0755'
+
+ - name: Generate kubeconfig
+ template:
+ src: templates/kubeconfig.j2
+ dest: ~/.kube/config
+
+ - name: Install ocp-student-workloads
+ when:
+ - user_count | default(0) | int > 0
+ - student_workloads | default("") | length > 0
+ tags:
+ - student_workloads
+ block:
+ - name: Check if authentication mechanism is set to htpasswd
+ when: install_idm | default("") != "htpasswd"
+ fail:
+ msg: Authentication Mechanism must be htpasswd
+
+ - name: Generate list of User IDs
+ set_fact:
+ users: "{{ lookup('sequence', 'start=1 end={{ user_count|int }}', wantlist=true) | map('int') | list }}"
+
+ - name: Deploy ocp-student-workloads for each user ID
+ include_role:
+ name: "{{ workload_loop_var[1] }}"
+ vars:
+ ocp_username: "user{{ workload_loop_var[0] }}"
+ become_override: true
+ ACTION: "provision"
+ loop: "{{ users | product(student_workloads) | list }}"
+ loop_control:
+ loop_var: workload_loop_var
+
+ - name: install ocp-infra-workloads
+ vars:
+ ACTION: "provision"
+ ocp_username: "system:admin"
+ # Variables defined for running infra workloads
+ output_dir: "/tmp"
+ num_users: "{{ num_users }}"
+ ocp4_workload_authentication_rosa_admin_user: admin
+ ocp4_workload_authentication_rosa_admin_password: Openshift@1
+ ocp4_workload_generate_kubeconfig_openshift_username: cluster-admin
+ ocp4_workload_generate_kubeconfig_openshift_password: "{{ rosa_admin_result.stdout }}"
+ ocp4_workload_generate_kubeconfig_openshift_api_url: "{{ rosa_api_server_url }}"
+ guid: "{{ guid | default(omit) }}"
+ ocp4_workload_authentication_rosa_aws_access_key_id: "{{ aws_access_key_id }}"
+ ocp4_workload_authentication_rosa_aws_region: "{{ aws_region }}"
+ ocp4_workload_authentication_rosa_aws_secret_access_key: "{{ aws_secret_access_key }}"
+ ocp4_workload_authentication_rosa_token: "{{ gpte_rosa_token | default(omit) }}"
+ ansible.builtin.include_role:
+ name: "{{ workload_loop_var }}"
+ loop: "{{ infra_workloads }}"
+ loop_control:
+ loop_var: workload_loop_var
+
+ - name: Check validatingwebhookconfiguration sre-namespace-validation exists.
+ k8s_info:
+ api_version: admissionregistration.k8s.io/v1
+ kind: ValidatingWebhookConfiguration
+ register: r_failed_validation
+ until: "{{ r_failed_validation.resources | json_query('[?metadata.name == `sre-namespace-validation`]') }}"
+ retries: 60
+ delay: 10
+
+ - name: Remove restricted operations on ROSA clusters from validatingwebhookconfiguration.
+ shell: |
+ oc login --insecure-skip-tls-verify=true -u cluster-admin -p {{ rosa_admin_result.stdout }} {{ rosa_api_server_url }}
+ sleep 10
+ oc delete validatingwebhookconfiguration sre-namespace-validation
+
+ - name: Update project template
+ k8s:
+ state: present
+ definition: "{{ lookup('template', 'templates/project-request-template.yaml' ) | from_yaml }}"
+ validate_certs: false
+ register: r_project_template
+ retries: 2
+ delay: 5
diff --git a/ansible/configs/convert-to-rhel/post_software.yml b/ansible/configs/convert-to-rhel/post_software.yml
index 73056e99c6f..398626a225f 100644
--- a/ansible/configs/convert-to-rhel/post_software.yml
+++ b/ansible/configs/convert-to-rhel/post_software.yml
@@ -74,10 +74,12 @@
ssh_command: "ssh {{ ansible_service_account_user_name }}@{{ groups['bastions'][0] | regex_replace('\\..*$') }}.{{ guid }}{{ agnosticd_domain_name }}"
ssh_password: "{{ student_password }}"
- - name: Deploy Bookbag
- ansible.builtin.include_role:
- name: ocp4_workload_bookbag
-
+ - name: Deploy Bookbag
+ when: bookbag_git_repo is defined
+ include_role:
+ name: bookbag
+ vars:
+ ACTION: create
- name: PostSoftware flight-check
hosts: localhost
diff --git a/ansible/configs/hands-on-with-openshift-virtualization/default_vars.yml b/ansible/configs/hands-on-with-openshift-virtualization/default_vars.yml
index 8a867f1bb58..fb9cc2fb90e 100644
--- a/ansible/configs/hands-on-with-openshift-virtualization/default_vars.yml
+++ b/ansible/configs/hands-on-with-openshift-virtualization/default_vars.yml
@@ -27,8 +27,6 @@ multi_network_primary: "{{ guid }}-external-network"
osp_migration_report_labconsole: true
osp_migration_labconsole_url: https://console.apps.open.redhat.com/
-ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
-
########################
## Quotas ##
########################
diff --git a/ansible/configs/hands-on-with-openshift-virtualization/pre_infra.yml b/ansible/configs/hands-on-with-openshift-virtualization/pre_infra.yml
index 1ee75257113..3b39ebc802b 100644
--- a/ansible/configs/hands-on-with-openshift-virtualization/pre_infra.yml
+++ b/ansible/configs/hands-on-with-openshift-virtualization/pre_infra.yml
@@ -1,27 +1,56 @@
+---
- name: Step 000 Pre Infrastructure
hosts: localhost
connection: local
become: false
tags:
- - step001
- - pre_infrastructure
+ - step001
+ - pre_infrastructure
tasks:
- - name: Create migration host group
- add_host:
- name: "{{ import_host }}"
- ansible_become: true
- ansible_ssh_private_key_file: "{{ migration_key_path | default(omit) }}"
- ansible_user: "opentlc-mgr"
- bastion: "{{ import_host }}"
- group: "migration"
- output_dir: "{{ output_dir }}"
- remote_user: "opentlc-mgr"
+ - when: target_host is mapping
+ block:
+ - when:
+ - '"ansible_ssh_private_key_content" in target_host'
+ - '"ansible_ssh_private_key_file" in target_host'
+ fail:
+ msg: You cannot set both ansible_ssh_private_key_content and ansible_ssh_private_key_file
+ - when: '"ansible_ssh_private_key_content" in target_host'
+ block:
+ - name: Prepare ssh_key from provided content
+ copy:
+ content: "{{ target_host.ansible_ssh_private_key_content }}"
+ dest: "{{ output_dir }}/ssh_key.pem"
+ mode: 0600
+
+ - set_fact:
+ target_host_ansible_ssh_private_key_file: "{{ output_dir }}/ssh_key.pem"
+
+ - name: Add migration host to inventory
+ add_host:
+ name: >-
+ {{
+ target_host.name
+ | default(target_host.hostname)
+ | default(target_host.ansible_host)
+ }}
+ ansible_host: "{{ target_host.ansible_host | default(omit) }}"
+ group: migration
+ ansible_user: "{{ target_host.ansible_user | default(omit) }}"
+ ansible_port: "{{ target_host.ansible_port | default(omit) }}"
+ ansible_ssh_private_key_file: >-
+ {{ target_host.ansible_ssh_private_key_file
+ | default(target_host_ansible_ssh_private_key_file)
+ | default(omit) }}
+ ansible_ssh_extra_args: "{{ target_host.ansible_ssh_extra_args | default(omit) }}"
+ ansible_ssh_pipelining: true
+ ansible_become: true
+ ansible_python_interpreter: /root/virtualenvs/python3.8-migration/bin/python
- name: Download images from IBM Cloud when is production
hosts: migration
gather_facts: false
tasks:
- - import_role:
- name: infra-osp-download-images
- when: purpose == "production"
+ - import_role:
+ name: infra-osp-download-images
+ when: purpose == "production"
diff --git a/ansible/configs/hands-on-with-openshift-virtualization/requirements.yml b/ansible/configs/hands-on-with-openshift-virtualization/requirements.yml
index 3c8c3e20862..a1a64d247ee 100644
--- a/ansible/configs/hands-on-with-openshift-virtualization/requirements.yml
+++ b/ansible/configs/hands-on-with-openshift-virtualization/requirements.yml
@@ -5,4 +5,4 @@ collections:
- name: community.general
version: 4.6.1
- name: openstack.cloud
- version: 1.7.2
+ version: 2.1.0
diff --git a/ansible/configs/hands-on-with-openshift-virtualization/software.yml b/ansible/configs/hands-on-with-openshift-virtualization/software.yml
index 6a3263b0b09..24cc6fa5ac5 100644
--- a/ansible/configs/hands-on-with-openshift-virtualization/software.yml
+++ b/ansible/configs/hands-on-with-openshift-virtualization/software.yml
@@ -365,7 +365,7 @@
- name: Configure Logical Volumes
shell: "/usr/local/bin/oc create -f 2_logical_volume.yaml"
retries: 30
- delay: 30
+ delay: 60
register: result
until: result.rc == 0
args:
diff --git a/ansible/configs/hands-on-with-openshift-virtualization/templates/install-config.yaml.j2 b/ansible/configs/hands-on-with-openshift-virtualization/templates/install-config.yaml.j2
index 0a88ca7cdda..44028e872a7 100644
--- a/ansible/configs/hands-on-with-openshift-virtualization/templates/install-config.yaml.j2
+++ b/ansible/configs/hands-on-with-openshift-virtualization/templates/install-config.yaml.j2
@@ -90,4 +90,4 @@ platform:
sshKey: '{{ ssh_key_pub.stdout }}'
-pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K3JocGRzYWRtaW5zcmVkaGF0Y29tMWZyZ3NpZHV6cTJkem5zajNpdzBhdG1samg3OjJMSTFEVTM1MFVCQks1ODRCTFVBODBFTTU1V0RQRDNXRDI0Qko2Q0I5VzNFSFIzS0pSSFhOSFgyVllNMlFFMVQ=","email":"rhpds-admins@redhat.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K3JocGRzYWRtaW5zcmVkaGF0Y29tMWZyZ3NpZHV6cTJkem5zajNpdzBhdG1samg3OjJMSTFEVTM1MFVCQks1ODRCTFVBODBFTTU1V0RQRDNXRDI0Qko2Q0I5VzNFSFIzS0pSSFhOSFgyVllNMlFFMVQ=","email":"rhpds-admins@redhat.com"},"registry.connect.redhat.com":{"auth":"NTE1NDg0ODB8dWhjLTFGckdzSURVWlEyRHpuU0ozaVcwQXRtTEpoNzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXpPR1prTVdJNFpqYzJOamcwTmpKbVlXTTRaVFpsWVRnd09EUTJOMkkzTnlKOS5YUmQ5LS1LQ3kzVlpVbF9ldTc0THpQMFEzOVYwRUVfeWRZOE5pVGRScUlyd2hVRHYtcFF2ZEtLV1ZpVmlaQWF0QkhEUVdmVDB1Z2pfTWIzYmNPUktqSXdBNldQTXYxWTc1RmhYQUg1S2Myc3lnSHVxWTRfZlhSOXJnbW42N0l0MmhiUXJyb3BBNXlaYXpXSzhPeTBJb29VWFAteDBPUjZ2VDJTVGktbm5sblBLbEFSWTBEZkxJYmk3OHZlZXFadUpyUDl4SzlXdnRaOEZOREpzQnlUc2VmeFRoVmtLMDVwVDlhTk9nTkxITGJMeU5sdEc1RE9xU1JiZ1hLMDJ6RXNaU3BwYmZLdVAwNVJYQWljQy14WEZiamtLaFpkYTgwV3lnZDJKcTZXWVF3WW83ZXgtLUh1MEpKeXBTczRINVY0Nm50dTNVRlNVUERBZEJ5VmVDU2RxckpzUWZoSmlpLVdJbXdjWnp6LUNwTlRfNVo0ei1WUkc0aV9hVF9TWnVkQzVySmFLdFpHS1RQWlg0SDlNLWxDeFlHZDJNYzhuWlc4NWVUeTJPYnBVOHA2S19sU3A3Wm15RzhEbWh6bFAtYTQzb0J1V3hJTHg3Y283U3BkOFRyYVNRbjVnaFpvc0VKZGp6X2ljTlFhVktNazFHQjEwbU1uOXJBeGdUcm5qU09aSEZvcXdmX2Y2dnZFWi0ySUp2Qk91UUZRQThsZDlzRDVDb1ZWNEdwTWx1Rl8zZGJqcXhuVTE0WXdHT2RhSldSOEtMTlFwbU9RV0JrWFJIcVpwN01UT0ZDX0dMVDRWeGNTMXhva0p6RUFxN1c4NzBSQVo4VnAtUGdscEJCc2RDT2tfdGNCNEY5T2hkZ0NPb3JMNHJkZmp6cEJobUZuMEhzVkFFNGJkaWhfRjNGSQ==","email":"rhpds-admins@redhat.com"},"registry.redhat.io":{"auth":"NTE1NDg0ODB8dWhjLTFGckdzSURVWlEyRHpuU0ozaVcwQXRtTEpoNzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXpPR1prTVdJNFpqYzJOamcwTmpKbVlXTTRaVFpsWVRnd09EUTJOMkkzTnlKOS5YUmQ5LS1LQ3kzVlpVbF9ldTc0THpQMFEzOVYwRUVfeWRZOE5pVGRScUlyd2hVRHYtcFF2ZEtLV1ZpVmlaQWF0QkhEUVdmVDB1Z2pfTWIzYmNPUktqSXdBNldQTXYxWTc1RmhYQUg1S2Myc3lnSHVxWTRfZl
hSOXJnbW42N0l0MmhiUXJyb3BBNXlaYXpXSzhPeTBJb29VWFAteDBPUjZ2VDJTVGktbm5sblBLbEFSWTBEZkxJYmk3OHZlZXFadUpyUDl4SzlXdnRaOEZOREpzQnlUc2VmeFRoVmtLMDVwVDlhTk9nTkxITGJMeU5sdEc1RE9xU1JiZ1hLMDJ6RXNaU3BwYmZLdVAwNVJYQWljQy14WEZiamtLaFpkYTgwV3lnZDJKcTZXWVF3WW83ZXgtLUh1MEpKeXBTczRINVY0Nm50dTNVRlNVUERBZEJ5VmVDU2RxckpzUWZoSmlpLVdJbXdjWnp6LUNwTlRfNVo0ei1WUkc0aV9hVF9TWnVkQzVySmFLdFpHS1RQWlg0SDlNLWxDeFlHZDJNYzhuWlc4NWVUeTJPYnBVOHA2S19sU3A3Wm15RzhEbWh6bFAtYTQzb0J1V3hJTHg3Y283U3BkOFRyYVNRbjVnaFpvc0VKZGp6X2ljTlFhVktNazFHQjEwbU1uOXJBeGdUcm5qU09aSEZvcXdmX2Y2dnZFWi0ySUp2Qk91UUZRQThsZDlzRDVDb1ZWNEdwTWx1Rl8zZGJqcXhuVTE0WXdHT2RhSldSOEtMTlFwbU9RV0JrWFJIcVpwN01UT0ZDX0dMVDRWeGNTMXhva0p6RUFxN1c4NzBSQVo4VnAtUGdscEJCc2RDT2tfdGNCNEY5T2hkZ0NPb3JMNHJkZmp6cEJobUZuMEhzVkFFNGJkaWhfRjNGSQ==","email":"rhpds-admins@redhat.com"},"provision.ocp.example.com:5000":{"email":"dummy@redhat.com","auth":"ZHVtbXk6ZHVtbXk="}}}'
+pullSecret: {{ ocp4_pull_secret | to_json | to_json if ocp4_pull_secret is mapping else ocp4_pull_secret | to_json }}
diff --git a/ansible/configs/hybrid-cloud-binder/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/hybrid-cloud-binder/files/cloud_providers/ec2_cloud_template.j2
index ae94a8e52aa..eaff4ee1a58 100644
--- a/ansible/configs/hybrid-cloud-binder/files/cloud_providers/ec2_cloud_template.j2
+++ b/ansible/configs/hybrid-cloud-binder/files/cloud_providers/ec2_cloud_template.j2
@@ -198,7 +198,7 @@ Resources:
- {{ instance['image_id'] | default('RHELAMI') }}
InstanceType: "{{ instance['flavor'][cloud_provider] }}"
- KeyName: "{{ instance['key_name'] | default(key_name) }}"
+ KeyName: "{{instance.key_name | default(ssh_provision_key_name) | default(key_name)}}"
{% if instance['UserData'] is defined %}
{{ instance['UserData'] }}
{% endif %}
diff --git a/ansible/configs/hybrid-cloud-binder/post_software.yml b/ansible/configs/hybrid-cloud-binder/post_software.yml
index 62abc4874f0..e15fe43ef9f 100644
--- a/ansible/configs/hybrid-cloud-binder/post_software.yml
+++ b/ansible/configs/hybrid-cloud-binder/post_software.yml
@@ -24,9 +24,10 @@
groups: ohc_hub
ansible_connection: ssh
# need full path to key because not creating ssh_config file
- ansible_ssh_private_key_file: "~/.ssh/opentlc_admin_backdoor.pem"
- ansible_user: "ec2-user"
+ ansible_user: "{{ aws_hub_provision_data.bastion_ssh_user_name }}"
+ ansible_password: "{{ aws_hub_provision_data.bastion_ssh_password }}"
remote_user: "ec2-user"
+ ansible_python_interpreter: /opt/virtualenvs/k8s/bin/python3
# ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf"
- name: add aws_dev_a to inventory
@@ -35,9 +36,10 @@
groups: ohc_aws_dev_a
ansible_connection: ssh
# ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf"
- ansible_ssh_private_key_file: "~/.ssh/opentlc_admin_backdoor.pem"
- ansible_user: "ec2-user"
+ ansible_user: "{{ aws_dev_a_provision_data.bastion_ssh_user_name }}"
+ ansible_password: "{{ aws_dev_a_provision_data.bastion_ssh_password }}"
remote_user: "ec2-user"
+ ansible_python_interpreter: /opt/virtualenvs/k8s/bin/python3
- name: Log into Hub
k8s_auth:
@@ -236,16 +238,6 @@
when: hybrid_cloud_binder_install_secured_cluster is true | default(true) | bool
block:
- - name: Check if desired virtualenv is available on the host
- stat:
- path: "/opt/virtualenvs/k8s/bin/python"
- register: r_virtualenv
-
- - name: Set Ansible Python interpreter to virtualenv
- when: r_virtualenv.stat.exists
- set_fact:
- ansible_python_interpreter: "/opt/virtualenvs/k8s/bin/python"
-
- name: Call the OCP4 RHACS Apps role
ansible.builtin.include_role:
name: ocp4_workload_rhacs_demo_apps
@@ -320,16 +312,6 @@
when: hybrid_cloud_binder_setup_coolstore is true | default(true) | bool
block:
- - name: Check if desired virtualenv is available on the host
- stat:
- path: "/opt/virtualenvs/k8s/bin/python"
- register: r_virtualenv
-
- - name: Set Ansible Python interpreter to virtualenv
- when: r_virtualenv.stat.exists
- set_fact:
- ansible_python_interpreter: "/opt/virtualenvs/k8s/bin/python"
-
- name: Setup CoolStore
vars:
ocp4_workload_coolstore_backoffice_demo_ohc_central_stackrox_host: "{{ aws_hub_provision_data.acs_route | urlsplit('hostname') }}"
diff --git a/ansible/configs/migrating-to-ocpvirt/files/rfc2136.ini.j2 b/ansible/configs/migrating-to-ocpvirt/files/rfc2136.ini.j2
new file mode 100644
index 00000000000..316dd2b0106
--- /dev/null
+++ b/ansible/configs/migrating-to-ocpvirt/files/rfc2136.ini.j2
@@ -0,0 +1,10 @@
+# Target DNS server
+dns_rfc2136_server = {{ osp_cluster_dns_server }}
+# Target DNS port
+dns_rfc2136_port = 53
+# TSIG key name
+dns_rfc2136_name = {{ ddns_key_name }}
+# TSIG key secret
+dns_rfc2136_secret = {{ ddns_key_secret }}
+# TSIG key algorithm
+dns_rfc2136_algorithm = {{ ddns_key_algorithm | d('hmac-md5') }}
diff --git a/ansible/configs/migrating-to-ocpvirt/post_software.yml b/ansible/configs/migrating-to-ocpvirt/post_software.yml
index 0e6be48bee7..fc543672da6 100644
--- a/ansible/configs/migrating-to-ocpvirt/post_software.yml
+++ b/ansible/configs/migrating-to-ocpvirt/post_software.yml
@@ -136,7 +136,7 @@
# - "OpenShift web console : https://console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}"
# - "kubeadmin user Password : {{ hostvars['kube_holder']['kubeadmin_password'] }}"
- - when: ocp4_aio_deploy_cnvlab
+ - when: ocp4_aio_deploy_cnvlab and not build_lab|bool
name: Print Openshift Virtualization lab infos
agnosticd_user_info:
msg: "{{ item }}"
diff --git a/ansible/configs/migrating-to-ocpvirt/pre_software.yml b/ansible/configs/migrating-to-ocpvirt/pre_software.yml
index bd1b9e2355e..d13bd9fea26 100644
--- a/ansible/configs/migrating-to-ocpvirt/pre_software.yml
+++ b/ansible/configs/migrating-to-ocpvirt/pre_software.yml
@@ -65,27 +65,6 @@
name: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm"
disable_gpg_check: true
- - name: install certbot
- yum:
- name: "certbot"
-
- - name: Generate certificate using certbot
- command: >
- certbot certonly --standalone
- -d console-openshift-console.apps.{{ guid }}.dynamic.opentlc.com,oauth-openshift.apps.{{ guid }}.dynamic.opentlc.com,virt-openshift-mtv.apps.{{ guid }}.dynamic.opentlc.com
- -m josegonz@redhat.com --agree-tos -n
-
-
- - name: Fetch letsencrypt SSL certificates to transfer to the bastion node
- fetch:
- src: "/etc/letsencrypt/archive/console-openshift-console.apps.{{ guid }}.dynamic.opentlc.com/{{ item }}"
- dest: "{{ output_dir }}/{{ item }}"
- flat: yes
- loop:
- - chain1.pem
- - cert1.pem
- - privkey1.pem
-
- name: install mariadb client
yum:
name: "mariadb"
@@ -98,6 +77,35 @@
include_role:
name: bastion-student-user
+ - name: Copy credentials to host temporarily
+ template:
+ src: ./files/rfc2136.ini.j2
+ dest: /home/lab-user/.rfc2136.ini
+
+ - name: Request Both Let's Encrypt Static and Wildcard Certificates
+ include_role:
+ name: host-lets-encrypt-certs-certbot
+ vars:
+ _certbot_domain: "api.{{ guid }}.dynamic.opentlc.com"
+ _certbot_wildcard_domain: "*.apps.{{ guid }}.dynamic.opentlc.com"
+ _certbot_production: True
+ _certbot_dns_provider: "rfc2136"
+ _certbot_remote_dir: "/root"
+ _certbot_cache_cert_file: "/tmp/server.cert"
+ _certbot_cache_key_file: "/tmp/server.key"
+ _certbot_cache_ca_file: "/tmp/server_ca.cer"
+ _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
+ _certbot_cache_archive_file: "/tmp/certbot.tar.gz"
+ _certbot_renew_automatically: False
+ _certbot_force_issue: False
+ _certbot_user: "lab-user"
+
+ - name: Remove credentials once LE certs complete
+ file:
+ state: absent
+ path: /home/lab-user/.rfc2136.ini
+ when: _certbot_setup_complete
+
- name: Deploy base software
include_role:
name: ocp4_aio_base_software
@@ -139,15 +147,6 @@
vars:
ocp4_aio_ssh_key: "{{ lookup('file', '{{ output_dir }}/{{ guid }}_id_rsa.pub' ) }}"
- - name: Copy letsencrypt files
- copy:
- src: "{{ output_dir }}/{{ item }}"
- dest: "/root/{{ item }}"
- loop:
- - chain1.pem
- - cert1.pem
- - privkey1.pem
-
- name: Install httpd
yum:
name: httpd
diff --git a/ansible/configs/migrating-to-ocpvirt/requirements.yml b/ansible/configs/migrating-to-ocpvirt/requirements.yml
index 85125b7210e..6cc79376eb6 100644
--- a/ansible/configs/migrating-to-ocpvirt/requirements.yml
+++ b/ansible/configs/migrating-to-ocpvirt/requirements.yml
@@ -1,35 +1,43 @@
roles:
- - src: https://github.com/agonzalezrh/ocp4_aio_infra_role_base_software.git
+ - name: ocp4_aio_base_software
+ src: https://github.com/rhpds/ocp4_aio_infra_role_base_software.git
scm: git
- name: ocp4_aio_base_software
+ version: v413
- name: ocp4_aio_base_virt
- src: https://github.com/agonzalezrh/ocp4_aio_infra_role_base_virt.git
+ src: https://github.com/rhpds/ocp4_aio_infra_role_base_virt.git
scm: git
+ version: v413
- name: ocp4_aio_prepare_bastion
- src: https://github.com/agonzalezrh/ocp4_aio_infra_role_prepare_bastion.git
+ src: https://github.com/rhpds/ocp4_aio_infra_role_prepare_bastion.git
scm: git
+ version: v413
- name: ocp4_aio_deploy_bastion
- src: https://github.com/agonzalezrh/ocp4_aio_infra_role_deploy_bastion.git
+ src: https://github.com/rhpds/ocp4_aio_infra_role_deploy_bastion.git
scm: git
- name: ocp4_aio_deploy_ocp
- src: https://github.com/agonzalezrh/ocp4_aio_infra_role_deploy_ocp.git
+ src: https://github.com/rhpds/ocp4_aio_infra_role_deploy_ocp.git
scm: git
+ version: v413
- name: ocp4_aio_role_ocs
- src: https://github.com/agonzalezrh/ocp4_aio_role_ocs.git
+ src: https://github.com/rhpds/ocp4_aio_role_ocs.git
scm: git
+ version: v413
- name: ocp4_aio_workload_cnvlab
- src: https://github.com/agonzalezrh/ocp4_aio_role_deploy_cnvlab.git
+ src: https://github.com/rhpds/ocp4_aio_role_deploy_cnvlab.git
scm: git
+ version: v413
collections:
- name: community.general
+ version: 4.6.1
- name: containers.podman
+ version: 1.10.1
- name: equinix.metal
version: 1.4.1
- name: ovirt.ovirt
diff --git a/ansible/configs/migrating-to-ocpvirt/templates/httpd/ssl.conf b/ansible/configs/migrating-to-ocpvirt/templates/httpd/ssl.conf
index b8e9b9ead07..bedb229ba16 100644
--- a/ansible/configs/migrating-to-ocpvirt/templates/httpd/ssl.conf
+++ b/ansible/configs/migrating-to-ocpvirt/templates/httpd/ssl.conf
@@ -14,8 +14,8 @@ SSLEngine on
SSLHonorCipherOrder on
SSLCipherSuite PROFILE=SYSTEM
SSLProxyCipherSuite PROFILE=SYSTEM
-SSLCertificateFile /etc/letsencrypt/live/console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}/fullchain.pem
-SSLCertificateKeyFile /etc/letsencrypt/live/console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}/privkey.pem
+SSLCertificateFile /root/certbot/config/live/api.{{ guid }}.{{ cluster_dns_zone }}/fullchain.pem
+SSLCertificateKeyFile /root/certbot/config/live/api.{{ guid }}.{{ cluster_dns_zone }}/privkey.pem
SetEnvIf Request_URI /api/proxy/plugin/forklift-console-plugin/ forklift
RequestHeader set Host "console-openshift-console.apps.ocp.example.com" env=!forklift
RequestHeader set Referer "https://console-openshift-console.apps.ocp.example.com" env=!forklift
@@ -50,8 +50,8 @@ LogLevel warn
SSLEngine on
SSLCipherSuite PROFILE=SYSTEM
SSLProxyCipherSuite PROFILE=SYSTEM
-SSLCertificateFile /etc/letsencrypt/live/console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}/fullchain.pem
-SSLCertificateKeyFile /etc/letsencrypt/live/console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}/privkey.pem
+SSLCertificateFile /root/certbot/config/live/api.{{ guid }}.{{ cluster_dns_zone }}/fullchain.pem
+SSLCertificateKeyFile /root/certbot/config/live/api.{{ guid }}.{{ cluster_dns_zone }}/privkey.pem
RequestHeader set Host "oauth-openshift.apps.ocp.example.com"
ProxyPreserveHost Off
SSLProxyEngine on
@@ -76,8 +76,8 @@ SSLEngine on
SSLHonorCipherOrder on
SSLCipherSuite PROFILE=SYSTEM
SSLProxyCipherSuite PROFILE=SYSTEM
-SSLCertificateFile /etc/letsencrypt/live/console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}/fullchain.pem
-SSLCertificateKeyFile /etc/letsencrypt/live/console-openshift-console.apps.{{ guid }}.{{ cluster_dns_zone }}/privkey.pem
+SSLCertificateFile /root/certbot/config/live/api.{{ guid }}.{{ cluster_dns_zone }}/fullchain.pem
+SSLCertificateKeyFile /root/certbot/config/live/api.{{ guid }}.{{ cluster_dns_zone }}/privkey.pem
#RequestHeader set Referer "https://zzzzz.apps.ocp.example.com"
#RequestHeader set Origin "https://zzzzz.apps.ocp.example.com"
ProxyPreserveHost On
diff --git a/ansible/configs/multi-cloud-capsule/README.adoc b/ansible/configs/multi-cloud-capsule/README.adoc
deleted file mode 100644
index 3e14d23a416..00000000000
--- a/ansible/configs/multi-cloud-capsule/README.adoc
+++ /dev/null
@@ -1,225 +0,0 @@
-:config: multi-cloud-capsule
-:author: GPTE Team
-:tag1: install_capsule
-:tag2: configure_capsule
-
-
-
-Config: {config}
-===============
-
-With {config}, we can capsule server on OpenStack and AWS cloud providers.
-
-
-Requirements
-------------
-
-Following are the requirements:
-
-. Aws OR OpenStack credentials .
-. Satellite must be install and setup.
-. Satellite should have all capsule repositories in activation key.
-
-
-
-Config Variables
-----------------
-
-* Cloud specfic settings related variables.
-
-|===
-|*Variable* | *State* |*Description*
-| env_type: multi-cloud-capsule |Required | Name of the config
-| output_dir: /tmp/workdir |Required | Writable working scratch directory
-| email: capsule-vm@example.com |Required | User info for notifications
-| guid: defaultguid | Reqired |Unique identifier
-| cloud_provider: ec2 |Required | Which AgnosticD Cloud Provider to use
-|aws_regions: "String" |Required | aws region
-|===
-
-
-* Satellite specfic settings related variables.
-
-|===
-|*Variable* | *State* |*Description*
-|install_satellite: Boolean |Required | To enable installation roles
-|configure_satellite: Boolean |Required | To enable configuration roles
-|satellite_version: "Digit" |Required |satellite version
-|org: "String" |Required |Organization name
-|org_label: "String" |Required | Organization label in string without space
-|org_description: "String" |Required | Organization description
-|lifecycle_environment_path: [list] |Required | Contains nested list of environment path
-|satellite_content: [list] |Required | Main List variable
-|subscription_name: "String" |Required | Subscription name mainly required for manifest role
-| manifest_file: "/path/to/manifest.zip" |Required | Path of download satellite manifest
-|===
-
-[NOTE]
-For more about variables read README.adoc of the roles.
-
-* Example variables files
-
-. Sample of sample_vars_ec2.yml
-[source=text]
-----
-[user@desktop ~]$ cd agnosticd/ansible
-
-[user@desktop ~]$ cat ./configs/multi-cloud-capsule/sample_vars_ec2.yml
-
-env_type: multi-cloud-capsule
-output_dir: /tmp/workdir
-email: satellite_vm@example.com
-
-
-install_satellite: True
-configure_satellite: True
-satellite_version: 6.4
-org: gpte
-org_label: gpte
-
-
-
-satellite_content:
- - name: "Capsule Server"
- activation_key: "capsule_key"
- subscriptions:
- - "Employee SKU"
- life_cycle: "Library"
- content_view: "Capsule Content"
- content_view_update: False
- repos:
- - name: 'Red Hat Enterprise Linux 7 Server (RPMs)'
- product: 'Red Hat Enterprise Linux Server'
- basearch: 'x86_64'
- releasever: '7Server'
-
- - name: 'Red Hat Satellite Capsule 6.4 (for RHEL 7 Server) (RPMs)'
- product: 'Red Hat Satellite Capsule'
- basearch: 'x86_64'
- - name: "Three Tier App"
- activation_key: "three_tier_app_key"
- content_view: "Three Tier App Content"
- life_cycle: "Library"
- subscriptions:
- - "Employee SKU"
- repos:
- - name: 'Red Hat Enterprise Linux 7 Server (RPMs)'
- product: 'Red Hat Enterprise Linux Server'
- basearch: 'x86_64'
- releasever: '7Server'
-----
-for reference look at link:sample_vars_ec2.yml[]
-
-. Sample of ec2_secrets.yml
-[source=text]
-----
-[user@desktop ~]$ cat ~/ec2_secrets.yml
-aws_access_key_id: xxxxxxxxxxxxxxxx
-aws_secret_access_key: xxxxxxxxxxxxxxxxxx
-own_repo_path: http://localrepopath/to/repo
-openstack_pem: ldZYgpVcjl0YmZNVytSb2VGenVrTG80SzlEU2xtUTROMHUzR1BZdzFoTEg3R2hXM
-====Omitted=====
-25ic0NTTnVDblp4bVE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
-
-openstack_pub: XZXYgpVcjl0YmZNVytSb2VGenVrTG80SzlEU2xtUTROMHUzR1BZdzFoTEg3R2hXM
-====Omitted=====
-53ic0NTTnVDblp4bVE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
-----
-
-
-
-Roles
------
-
-* List of satellite and capsule roles
-
-
-|===
-|*Role*| *Link* | *Description*
-|satellite-public-hostname | link:../../roles/satellite-public-hostname[satellite-public-hostname] | Set public hostname
-|satellite-capsule-installation |link:../../roles/satellite-capsule-installation[satellite-capsule-installation] | Install capsule packages
-|satellite-capsule-configuration | link:../../roles/satellite-capsule-configuration[satellite-capsule-configuration] | Setup capsule server
-|===
-
-Tags
----
-
-|===
-|{tag1} |Consistent tag for all capsule installation roles
-|{tag2} |Consistent tag for all capsule configuration roles
-|===
-
-* Example tags
-
-----
-## Tagged jobs
-ansible-playbook playbook.yml --tags configure_capsule
-
-## Skip tagged jobs
-ansible-playbook playbook.yml --skip-tags install_capsule
-----
-
-Example to run config
----------------------
-
-How to use config (for instance, with variables passed in playbook).
-
-[source=text]
-----
-[user@desktop ~]$ cd agnosticd/ansible
-
-[user@desktop ~]$ ansible-playbook main.yml \
- -e @./configs/multi-cloud-capsule/sample_vars_ec2.yml \
- -e @~/ec2_secrets.yml \
- -e guid=defaultguid \
- -e satellite_admin=admin \
- -e 'satellite_admin_password=changeme' \
- -e manifest_file=/path/to/manifest_satellite_6.4.zip
-----
-
-Example to stop environment
----------------------------
-
-[source=text]
-----
-[user@desktop ~]$ cd agnosticd/ansible
-
-[user@desktop ~]$ ansible-playbook ./configs/multi-cloud-capsule/stop.yml \
- -e @./configs/multi-cloud-capsule/sample_vars_ec2.yml \
- -e @~/ec2_secrets.yml \
- -e guid=defaultguid
-----
-
-Example to start environment
----------------------------
-
-[source=text]
-----
-[user@desktop ~]$ cd agnosticd/ansible
-
-[user@desktop ~]$ ansible-playbook ./configs/multi-cloud-capsule/start.yml \
- -e @./configs/multi-cloud-capsule/sample_vars_ec2.yml \
- -e @~/ec2_secrets.yml \
- -e guid=defaultguid
-----
-
-Example to destroy environment
-------------------------------
-
-[source=text]
-----
-[user@desktop ~]$ cd agnosticd/ansible
-
-[user@desktop ~]$ ansible-playbook ./configs/multi-cloud-capsule/destroy.yml \
- -e @./configs/multi-cloud-capsule/sample_vars_ec2.yml \
- -e @~/ec2_secrets.yml \
- -e guid=defaultguid
-----
-
-
-
-
-Author Information
-------------------
-
-{author}
diff --git a/ansible/configs/multi-cloud-capsule/default_vars.yml b/ansible/configs/multi-cloud-capsule/default_vars.yml
deleted file mode 100644
index d8ba5636fa3..00000000000
--- a/ansible/configs/multi-cloud-capsule/default_vars.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-
-env_type: multi-cloud-capsule
-output_dir: /tmp/workdir # Writable working scratch directory
-email: "{{env_type}}@example.com"
-guid: defaultguid
-
-
-deploy_local_ssh_config_location: "{{output_dir}}/"
-key_name: ocpkey # Keyname must exist in AWS
-env_authorized_key: "{{guid}}key"
-set_env_authorized_key: true
-default_key_name: ~/.ssh/{{key_name}}.pem
-
-install_bastion: true
-install_common: true
-install_ipa_client: false
-tower_run: false
-update_packages: false
-install_satellite: True
-configure_satellite: false
-
-project_tag: "{{ env_type }}-{{ guid }}"
-
-capsule_repos:
- - rhel-7-server-rpms
- - rhel-server-rhscl-7-rpms
- - rhel-7-server-satellite-maintenance-6-rpms
- - rhel-7-server-ansible-2.6-rpms
- - rhel-7-server-satellite-capsule-6.4-rpms
- - rhel-7-server-satellite-tools-6.4-rpms
-
-
-
-
-...
diff --git a/ansible/configs/multi-cloud-capsule/default_vars_ec2.yml b/ansible/configs/multi-cloud-capsule/default_vars_ec2.yml
deleted file mode 100644
index 58766f79297..00000000000
--- a/ansible/configs/multi-cloud-capsule/default_vars_ec2.yml
+++ /dev/null
@@ -1,120 +0,0 @@
-################################################################################
-### Environment Settings for aws
-################################################################################
-## Environment Sizing
-
-cloud_provider: ec2 # Which AgnosticD Cloud Provider to use # User info for notifications
-HostedZoneId: Z3IHLWJZOU9SRT
-aws_region: ap-southeast-2
-
-
-capsule_instance_count: 1
-capsule_instance_type: "m5a.2xlarge"
-
-security_groups:
- - name: CapsuleSG
- rules:
- - name: CapSSHPort
- description: "SSH Public"
- from_port: 22
- to_port: 22
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapbootpsPorts
- description: "bootps Public"
- from_port: 67
- to_port: 67
- protocol: udp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapftftpPorts
- description: "tftp Public"
- from_port: 69
- to_port: 69
- protocol: udp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapHTTPSPorts
- description: "HTTP Public"
- from_port: 80
- to_port: 80
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapHTTPSPorts
- description: "HTTPS Public"
- from_port: 443
- to_port: 443
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapCommplexPorts
- description: "Commplex Public"
- from_port: 5000
- to_port: 5000
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapCoPorts
- description: "Co Public"
- from_port: 5647
- to_port: 5647
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapiRDMIPorts
- description: "iRDMIPublic"
- from_port: 8000
- to_port: 8000
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapRDMIPorts
- description: "RDMIPublic"
- from_port: 8140
- to_port: 8140
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CappcsyncPorts
- description: "pcsync Public"
- from_port: 8443
- to_port: 8443
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapwebsbPorts
- description: "websb Public"
- from_port: 9090
- to_port: 9090
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
-
-# Environment Instances
-instances:
- - name: "capsule"
- count: "{{capsule_instance_count}}"
- security_groups:
- - CapsuleSG
- public_dns: true
- dns_loadbalancer: false
- flavor:
- ec2: "{{capsule_instance_type}}"
- tags:
- - key: "AnsibleGroup"
- value: "capsules"
- - key: "ostype"
- value: "linux"
- - key: "instance_filter"
- value: "{{ env_type }}-{{ email }}"
-
-# DNS settings for environmnet
-subdomain_base_short: "{{ guid }}"
-subdomain_base_suffix: ".example.opentlc.com"
-subdomain_base: "{{subdomain_base_short}}{{subdomain_base_suffix}}"
-
-zone_internal_dns: "{{guid}}.internal."
-chomped_zone_internal_dns: "{{guid}}.internal"
-
diff --git a/ansible/configs/multi-cloud-capsule/default_vars_osp.yml b/ansible/configs/multi-cloud-capsule/default_vars_osp.yml
deleted file mode 100644
index 71de90b9dcd..00000000000
--- a/ansible/configs/multi-cloud-capsule/default_vars_osp.yml
+++ /dev/null
@@ -1,133 +0,0 @@
-################################################################################
-### OSP Environment variables
-################################################################################
-
-
-cloud_provider: osp
-install_student_user: false
-
-
-ansible_user: cloud-user
-remote_user: cloud-user
-osp_cluster_dns_zone: red.osp.opentlc.com
-osp_cluster_dns_server: ddns01.opentlc.com
-use_dynamic_dns: true
-osp_project_create: true
-student_name: student
-admin_user: opentlc-mgr
-
-
-
-capsule_instance_type: 8c32g100d
-
-
-capsule_instance_image: rhel-server-7.7-update-2
-
-capsule_instance_count: 1
-
-security_groups:
- - name: CapsuleSG
- rules:
- - name: CapSSHPort
- description: "SSH Public"
- from_port: 22
- to_port: 22
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapbootpsPorts
- description: "bootps Public"
- from_port: 67
- to_port: 67
- protocol: udp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapftftpPorts
- description: "tftp Public"
- from_port: 69
- to_port: 69
- protocol: udp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapHTTPSPorts
- description: "HTTP Public"
- from_port: 80
- to_port: 80
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapHTTPSPorts
- description: "HTTPS Public"
- from_port: 443
- to_port: 443
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapCommplexPorts
- description: "Commplex Public"
- from_port: 5000
- to_port: 5000
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapCoPorts
- description: "Co Public"
- from_port: 5647
- to_port: 5647
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapiRDMIPorts
- description: "iRDMIPublic"
- from_port: 8000
- to_port: 8000
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapRDMIPorts
- description: "RDMIPublic"
- from_port: 8140
- to_port: 8140
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CappcsyncPorts
- description: "pcsync Public"
- from_port: 8443
- to_port: 8443
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
- - name: CapwebsbPorts
- description: "websb Public"
- from_port: 9090
- to_port: 9090
- protocol: tcp
- cidr: "0.0.0.0/0"
- rule_type: Ingress
-
-
-# Environment Instances
-instances:
- - name: "capsule"
- count: "{{capsule_instance_count}}"
- public_dns: true
- floating_ip: true
- image_id: "{{ capsule_instance_image }}"
- flavor:
- ec2: "{{capsule_instance_type}}"
- osp: "{{capsule_instance_type}}"
- azure: Standard_A2_V2
- image_id: "{{ capsule_instance_image }}"
- security_groups:
- - CapsuleSG
- tags:
- - key: "AnsibleGroup"
- value: "capsules"
- - key: "ostype"
- value: "linux"
- - key: "instance_filter"
- value: "{{ env_type }}-{{ email }}"
-
-
-
diff --git a/ansible/configs/multi-cloud-capsule/destroy_env.yml b/ansible/configs/multi-cloud-capsule/destroy_env.yml
deleted file mode 100644
index 6af8600d1d9..00000000000
--- a/ansible/configs/multi-cloud-capsule/destroy_env.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- import_playbook: ../../include_vars.yml
-
-- name: Delete Infrastructure
- hosts: localhost
- connection: local
- gather_facts: False
- become: no
- tasks:
- - name: Run infra-ec2-template-destroy
- include_role:
- name: "infra-{{cloud_provider}}-template-destroy"
- when: cloud_provider == 'ec2'
-
- - name: Run infra-azure-template-destroy
- include_role:
- name: "infra-{{cloud_provider}}-template-destroy"
- when: cloud_provider == 'azure'
diff --git a/ansible/configs/multi-cloud-capsule/files/hosts_template.j2 b/ansible/configs/multi-cloud-capsule/files/hosts_template.j2
deleted file mode 100644
index 9531ff4f6b3..00000000000
--- a/ansible/configs/multi-cloud-capsule/files/hosts_template.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{# # These are the satellite hosts #}
-{% if groups['satellites'] is defined %}
-[satellites]
-{% for host in groups['satellites'] %}
-{% if cloud_provider == 'ec2' %}
-{{host}}
-{% elif cloud_provider == 'osp' %}
-{{host}} ansible_host={{host}}.example.com
-{% endif %}
-{% endfor %}
-
-
-
-[all:vars]
-{# ###########################################################################
-### Ansible Vars
-########################################################################### #}
-timeout=60
-ansible_become=yes
-ansible_user={{remote_user}}
-
-[all:children]
-satellites
-{% endif %}
\ No newline at end of file
diff --git a/ansible/configs/multi-cloud-capsule/files/repos_template.j2 b/ansible/configs/multi-cloud-capsule/files/repos_template.j2
deleted file mode 100644
index 5f31f2335b4..00000000000
--- a/ansible/configs/multi-cloud-capsule/files/repos_template.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{% if groups['capsules'] is defined %}
-{% if inventory_hostname in groups['capsules'] %}
-{# capsule repos #}
-[rhel-7-server-rpms]
-name=Red Hat Enterprise Linux 7
-baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-rpms
-enabled=1
-gpgcheck=0
-
-[rhel-server-rhscl-7-rpms]
-name=Red Hat Enterprise Linux 7 RHSCL
-baseurl={{own_repo_path}}/{{repo_version}}/rhel-server-rhscl-7-rpms
-enabled=1
-gpgcheck=0
-
-[rhel-7-server-ansible-2.6-rpms]
-name=Red Hat Enterprise Ansible 2.6
-baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-ansible-2.6-rpms
-enabled=1
-gpgcheck=0
-
-[rhel-7-server-satellite-capsule-6.4-rpms]
-name=Red Hat Enterprise Satellite Capsule 6.4
-baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-capsule-6.4-rpms
-enabled=1
-gpgcheck=0
-
-[rhel-7-server-satellite-maintenance-6-rpms]
-name=Red Hat Enterprise Satellite 6 Maintenance
-baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-maintenance-6-rpms
-enabled=1
-gpgcheck=0
-
-
-[rhel-7-server-satellite-tools-6.4-rpms]
-name=Red Hat Enterprise Linux Satellite tools 6.4
-baseurl={{own_repo_path}}/{{repo_version}}/rhel-7-server-satellite-tools-6.4-rpms
-enabled=1
-gpgcheck=0
-
-{% endif %}
-{% endif %}
-
diff --git a/ansible/configs/multi-cloud-capsule/infra.yml b/ansible/configs/multi-cloud-capsule/infra.yml
deleted file mode 100644
index e930c1fc76b..00000000000
--- a/ansible/configs/multi-cloud-capsule/infra.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-
-- import_playbook: ./infra_configs/{{ cloud_provider }}_infrastructure_deployment.yml
diff --git a/ansible/configs/multi-cloud-capsule/infra_configs/ec2_infrastructure_deployment.yml b/ansible/configs/multi-cloud-capsule/infra_configs/ec2_infrastructure_deployment.yml
deleted file mode 100644
index 1c7320d9822..00000000000
--- a/ansible/configs/multi-cloud-capsule/infra_configs/ec2_infrastructure_deployment.yml
+++ /dev/null
@@ -1,126 +0,0 @@
----
-
-- import_playbook: ../../../cloud_providers/ec2_pre_checks.yml
-
-- name: Step 001.1 Deploy Infrastructure
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - step001
- - step001.1
- - deploy_infrastructure
- tasks:
- - name: Run infra-ec2-template-generate Role
- import_role:
- name: infra-ec2-template-generate
-
- - name: Run infra-ec2-template-create Role
- import_role:
- name: infra-ec2-template-create
- vars:
- aws_region_loop: "{{aws_region}}"
-
- - name: Run infra-ec2-template-create Role into FallBack region
- include_role:
- name: infra-ec2-template-create
- vars:
- aws_region_loop: "{{item}}"
- with_items: "{{ fallback_regions }}"
- when:
- - fallback_regions is defined
- - cloudformation_out is failed
-
- - name: report Cloudformation error
- fail:
- msg: "FAIL {{ project_tag }} Create Cloudformation"
- when: not cloudformation_out is succeeded
- tags:
- - provision_cf_template
-
-- name: Step 001.2 Create Inventory and SSH config setup
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - step001
- - step001.2
- - create_inventory
- - create_ssh_config
- tasks:
- # Sometimes the infra step is skipped, for example when scaling up a cluster.
- # when step001.1 is skipped, aws_region_final is not defined.
- - when: aws_region_final is not defined
- include_tasks: ec2_detect_region_tasks.yml
-
- - name: Run infra-ec2-create-inventory Role
- import_role:
- name: infra-ec2-create-inventory
-
- - name: Run Common SSH Config Generator task file
- import_tasks: ./infra-common-ssh-config-generate.yml
-
-# include global vars again, this time for all hosts now that the inventory is built
-- import_playbook: ../../../include_vars.yml
- tags:
- - create_inventory
- - must
-
-- name: Step 001.3 Configure Linux Hosts and Wait for Connection
- hosts:
- - all:!windows:!network
- gather_facts: false
- any_errors_fatal: true
- ignore_errors: false
- become: true
- tags:
- - step001
- - step001.3
- - wait_ssh
- - set_hostname
- tasks:
- - name: set facts for remote access
- tags:
- - create_inventory
- set_fact:
- aws_region_final: "{{hostvars['localhost'].aws_region_final}}"
- ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf"
-
- - name: Run infra-ec2-wait_for_linux_hosts Role
- import_role:
- name: infra-ec2-wait_for_linux_hosts
-
- - name: Run infra-ec2-linux-set-hostname Role
- import_role:
- name: infra-ec2-linux-set-hostname
-
-- name: Step 001.4 Configure Windows Hosts and Wait for Connection
- gather_facts: false
- hosts:
- - windows
- tags:
- - step001
- - step001.4
- tasks:
- - name: set facts for remote access
- tags:
- - create_inventory
- set_fact:
- ansible_become: false
- ansible_connection: winrm
- ansible_host: "{{ public_dns_name }}"
- ansible_password: "{{ hostvars['localhost'].windows_password | default(hostvars['localhost'].generated_windows_password) }}"
- ansible_port: 5986
- ansible_user: Administrator
- ansible_winrm_server_cert_validation: ignore
- aws_region_final: "{{hostvars['localhost'].aws_region_final}}"
-
- - name: Run infra-ec2-wait_for_linux_hosts Role
- import_role:
- name: infra-ec2-wait_for_windows_hosts
-
- - name: Set output_dir for all windows hosts
- set_fact:
- output_dir: "{{ hostvars.localhost.output_dir }}"
diff --git a/ansible/configs/multi-cloud-capsule/infra_configs/infra-common-ssh-config-generate.yml b/ansible/configs/multi-cloud-capsule/infra_configs/infra-common-ssh-config-generate.yml
deleted file mode 100644
index 735c638a12a..00000000000
--- a/ansible/configs/multi-cloud-capsule/infra_configs/infra-common-ssh-config-generate.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-
-- name: Store hostname as a fact
- set_fact:
- ansible_ssh_config: "{{output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf"
- ansible_known_host: "{{output_dir}}/{{ env_type }}_{{ guid }}_ssh_known_hosts"
-
-- name: Store hostname as a fact
- set_fact:
- remote_user: ec2-user
- when: "cloud_provider == 'ec2'"
-
-- name: Store hostname as a fact
- set_fact:
- remote_user: cloud-user
- when: "cloud_provider == 'osp'"
-
-
-- name: delete local ssh config and know_host file. start fresh
- file:
- dest: "{{ item }}"
- state: absent
- loop:
- - "{{ansible_known_host}}"
- - "{{ ansible_ssh_config }}"
-
-- name: Create empty local ssh config
- file:
- dest: "{{ ansible_ssh_config }}"
- state: touch
- when: secondary_stack is not defined
-
-- name: Add proxy config to workdir ssh config file
- blockinfile:
- dest: "{{ ansible_ssh_config }}"
- marker: "##### {mark} ADDED PROXY HOST {{ item }} {{ env_type }}-{{ guid }} ######"
- content: |
- Host {{ item }} {{ hostvars[item].shortname |d('')}}
- Hostname {{ hostvars[item].public_ip_address }}
- IdentityFile {{ ssh_key | default(infra_ssh_key) | default(ansible_ssh_private_key_file) | default(default_key_name)}}
- IdentitiesOnly yes
- User {{ remote_user }}
- ControlMaster auto
- ControlPath /tmp/{{ guid }}-%r-%h-%p
- ControlPersist 5m
- StrictHostKeyChecking no
- ConnectTimeout 60
- ConnectionAttempts 10
- UserKnownHostsFile {{ansible_known_host}}
- loop: "{{ groups['capsules'] }} "
- tags:
- - proxy_config_main
-
-...
\ No newline at end of file
diff --git a/ansible/configs/multi-cloud-capsule/infra_configs/infra-osp-create-inventory.yml b/ansible/configs/multi-cloud-capsule/infra_configs/infra-osp-create-inventory.yml
deleted file mode 100644
index 192e80f77ea..00000000000
--- a/ansible/configs/multi-cloud-capsule/infra_configs/infra-osp-create-inventory.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-- set_fact:
- _name_selector: name
-
-- set_fact:
- stack_tag: "{{env_type | replace('-', '_')}}_{{guid}}"
- tags:
- - create_inventory
- - must
-
-- when: server.status != 'terminated'
- block:
- - name: Add hosts to inventory
- add_host:
- name: "{{ server | json_query(_name_selector) | default(server.name) }}"
- original_name: "{{ server.name }}"
- groups:
- #TODO: remove thos tag_*
- - "tag_Project_{{stack_tag}}"
- - "tag_{{ stack_tag }} | default('unknowns') }}"
- - "{{ server.metadata.ostype | default('unknowns') }}"
- ansible_user: "{{ ansible_user }}"
- remote_user: "{{ remote_user }}"
- # ansible_ssh_private_key_file: "{{item['key_name']}}"
- # key_name: "{{item['key_name']}}"
- state: "{{ server.status }}"
- instance_id: "{{ server.id }}"
- isolated: "{{ server.metadata.isolated | default(false) }}"
- # private_dns_name: "{{item['private_dns_name']}}"
- private_ip_address: "{{ server.private_v4 }}"
- public_ip_address: "{{ server.public_v4 | default('') }}"
- image_id: "{{ server.image.id | default('') }}"
- ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
- # bastion: "{{ local_bastion | default('') }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
- loop_control:
- label: "{{ server | json_query(_name_selector) | default(server.name) }}"
- loop_var: server
- tags:
- - create_inventory
- - must
-
- - add_host:
- name: "{{ server | json_query(_name_selector) | default(server.name) }}"
- groups: "{{ server.metadata.AnsibleGroup }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
- loop_control:
- label: "{{ server | json_query(_name_selector) | default(server.name) }}"
- loop_var: server
- when: server.metadata.AnsibleGroup | default('') != ''
- tags:
- - create_inventory
- - must
-
-
-- name: debug hostvars
- debug:
- var: hostvars
- verbosity: 2
-
-- name: debug groups
- debug:
- var: groups
- verbosity: 2
diff --git a/ansible/configs/multi-cloud-capsule/infra_configs/osp_infrastructure_deployment.yml b/ansible/configs/multi-cloud-capsule/infra_configs/osp_infrastructure_deployment.yml
deleted file mode 100644
index 5584fa9e4c6..00000000000
--- a/ansible/configs/multi-cloud-capsule/infra_configs/osp_infrastructure_deployment.yml
+++ /dev/null
@@ -1,109 +0,0 @@
----
-- name: Step 001.1 Deploy Infrastructure
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - step001
- - step001.1
- - deploy_infrastructure
- environment:
- OS_AUTH_URL: "{{ osp_auth_url }}"
- OS_USERNAME: "{{ osp_auth_username }}"
- OS_PASSWORD: "{{ osp_auth_password }}"
- OS_PROJECT_NAME: "admin"
- OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
- OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- tasks:
- - name: Run infra-osp-project-create Role
- import_role:
- name: infra-osp-project-create
- tags:
- - infra-osp-project-create
-
- - name: Run infra-osp-template-generate Role
- import_role:
- name: infra-osp-template-generate
-
- - name: Run infra-osp-template-create Role
- import_role:
- name: infra-osp-template-create
-
-- name: Step 001.2 Create Inventory and SSH config setup
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - step001
- - step001.2
- - create_inventory
- - create_ssh_config
- environment:
- OS_AUTH_URL: "{{ osp_auth_url }}"
- OS_USERNAME: "{{ osp_auth_username }}"
- OS_PASSWORD: "{{ osp_auth_password }}"
- OS_PROJECT_NAME: "{{ osp_project_name }}"
- OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
- OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- tasks:
- - name: Gather instance facts
- os_server_info:
- server: "*"
- filters:
- metadata:
- guid: "{{ guid }}"
- env_type: "{{ env_type }}"
- register: r_osp_facts
-
- - name: debug osp_facts
- debug:
- var: r_osp_facts
- verbosity: 2
-
- - name: Run infra-osp-dns Role
- import_role:
- name: infra-osp-dns
- vars:
- _dns_state: present
-
- - name: Run infra-osp-create-inventory Role
- import_tasks: ./infra-osp-create-inventory.yml
-
- - name: Run Common SSH Config Generator task file
- import_tasks: ./infra-common-ssh-config-generate.yml
-
-
-# include global vars again, this time for all hosts now that the inventory is built
-- import_playbook: ../../../include_vars.yml
- tags:
- - create_inventory
- - must
-
-- name: Step 001.3 Configure Linux Hosts and Wait for Connection
- hosts:
- - all:!windows:!network
- gather_facts: false
- any_errors_fatal: true
- ignore_errors: false
- tags:
- - step001
- - step001.3
- - wait_ssh
- tasks:
- - name: set facts for remote access
- tags:
- - create_inventory
- set_fact:
- # set python interpreter: Useful when the distrib running ansible has a different path
- # ex: when running using the alpine image
- #ansible_python_interpreter: env python
- ansible_ssh_common_args: >-
- {{ ansible_ssh_extra_args|d() }}
- -F {{ output_dir }}/{{ env_type }}_{{ guid }}_ssh_conf
- -o ControlPath=/tmp/{{ guid }}-%r-%h-%p
-
- - name: Run infra-generic-wait_for_linux_hosts Role
- import_role:
- name: infra-generic-wait_for_linux_hosts
diff --git a/ansible/configs/multi-cloud-capsule/post_infra.yml b/ansible/configs/multi-cloud-capsule/post_infra.yml
deleted file mode 100644
index 65f049992ce..00000000000
--- a/ansible/configs/multi-cloud-capsule/post_infra.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-- name: Step 002 Post Infrastructure
- hosts: localhost
- connection: local
- become: false
- gather_facts: false
- tags:
- - step002
- - post_infrastructure
- tasks:
- - name: Job Template to launch a Job Template with update on launch inventory set
- uri:
- url: "https://{{ ansible_tower_ip }}/api/v1/job_templates/{{ job_template_id }}/launch/"
- method: POST
- user: "{{tower_admin}}"
- password: "{{tower_admin_password}}"
- body:
- extra_vars:
- guid: "{{guid}}"
- ipa_host_password: "{{ipa_host_password}}"
-
- body_format: json
- validate_certs: False
- HEADER_Content-Type: "application/json"
- status_code: 200, 201
- when: tower_run == 'true'
diff --git a/ansible/configs/multi-cloud-capsule/post_software.yml b/ansible/configs/multi-cloud-capsule/post_software.yml
deleted file mode 100644
index bdf01011a5a..00000000000
--- a/ansible/configs/multi-cloud-capsule/post_software.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-- name: Step 00xxxxx post software
- hosts: support
- gather_facts: False
- become: yes
- tasks:
- - debug:
- msg: "Post-Software tasks Started"
-
-
-# - name: Step lab post software deployment
-# hosts: bastions
-# gather_facts: False
-# become: yes
-# tags:
-# - opentlc_bastion_tasks
-# tasks:
-# - import_role:
-# name: bastion-opentlc-ipa
-# when: install_ipa_client|bool
-
-
-
-- name: PostSoftware flight-check
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - post_flight_check
- tasks:
-
- - debug:
- msg: "Post-Software checks completed successfully"
-
-
-
diff --git a/ansible/configs/multi-cloud-capsule/pre_software.yml b/ansible/configs/multi-cloud-capsule/pre_software.yml
deleted file mode 100644
index ea018a01bdf..00000000000
--- a/ansible/configs/multi-cloud-capsule/pre_software.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-- name: Step 003 Pre Software
- hosts: localhost
- gather_facts: false
- become: false
- tasks:
- - debug:
- msg: "Step 003 Pre Software"
-
- - import_role:
- name: infra-local-create-ssh_key
- when: set_env_authorized_key | bool
-
-- name: Configure all hosts with Repositories
- hosts:
- - all:!windows
- become: true
- gather_facts: False
- tags:
- - step004
- - common_tasks
- roles:
- # - { role: "set-repositories", when: 'repo_method is defined' }
- - { role: "set_env_authorized_key", when: 'set_env_authorized_key' }
-
-
-# - name: Configuring Bastion Hosts
-# hosts: bastions
-# become: true
-# roles:
-# - { role: "common", when: 'install_common' }
-# - {role: "bastion", when: 'install_bastion' }
-# - { role: "bastion-opentlc-ipa", when: 'install_ipa_client' }
-
-# tags:
-# - step004
-# - bastion_tasks
-- name: PreSoftware flight-check
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - presoftware_flight_check
- tasks:
- - debug:
- msg: "Pre-Software checks completed successfully"
diff --git a/ansible/configs/multi-cloud-capsule/requirements.yml b/ansible/configs/multi-cloud-capsule/requirements.yml
deleted file mode 100644
index c25829929b9..00000000000
--- a/ansible/configs/multi-cloud-capsule/requirements.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-
-collections:
-- name: openstack.cloud
- version: 1.8.0
\ No newline at end of file
diff --git a/ansible/configs/multi-cloud-capsule/sample_vars_ec2.yml b/ansible/configs/multi-cloud-capsule/sample_vars_ec2.yml
deleted file mode 100644
index 2684e3aca6f..00000000000
--- a/ansible/configs/multi-cloud-capsule/sample_vars_ec2.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-
-env_type: multi-cloud-capsule
-output_dir: /tmp/workdir # Writable working scratch directory
-email: capsule_vm@example.com
-guid: capaws01
-cloud_provider: ec2
-aws_region: ap-southeast-2
-
-
-satellite_version: 6.4
-install_capsule: true
-configure_capsule: true
-
-
-satellite_public_fqdn: satellite1.cap01.example.opentlc.com
-capsule_activationkey: capsule_key
-capsule_org: gpte
-
-consumer_key: "cuBfSo9NhB338aSwvRC5VKgZt5Sqhez5"
-consumer_secret: "mpYncnDHkRq9XrHDoereQ3Hwejyyed6c"
-
-capsule_cert_path: /tmp/capsule-cert.tar
\ No newline at end of file
diff --git a/ansible/configs/multi-cloud-capsule/sample_vars_osp.yml b/ansible/configs/multi-cloud-capsule/sample_vars_osp.yml
deleted file mode 100644
index adebc442735..00000000000
--- a/ansible/configs/multi-cloud-capsule/sample_vars_osp.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-
-env_type: multi-cloud-capsule
-output_dir: /tmp/workdir # Writable working scratch directory
-email: capsule_vm@example.com
-cloud_provider: osp
-guid: caposp01
-osp_cluster_dns_zone: red.osp.opentlc.com
-
-###### satellite env related variables ###############
-satellite_version: 6.4
-satellite_public_fqdn: satellite1.cap01.example.opentlc.com
-
-capsule_activationkey: capsule_key
-capsule_org: gpte
-
-consumer_key: "cuBfSo9NhB338aSwvRC5VKgZt5Sqhez5"
-consumer_secret: "mpYncnDHkRq9XrHDoereQ3Hwejyyed6c"
-
-capsule_cert_path: /tmp/capsule-cert.tar
-
-install_capsule: true
-configure_capsule: true
\ No newline at end of file
diff --git a/ansible/configs/multi-cloud-capsule/software.yml b/ansible/configs/multi-cloud-capsule/software.yml
deleted file mode 100644
index 30f396e34af..00000000000
--- a/ansible/configs/multi-cloud-capsule/software.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Step 00xxxxx software
- hosts: localhost
- gather_facts: False
- become: false
- tasks:
- - debug:
- msg: "Software tasks Started"
-
-- name: Configuring capsule Hosts
- hosts: capsules
- become: True
- gather_facts: True
- roles:
- - { role: "satellite-public-hostname" }
- - { role: "satellite-capsule-installation", when: install_capsule }
- - { role: "satellite-capsule-configuration", when: configure_capsule }
-
-- name: Software flight-check
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - post_flight_check
- tasks:
- - debug:
- msg: "Software checks completed successfully"
diff --git a/ansible/configs/multi-cloud-capsule/start.yml b/ansible/configs/multi-cloud-capsule/start.yml
deleted file mode 100644
index e50def69fc6..00000000000
--- a/ansible/configs/multi-cloud-capsule/start.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- import_playbook: ../../include_vars.yml
-
-- name: Stop instances
- hosts: localhost
- gather_facts: false
- become: false
- environment:
- AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
- AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
- tasks:
- - debug:
- msg: "Step 002 Post Infrastructure"
-
- - name: Start instances
- ec2:
- instance_tags:
- "aws:cloudformation:stack-name": "{{ project_tag }}"
- state: running
- region: "{{ aws_region }}"
-
diff --git a/ansible/configs/multi-cloud-capsule/stop.yml b/ansible/configs/multi-cloud-capsule/stop.yml
deleted file mode 100644
index 00703a412d1..00000000000
--- a/ansible/configs/multi-cloud-capsule/stop.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- import_playbook: ../../include_vars.yml
-
-- name: Stop instances
- hosts: localhost
- gather_facts: false
- become: false
- environment:
- AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
- AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
- tasks:
- - debug:
- msg: "Step 002 Post Infrastructure"
-
- - name: Stop instances
- ec2:
- instance_tags:
- "aws:cloudformation:stack-name": "{{ project_tag }}"
- state: stopped
- region: "{{ aws_region }}"
-
diff --git a/ansible/configs/ocp-workshop/post_software.yml b/ansible/configs/ocp-workshop/post_software.yml
index 0542f01c2d4..b335e26baa5 100644
--- a/ansible/configs/ocp-workshop/post_software.yml
+++ b/ansible/configs/ocp-workshop/post_software.yml
@@ -676,7 +676,7 @@
register: ansible_agnostic_deployer_head
- name: Gather ec2 facts
- ec2_instance_facts:
+ ec2_instance_info:
aws_access_key: "{{ aws_access_key_id }}"
aws_secret_key: "{{ aws_secret_access_key }}"
region: "{{ aws_region_final | default(aws_region) }}"
diff --git a/ansible/configs/ocp4-cluster/destroy_env_azure.yml b/ansible/configs/ocp4-cluster/destroy_env_azure.yml
index db35118370b..02bb0e50807 100644
--- a/ansible/configs/ocp4-cluster/destroy_env_azure.yml
+++ b/ansible/configs/ocp4-cluster/destroy_env_azure.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../setup_runtime.yml
-
- name: Set up environment for destroy
hosts: localhost
connection: local
diff --git a/ansible/configs/ocp4-cluster/destroy_env_ec2.yml b/ansible/configs/ocp4-cluster/destroy_env_ec2.yml
index 3257dc18e3e..6c24b1d6011 100644
--- a/ansible/configs/ocp4-cluster/destroy_env_ec2.yml
+++ b/ansible/configs/ocp4-cluster/destroy_env_ec2.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../setup_runtime.yml
-
- name: Destroy environment on AWS
hosts: localhost
connection: local
diff --git a/ansible/configs/ocp4-cluster/destroy_env_osp.yml b/ansible/configs/ocp4-cluster/destroy_env_osp.yml
index 36827c56dcd..db62e83c417 100644
--- a/ansible/configs/ocp4-cluster/destroy_env_osp.yml
+++ b/ansible/configs/ocp4-cluster/destroy_env_osp.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../setup_runtime.yml
-
# Call Remove Workloads for workloads that need to clean up "other" infrastructure.
# Those removal playbooks need to be able to be run on the provisioning host (aka not a Bastion)
- name: Remove workloads
diff --git a/ansible/configs/ocp4-cluster/destroy_env_vmc.yml b/ansible/configs/ocp4-cluster/destroy_env_vmc.yml
index bddf2fdb635..fa9038438c2 100644
--- a/ansible/configs/ocp4-cluster/destroy_env_vmc.yml
+++ b/ansible/configs/ocp4-cluster/destroy_env_vmc.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../setup_runtime.yml
-
- name: Destroy environment on VMC
hosts: localhost
connection: local
diff --git a/ansible/configs/ocp4-cluster/files/requirements_osp.txt b/ansible/configs/ocp4-cluster/files/requirements_osp.txt
index a34b1becbc3..67cdb75951a 100644
--- a/ansible/configs/ocp4-cluster/files/requirements_osp.txt
+++ b/ansible/configs/ocp4-cluster/files/requirements_osp.txt
@@ -1,2 +1,9 @@
-openstacksdk==0.52.0
-python-openstackclient==5.4.0
+openstacksdk==1.4.0
+python-openstackclient==5.8.0
+python-heatclient==2.5.1
+python-cinderclient==8.3.0
+python-designateclient==4.5.1
+python-keystoneclient==4.5.0
+python-neutronclient==7.8.0
+python-novaclient==17.7.0
+python-swiftclient==4.4.0
diff --git a/ansible/configs/ocp4-cluster/lifecycle_hook_post_start.yml b/ansible/configs/ocp4-cluster/lifecycle_hook_post_start.yml
index 2f3b8a94481..ce1c6153d19 100644
--- a/ansible/configs/ocp4-cluster/lifecycle_hook_post_start.yml
+++ b/ansible/configs/ocp4-cluster/lifecycle_hook_post_start.yml
@@ -7,6 +7,18 @@
gather_facts: false
become: false
tasks:
+ - name: Set facts for ssh provision SSH key
+ when:
+ - ssh_provision_key_name is undefined
+ - cloud_provider == 'azure'
+ ansible.builtin.include_role:
+ name: create_ssh_provision_key
+
+ - name: Locate environment SSH key
+ when: cloud_provider == 'azure'
+ include_role:
+ name: locate_env_authorized_key
+
- when: cloud_provider == 'ec2'
name: Run infra-ec2-create-inventory Role
include_role:
diff --git a/ansible/configs/ocp4-cluster/requirements.yml b/ansible/configs/ocp4-cluster/requirements.yml
index 358110a3334..caacad39d75 100644
--- a/ansible/configs/ocp4-cluster/requirements.yml
+++ b/ansible/configs/ocp4-cluster/requirements.yml
@@ -21,4 +21,6 @@ collections:
- name: google.cloud
version: 1.0.2
- name: openstack.cloud
- version: 1.7.2
+ version: 2.1.0
+- name: community.okd
+ version: 2.3.0
diff --git a/ansible/configs/ocp4-equinix-aio/requirements.yml b/ansible/configs/ocp4-equinix-aio/requirements.yml
index 4da58b1f142..6a47c5c1642 100644
--- a/ansible/configs/ocp4-equinix-aio/requirements.yml
+++ b/ansible/configs/ocp4-equinix-aio/requirements.yml
@@ -1,72 +1,68 @@
---
roles:
- - src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_base_software.git
- scm: git
- name: ocp4_aio_base_software
- version: v0.0.10
+- src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_base_software.git
+ scm: git
+ name: ocp4_aio_base_software
+ version: v0.0.10
- - name: ocp4_aio_base_virt
- src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_base_virt.git
- scm: git
- version: v0.1.5
+- name: ocp4_aio_base_virt
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_base_virt.git
+ scm: git
+ version: v0.1.6
- - name: ocp4_aio_prepare_bastion
- src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_prepare_bastion.git
- scm: git
- version: v0.0.4
+- name: ocp4_aio_prepare_bastion
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_prepare_bastion.git
+ scm: git
+ version: v0.0.5
- - name: ocp4_aio_role_acm
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_acm.git
- scm: git
- version: v0.0.1
+- name: ocp4_aio_role_acm
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_acm.git
+ scm: git
+ version: v0.0.1
- - name: ocp4_aio_role_acs
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_acs.git
- scm: git
- version: v0.0.1
+- name: ocp4_aio_role_acs
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_acs.git
+ scm: git
+ version: v0.0.1
- - name: ocp4_aio_role_cnv
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_cnv.git
- scm: git
- version: v0.0.1
+- name: ocp4_aio_role_cnv
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_cnv.git
+ scm: git
+ version: v0.0.1
- - name: ocp4_aio_role_imgreg
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_imgreg.git
- scm: git
- version: v0.0.3
+- name: ocp4_aio_role_imgreg
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_imgreg.git
+ scm: git
+ version: v0.0.3
- - name: ocp4_aio_role_nfsmount
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_nfsmount.git
- scm: git
- version: v0.0.3
+- name: ocp4_aio_role_nfsmount
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_nfsmount.git
+ scm: git
+ version: v0.0.3
- - name: ocp4_aio_role_ocs
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_ocs.git
- scm: git
- version: v0.0.8
+- name: ocp4_aio_role_ocs
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_ocs.git
+ scm: git
+ version: v0.0.8
- - name: ocp4_aio_deploy_bastion
- src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_deploy_bastion.git
- scm: git
- version: v0.0.12
+- name: ocp4_aio_deploy_bastion
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_deploy_bastion.git
+ scm: git
+ version: v0.0.12
- - name: ocp4_aio_deploy_guac
- src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_deploy_guacamole.git
- scm: git
- version: v0.0.1
+- name: ocp4_aio_deploy_guac
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_deploy_guacamole.git
+ scm: git
+ version: v0.0.1
- - name: ocp4_aio_deploy_ocp
- src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_deploy_ocp.git
- scm: git
- version: v0.0.7
+- name: ocp4_aio_deploy_ocp
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_infra_role_deploy_ocp.git
+ scm: git
+ version: v0.0.9
- - name: ocp4_aio_workload_cnvlab
- src: https://github.com/RHFieldProductManagement/ocp4_aio_role_deploy_cnvlab.git
- scm: git
- version: v0.0.16
+- name: ocp4_aio_workload_cnvlab
+ src: https://github.com/RHFieldProductManagement/ocp4_aio_role_deploy_cnvlab.git
+ scm: git
+ version: v0.0.16
-collections:
- - name: community.general
- - name: containers.podman
- - name: equinix.metal
- version: 1.4.1
+# Collections removed because everything is in multicloud-EE now
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/default_vars.yml b/ansible/configs/ocp4-on-rosa-with-rhods/default_vars.yml
index 179b30a9f8d..b8fceb276c9 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/default_vars.yml
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/default_vars.yml
@@ -28,11 +28,6 @@ cloud_tags:
course_name: "{{ course_name | default( 'unknown' ) }}"
platform: "{{ platform | default( 'unknown' ) }}"
-set_env_authorized_key: true
-env_authorized_key: "{{guid}}key"
-key_name: "rosa_key"
-ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
-
bastion_user_name: rosa
bastion_user_enable_sudo: false
bastion_user_use_password: false
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/destroy_env.yml b/ansible/configs/ocp4-on-rosa-with-rhods/destroy_env.yml
index aa1598979c1..a8bdb5b457d 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/destroy_env.yml
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/destroy_env.yml
@@ -9,27 +9,6 @@
AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
tasks:
- - name: Create infra key
- include_role:
- name: infra-ec2-ssh-key
- when:
- - install_infra_ssh_key | default(false) | bool
-
- - name: Get fact for cloudformation stack
- cloudformation_facts:
- stack_name: "{{ project_tag }}"
- register: stack_facts
-
- - name: Grab and set stack creation time
- when: project_tag in stack_facts.ansible_facts.cloudformation
- vars:
- _stack_description: "{{ stack_facts.ansible_facts.cloudformation[project_tag].stack_description }}"
- set_fact:
- stack_creation_time: >-
- {{ _stack_description.creation_time | default(_stack_description.CreationTime) }}
- stack_status: >-
- {{ _stack_description.stack_status | default(_stack_description.StackStatus) }}
-
- name: Run infra-ec2-create-inventory role
include_role:
name: infra-ec2-create-inventory
@@ -60,33 +39,7 @@
AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
tasks:
- - name: Get all EC2 instances
- ec2_instance_info:
- filters:
- "tag:guid": "{{ guid }}"
- "tag:env_type": "{{ env_type }}"
- instance-state-name: stopped
- register: r_stopped_instances
-
- - name: Ensure EC2 instances are running
- when: r_stopped_instances.instances | length > 0
- ec2_instance:
- instance_ids: "{{ item.instance_id }}"
- state: started
- wait: false
- loop: "{{ r_stopped_instances.instances }}"
-
- - name: Wait until all EC2 instances are running
- when: r_stopped_instances.instances | length > 0
- ec2_instance_info:
- filters:
- "tag:guid": "{{ guid }}"
- "tag:env_type": "{{ env_type }}"
- instance-state-name: running
- register: r_running_instances
- until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
- delay: 10
- retries: 60
+ - include_tasks: ec2_instances_start.yaml
- name: Destroy ROSA
hosts: bastions
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/ec2_instances_start.yaml b/ansible/configs/ocp4-on-rosa-with-rhods/ec2_instances_start.yaml
new file mode 100644
index 00000000000..3969c2b0e5e
--- /dev/null
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/ec2_instances_start.yaml
@@ -0,0 +1,32 @@
+---
+- name: Get all EC2 instances
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: stopped
+ register: r_stopped_instances
+
+# Wk: Don't wait for instances to be running. Otherwise this is
+# a very sequential task. Just start the instances.
+# The next task will wait until all instances are running - but
+# this happens now in parallel instead of sequentially.
+- name: Ensure EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ item.instance_id }}"
+ state: started
+ wait: false
+ loop: "{{ r_stopped_instances.instances }}"
+
+- name: Wait until all EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: running
+ register: r_running_instances
+ until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
+ delay: 10
+ retries: 60
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/ocp4-on-rosa-with-rhods/files/cloud_providers/ec2_cloud_template.j2
index b7f704b987d..f7055cc370f 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/files/cloud_providers/ec2_cloud_template.j2
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/files/cloud_providers/ec2_cloud_template.j2
@@ -204,7 +204,7 @@ Resources:
- {{ instance.image | default(aws_default_image) }}
{% endif %}
InstanceType: "{{instance['flavor'][cloud_provider]}}"
- KeyName: "{{instance.key_name | default(key_name)}}"
+ KeyName: "{{instance.key_name | default(ssh_provision_key_name) | default(key_name)}}"
{% if instance['UserData'] is defined %}
{{instance['UserData']}}
{% endif %}
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/pre_infra_ec2.yml b/ansible/configs/ocp4-on-rosa-with-rhods/pre_infra_ec2.yml
index f2458e21ff9..126f0eb5929 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/pre_infra_ec2.yml
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/pre_infra_ec2.yml
@@ -2,8 +2,9 @@
- name: Set rosa console password
set_fact:
rosa_console_password: >-
- {{ lookup('password', '/dev/null length=12') -}}
- {{- lookup('password', '/dev/null length=1 chars=digits') }}
+ {{ lookup('community.general.random_string',
+ length=12, min_lower=1, min_upper=1, special=false,
+ min_numeric=1) }}
- name: Get the current caller identity information
environment:
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/pre_software.yml b/ansible/configs/ocp4-on-rosa-with-rhods/pre_software.yml
index 348efcc5b0f..23585a5a2dc 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/pre_software.yml
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/pre_software.yml
@@ -1,23 +1,4 @@
---
-- name: Step 003 - Pre Software
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - step003
- - generate_env_keys
- tasks:
- - name: Generate SSH keys
- when: set_env_authorized_key | bool
- openssh_keypair:
- state: present
- path: "{{ output_dir }}/{{ env_authorized_key }}"
- comment: "{{ key_name }}"
- size: 4096
- type: rsa
- mode: 0400
-
# Cloudformation or Heat template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
hosts: all
@@ -29,8 +10,6 @@
roles:
- role: common
when: install_common | default( true ) | bool
- - role: set_env_authorized_key
- when: set_env_authorized_key | bool
tasks:
- name: Add GUID to /etc/skel/.bashrc
lineinfile:
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/requirements.yml b/ansible/configs/ocp4-on-rosa-with-rhods/requirements.yml
index e0f10c64c47..762c4fe0d0e 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/requirements.yml
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/requirements.yml
@@ -8,3 +8,5 @@ collections:
version: 4.6.1
- name: ansible.posix
version: 1.3.0
+- name: community.okd
+ version: 2.3.0
diff --git a/ansible/configs/ocp4-on-rosa-with-rhods/workloads.yml b/ansible/configs/ocp4-on-rosa-with-rhods/workloads.yml
index d41d454b64b..ee2d6435192 100644
--- a/ansible/configs/ocp4-on-rosa-with-rhods/workloads.yml
+++ b/ansible/configs/ocp4-on-rosa-with-rhods/workloads.yml
@@ -17,11 +17,11 @@
rosa_api_server_url: "https://api{{ rosa_console_url.stdout | regex_search('(?<=\\.apps).*') }}:6443"
- name: Run authentication
- k8s_auth:
+ community.okd.openshift_auth:
+ validate_certs: false
host: "{{ rosa_api_server_url }}"
username: cluster-admin
password: "{{ rosa_admin_result.stdout }}"
- validate_certs: false
register: _r_kube_auth
retries: 30
delay: 120
@@ -41,121 +41,6 @@
src: templates/kubeconfig.j2
dest: ~/.kube/config
- - name: Remove restricted operations on ROSA clusters from validatingwebhookconfiguration.
- shell: |
- oc login --insecure-skip-tls-verify=true -u cluster-admin -p {{ rosa_admin_result.stdout }} {{ rosa_api_server_url }}
- oc delete validatingwebhookconfiguration sre-namespace-validation
-
- # Deploy Workloads
- - name: Deploy demo operator
- k8s:
- state: present
- definition: "{{ lookup('template', item ) | from_yaml }}"
- validate_certs: false
- loop:
- - templates/demo-operator-namespace.yaml
- - templates/demo-operator-catalog-source.yaml
- - templates/demo-operator-operator-group.yaml
- - templates/demo-operator-subscription.yaml
- register: r_operator_install
- retries: 240
- delay: 10
- until:
- - r_operator_install is defined
- - r_operator_install is not failed
-
- - name: Pause for 2 minutes for demo operator to install
- ansible.builtin.pause:
- minutes: 2
-
- - name: Deploy demo
- k8s:
- state: present
- definition: "{{ lookup('template', 'templates/demo-workshop-install.yaml.j2' ) | from_yaml }}"
- validate_certs: false
- register: r_demo
- retries: 240
- delay: 10
- until:
- - r_demo is defined
- - r_demo is not failed
- vars:
- scm_ref: "{{ vars['__meta__']['deployer']['scm_ref'] }}"
-
- - name: Check if demo has completed install
- k8s_info:
- api_version: demos.redhat.com/v1
- kind: Demo
- name: "{{ demo_instance_name }}"
- namespace: demo-provisioner-operator-system
- validate_certs: false
- register: result_demo_install
- retries: 480
- delay: 15
- until:
- - result_demo_install is defined
- - result_demo_install.resources is defined
- - result_demo_install.resources | length > 0
- - result_demo_install.resources[0].status is defined
- - result_demo_install.resources[0].status.phase is defined
- - result_demo_install.resources[0].status.phase != 'Running'
-
- - name: Check if demo failed installation
- ansible.builtin.fail:
- msg: The demo did not provision successfully. Please view the logs on the demo pod.
- when: result_demo_install.resources[0].status.phase == 'Failed'
-
- - name: Get user data and info
- shell: |
- oc login --insecure-skip-tls-verify=true -u cluster-admin -p {{ rosa_admin_result.stdout }} {{ rosa_api_server_url }}
- POD=$(oc get pod -l app=demo-provisioner -n demo-provisioner-operator-system | grep demo- | awk '{print $1}')
- oc rsync $POD:/tmp/user-info.yaml /tmp -c demo-playbook -n demo-provisioner-operator-system > /dev/null 2>&1
- oc rsync $POD:/tmp/user-data.yaml /tmp -c demo-playbook -n demo-provisioner-operator-system > /dev/null 2>&1
-
- - name: Upload AgnosticD user info
- block:
- - name: Fetch user data
- ansible.builtin.fetch:
- src: /tmp/user-data.yaml
- dest: /tmp/
- flat: yes
- ignore_errors: true
-
- - name: Get user-data
- include_vars:
- file: "/tmp/user-data.yaml"
- name: _userdata
- ignore_errors: true
-
- - name: Fetch user info
- ansible.builtin.fetch:
- src: /tmp/user-info.yaml
- dest: /tmp/
- flat: yes
- ignore_errors: true
-
- - name: Get user-info
- set_fact:
- _userinfo: "{{ lookup('file', '/tmp/user-info.yaml') }}"
- ignore_errors: true
-
- - name: Create upload task from template
- ansible.builtin.template:
- src: templates/agnosticd_user_info_upload.yaml.j2
- dest: /tmp/agnosticd_user_info_upload.yaml
- ignore_errors: true
-
- - name: Fetch upload task
- ansible.builtin.fetch:
- src: /tmp/agnosticd_user_info_upload.yaml
- dest: /tmp/
- flat: yes
- ignore_errors: true
-
- - name: Run upload task
- include_tasks: /tmp/agnosticd_user_info_upload.yaml
- ignore_errors: true
-
- name: Install ocp-student-workloads
when:
- user_count | default(0) | int > 0
@@ -183,6 +68,44 @@
loop_control:
loop_var: workload_loop_var
+ - name: install ocp-infra-workloads
+ vars:
+ ACTION: "provision"
+ ocp_username: "system:admin"
+ # Variables defined for running infra workloads
+ output_dir: "/tmp"
+ num_users: "{{ num_users }}"
+ ocp4_workload_authentication_rosa_admin_user: admin
+ ocp4_workload_authentication_rosa_admin_password: Openshift@1
+ ocp4_workload_generate_kubeconfig_openshift_username: cluster-admin
+ ocp4_workload_generate_kubeconfig_openshift_password: "{{ rosa_admin_result.stdout }}"
+ ocp4_workload_generate_kubeconfig_openshift_api_url: "{{ rosa_api_server_url }}"
+ guid: "{{ guid | default(omit) }}"
+ ocp4_workload_authentication_rosa_aws_access_key_id: "{{ aws_access_key_id }}"
+ ocp4_workload_authentication_rosa_aws_region: "{{ aws_region }}"
+ ocp4_workload_authentication_rosa_aws_secret_access_key: "{{ aws_secret_access_key }}"
+ ocp4_workload_authentication_rosa_token: "{{ gpte_rosa_token | default(omit) }}"
+ ansible.builtin.include_role:
+ name: "{{ workload_loop_var }}"
+ loop: "{{ infra_workloads }}"
+ loop_control:
+ loop_var: workload_loop_var
+
+  - name: Check validatingwebhookconfiguration sre-namespace-validation exists.
+ k8s_info:
+ api_version: admissionregistration.k8s.io/v1
+ kind: ValidatingWebhookConfiguration
+ register: r_failed_validation
+ until: "{{ r_failed_validation.resources | json_query('[?metadata.name == `sre-namespace-validation`]') }}"
+ retries: 60
+ delay: 10
+
+ - name: Remove restricted operations on ROSA clusters from validatingwebhookconfiguration.
+ shell: |
+ oc login --insecure-skip-tls-verify=true -u cluster-admin -p {{ rosa_admin_result.stdout }} {{ rosa_api_server_url }}
+ sleep 10
+ oc delete validatingwebhookconfiguration sre-namespace-validation
+
- name: Update project template
k8s:
state: present
@@ -191,12 +114,3 @@
register: r_project_template
retries: 2
delay: 5
-
- - name: Remmove htpasswd identity provider
- shell: |
- oc delete secret htpasswd-secret -n openshift-config
- oc patch OAuth cluster --type json --patch '[{ "op": "remove", "path": "/spec/identityProviders/1" }]'
- oc delete deployment oauth-openshift -n openshift-authentication
- oc delete user cluster-admin
- rosa delete admin -c rosa-{{ guid }} -y
- ignore_errors: true
diff --git a/ansible/configs/ocp4-workshop/lifecycle.yml b/ansible/configs/ocp4-workshop/lifecycle.yml
index ba522dc22a7..d5c6319322f 100644
--- a/ansible/configs/ocp4-workshop/lifecycle.yml
+++ b/ansible/configs/ocp4-workshop/lifecycle.yml
@@ -114,7 +114,7 @@
seconds: "{{ lifecycle_start_pause | default(180) }}"
- name: Get CSRs that need to be approved
- k8s_facts:
+ k8s_info:
api_version: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
# Field selectors don't seem to work
@@ -134,7 +134,7 @@
seconds: 10
- name: Get additional CSRs that need to be approved
- k8s_facts:
+ k8s_info:
api_version: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
# Field selectors don't seem to work
diff --git a/ansible/configs/ocs4-external-implementation/lifecycle_hook_post_start.yml b/ansible/configs/ocs4-external-implementation/lifecycle_hook_post_start.yml
index 03b051f5708..35837943979 100644
--- a/ansible/configs/ocs4-external-implementation/lifecycle_hook_post_start.yml
+++ b/ansible/configs/ocs4-external-implementation/lifecycle_hook_post_start.yml
@@ -49,7 +49,7 @@
seconds: "{{ lifecycle_start_pause | default(180) }}"
- name: Get CSRs that need to be approved
- k8s_facts:
+ k8s_info:
api_version: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
# Field selectors don't seem to work
@@ -67,7 +67,7 @@
seconds: 10
- name: Get additional CSRs that need to be approved
- k8s_facts:
+ k8s_info:
api_version: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
# Field selectors don't seem to work
diff --git a/ansible/configs/ocs4-external-implementation/post_software.yml b/ansible/configs/ocs4-external-implementation/post_software.yml
index 668725125fa..187b4809729 100644
--- a/ansible/configs/ocs4-external-implementation/post_software.yml
+++ b/ansible/configs/ocs4-external-implementation/post_software.yml
@@ -89,7 +89,7 @@
when: test_deploy_runs is defined
block:
- name: Check on status of job
- k8s_facts:
+ k8s_info:
api_version: batch/v1
kind: Job
name: fio-test
diff --git a/ansible/configs/open-environment-azure/post_software.yml b/ansible/configs/open-environment-azure/post_software.yml
index 742bfcf4224..0dd83e5d1b2 100644
--- a/ansible/configs/open-environment-azure/post_software.yml
+++ b/ansible/configs/open-environment-azure/post_software.yml
@@ -127,6 +127,7 @@
generated_password: "{{ generated_password }}"
bastion_ssh_command: "ssh {{ remote_user }}@bastion.{{ guid }}.{{ cluster_dns_zone }}"
bastion_password: "{{ generated_password }}"
+ preconfigure_aad: "{{ preconfigure_aad }}"
- name: Bookbag
hosts: localhost
diff --git a/ansible/configs/osp-migration/destroy_env.yml b/ansible/configs/osp-migration/destroy_env.yml
index 606779ed25f..48b97de22f6 100644
--- a/ansible/configs/osp-migration/destroy_env.yml
+++ b/ansible/configs/osp-migration/destroy_env.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../setup_runtime.yml
-
- name: Teardown OpenStack project and resources
hosts: localhost
connection: local
@@ -21,50 +19,69 @@
}}-{{ guid }}
- name: Check if project exists
- environment:
- OS_AUTH_URL: "{{ osp_auth_url }}"
- OS_USERNAME: "{{ osp_auth_username }}"
- OS_PASSWORD: "{{ osp_auth_password }}"
- OS_PROJECT_NAME: "admin"
- OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
- OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_project_info:
+ openstack.cloud.project_info:
name: "{{ osp_project_name }}"
- register: project_exists
+ register: project_facts
- meta: end_play
- when: project_exists.openstack_projects | length == 0
+ when: project_facts.projects | length == 0
- name: Gather instance facts
environment:
OS_PROJECT_NAME: "{{ osp_project_name }}"
- os_server_info:
+ openstack.cloud.server_info:
+ all_projects: false
server: "*"
filters:
metadata:
guid: "{{ guid }}"
- register: r_osp_facts
+ register: r_osp_server_facts
+
+ - name: Iterate over all instances and delete DNS entries
+ loop: "{{ r_osp_server_facts.servers }}"
+ loop_control:
+ loop_var: _instance
+ vars:
+ _infra_osp_dns_default_ttl: 300
+ _dns_state: absent
+ include_tasks: instance_loop.yml
+
+ - name: Delete all servers inside the project
+ when: r_osp_server_facts.servers | length > 0
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ openstack.cloud.server:
+ name: "{{ item.id }}"
+ state: absent
+ wait: true
+ loop: "{{ r_osp_server_facts.servers }}"
- - name: Delete objects inside the project
+ - name: Gather volume facts
environment:
OS_PROJECT_NAME: "{{ osp_project_name }}"
- os_stack:
+ ansible.builtin.command:
+ openstack volume list --project {{ osp_project_name }} -f json
+ register: r_osp_volume_facts
+
+ - name: Detach all volumes
+ when:
+ - r_osp_volume_facts.stdout|from_json | length > 0
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ command:
+ openstack volume set --state available {{ item.ID }} --detached
+ loop: "{{ r_osp_volume_facts.stdout|from_json }}"
+
+ - name: Delete remaining objects inside the project
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ openstack.cloud.stack:
name: "create-objects-{{ osp_project_name }}"
state: absent
wait: true
- name: Delete project and unassign permission
- os_stack:
+ openstack.cloud.stack:
name: "create-project-{{ osp_project_name }}"
state: absent
wait: true
-
- - name: Iterate over all instances and delete DNS entries
- loop: "{{ r_osp_facts.openstack_servers }}"
- loop_control:
- loop_var: _instance
- vars:
- _infra_osp_dns_default_ttl: 300
- _dns_state: absent
-
- include_tasks: instance_loop.yml
diff --git a/ansible/configs/osp-migration/dns_loop.yml b/ansible/configs/osp-migration/dns_loop.yml
index 8dbee93ecf4..69ff53b41eb 100644
--- a/ansible/configs/osp-migration/dns_loop.yml
+++ b/ansible/configs/osp-migration/dns_loop.yml
@@ -4,26 +4,25 @@
- debug:
msg: >-
The floating IP for {{ _dns }}
- is {{ _instance.public_v4 }}
+ is {{ _instance.access_ipv4 }}
- name: DNS entry ({{ _dns_state | default('present') }})
nsupdate:
server: >-
{{ osp_cluster_dns_server
| ipaddr
- | ternary(osp_cluster_dns_server, lookup('dig', osp_cluster_dns_server))
+ | ternary(osp_cluster_dns_server, lookup('community.general.dig', osp_cluster_dns_server + "."))
}}
zone: "{{ osp_cluster_dns_zone }}"
- #zone: rhpds.opentlc.com
record: "{{ _dns }}"
type: A
ttl: "{{ _infra_osp_dns_default_ttl }}"
- value: "{{ _instance.public_v4 }}"
+ value: "{{ _instance.access_ipv4 }}"
key_name: "{{ ddns_key_name }}"
key_algorithm: "{{ ddns_key_algorithm | d('hmac-md5') }}"
key_secret: "{{ ddns_key_secret }}"
-# When state == absent, don't use r_osp_facts (should not be needed)
+# When state == absent, don't use r_osp_server_facts (should not be needed)
- when: _dns_state == 'absent'
block:
- name: DNS entry ({{ _dns_state | default('present') }})
@@ -31,10 +30,9 @@
server: >-
{{ osp_cluster_dns_server
| ipaddr
- | ternary(osp_cluster_dns_server, lookup('dig', osp_cluster_dns_server))
+ | ternary(osp_cluster_dns_server, lookup('community.general.dig', osp_cluster_dns_server + "."))
}}
zone: "{{ osp_cluster_dns_zone }}"
- #zone: rhpds.opentlc.com
record: "{{ _dns }}"
type: A
ttl: "{{ _infra_osp_dns_default_ttl }}"
diff --git a/ansible/configs/osp-migration/infra.yml b/ansible/configs/osp-migration/infra.yml
index 4158c67d2c0..59ed55324b4 100644
--- a/ansible/configs/osp-migration/infra.yml
+++ b/ansible/configs/osp-migration/infra.yml
@@ -1,6 +1,6 @@
---
- hosts: localhost
- gather_facts: false
+ gather_facts: true
vars:
api_user: "{{ guid }}"
default_metadata:
@@ -22,13 +22,13 @@
OS_PROJECT_NAME: "admin"
OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_project_info:
+ openstack.cloud.project_info:
name: "{{ osp_project_name }}"
register: project_exists
- fail:
msg: Project exists, can't continue
- when: project_exists.openstack_projects
+ when: project_exists.projects
- name: Create project and assign permission
register: stack_admin_output
@@ -39,7 +39,7 @@
OS_PROJECT_NAME: "admin"
OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_stack:
+ openstack.cloud.stack:
name: "create-project-{{osp_project_name}}"
template: "{{ output_dir }}/imported-templates/heat-templates/{{ project }}/stack_admin.yaml"
timeout: "{{ stack_create_timeout |d('3600') }}"
@@ -68,7 +68,7 @@
OS_PROJECT_NAME: "admin"
OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_user_role:
+ openstack.cloud.role_assignment:
state: present
user: "{{ osp_auth_username }}"
role: "admin"
@@ -112,7 +112,7 @@
OS_PROJECT_NAME: "{{ osp_project_name }}"
OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_stack:
+ openstack.cloud.stack:
name: "create-objects-{{osp_project_name}}"
template: "{{ output_dir }}/imported-templates/heat-templates/{{ project }}/stack_user.yaml"
timeout: "{{ stack_create_timeout |d('3600') }}"
@@ -202,12 +202,12 @@
OS_PROJECT_NAME: "{{ osp_project_name }}"
OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_server_info:
+ openstack.cloud.server_info:
server: "*"
filters:
metadata:
guid: "{{ guid }}"
- register: r_osp_facts
+ register: r_osp_server_facts
- set_fact:
@@ -225,7 +225,7 @@
- set_fact:
cloud_metadata: "{{ cloud_tags_final |combine(default_metadata) }}"
- - loop: "{{ r_osp_facts.openstack_servers }}"
+ - loop: "{{ r_osp_server_facts.servers }}"
loop_control:
loop_var: _server
@@ -237,20 +237,20 @@
OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_server_metadata:
+ openstack.cloud.server_metadata:
name: "{{ _server.name }}"
meta:
"{{ cloud_metadata }}"
- name: debug osp_facts
debug:
- var: r_osp_facts
+ var: r_osp_server_facts
- name: Iterate over all instances and create DNS entries
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.servers }}"
loop_control:
loop_var: _instance
- when: _instance.public_v4 | default('') != ''
+ when: _instance.access_ipv4 | default('') != ''
vars:
_infra_osp_dns_default_ttl: 300
_dns_state: present
diff --git a/ansible/configs/osp-migration/pre_infra.yml b/ansible/configs/osp-migration/pre_infra.yml
index aa0771574ef..60c13f96142 100644
--- a/ansible/configs/osp-migration/pre_infra.yml
+++ b/ansible/configs/osp-migration/pre_infra.yml
@@ -1,23 +1,51 @@
---
-- name: Step 000 Pre Infrastructure
+- name: Build inventory
hosts: localhost
connection: local
become: false
+ gather_facts: false
tags:
- - step001
- - pre_infrastructure
- - osp_migration
+ - step002
tasks:
- - name: Create migration host group
- add_host:
- name: "{{ import_host }}"
- ansible_become: true
- ansible_ssh_private_key_file: "{{ migration_key_path | default(omit) }}"
- ansible_user: "opentlc-mgr"
- bastion: "{{ import_host }}"
- group: "migration"
- output_dir: "{{ output_dir }}"
- remote_user: "opentlc-mgr"
+
+ - when: target_host is mapping
+ block:
+ - when:
+ - '"ansible_ssh_private_key_content" in target_host'
+ - '"ansible_ssh_private_key_file" in target_host'
+ fail:
+ msg: You cannot set both ansible_ssh_private_key_content and ansible_ssh_private_key_file
+
+ - when: '"ansible_ssh_private_key_content" in target_host'
+ block:
+ - name: Prepare ssh_key from provided content
+ copy:
+ content: "{{ target_host.ansible_ssh_private_key_content }}"
+ dest: "{{ output_dir }}/ssh_key.pem"
+ mode: "0600"
+
+ - set_fact:
+ target_host_ansible_ssh_private_key_file: "{{ output_dir }}/ssh_key.pem"
+
+ - name: Add migration host to inventory
+ add_host:
+ name: >-
+ {{
+ target_host.name
+ | default(target_host.hostname)
+ | default(target_host.ansible_host)
+ }}
+ ansible_host: "{{ target_host.ansible_host | default(omit) }}"
+ group: migration
+ ansible_user: "{{ target_host.ansible_user | default(omit) }}"
+ ansible_port: "{{ target_host.ansible_port | default(omit) }}"
+ ansible_ssh_private_key_file: >-
+ {{ target_host.ansible_ssh_private_key_file
+ | default(target_host_ansible_ssh_private_key_file)
+ | default(omit) }}
+ ansible_ssh_extra_args: "{{ target_host.ansible_ssh_extra_args | default(omit) }}"
+ ansible_ssh_pipelining: true
+ ansible_python_interpreter: /root/virtualenvs/python3.8-migration/bin/python
- name: Step 001 Migrating blueprints
hosts: migration
@@ -25,30 +53,30 @@
remote_user: opentlc-mgr
gather_facts: true
tags:
- - step001
- - pre_infrastructure
- - osp_migration
+ - step001
+ - pre_infrastructure
+ - osp_migration
tasks:
- - name: Download images from project
- become: true
- environment:
- OS_AUTH_URL: "{{ osp_auth_url }}"
- OS_USERNAME: "{{ osp_auth_username }}"
- OS_PASSWORD: "{{ osp_auth_password }}"
- OS_PROJECT_NAME: "admin"
- OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
- OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- OS_INTERFACE: "{{ osp_interface | default('internal') }}"
- PATH: "/root/.local/bin:{{ ansible_env.PATH }}"
- CEPH_CONF: "/etc/ceph/{{ ceph_cluster | default('red') }}.conf"
- convert_blueprint:
- ibm_endpoint: "{{ ibm_endpoint }}"
- ibm_auth_endpoint: "{{ ibm_auth_endpoint }}"
- ibm_api_key: "{{ ibm_api_key }}"
- ibm_resource_id: "{{ ibm_resource_id }}"
- bucket: "{{ ibm_bucket_name }}"
- project: "{{ project }}"
- output_dir: "{{ output_dir }}"
- mode: "download"
- glance_pool: "{{ ceph_cluster | default('red') }}-images"
- overwrite: "{{ overwrite_image | default('false') }}"
+ - name: Download images from project
+ become: true
+ environment:
+ OS_AUTH_URL: "{{ osp_auth_url }}"
+ OS_USERNAME: "{{ osp_auth_username }}"
+ OS_PASSWORD: "{{ osp_auth_password }}"
+ OS_PROJECT_NAME: "admin"
+ OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
+ OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
+ OS_INTERFACE: "{{ osp_interface | default('internal') }}"
+ PATH: "/root/.local/bin:{{ ansible_env.PATH }}"
+ CEPH_CONF: "/etc/ceph/{{ ceph_cluster | default('red') }}.conf"
+ convert_blueprint:
+ ibm_endpoint: "{{ ibm_endpoint }}"
+ ibm_auth_endpoint: "{{ ibm_auth_endpoint }}"
+ ibm_api_key: "{{ ibm_api_key }}"
+ ibm_resource_id: "{{ ibm_resource_id }}"
+ bucket: "{{ ibm_bucket_name }}"
+ project: "{{ project }}"
+ output_dir: "{{ output_dir }}"
+ mode: "download"
+ glance_pool: "{{ ceph_cluster | default('red') }}-images"
+ overwrite: "{{ overwrite_image | default('false') }}"
diff --git a/ansible/configs/osp-migration/requirements.yml b/ansible/configs/osp-migration/requirements.yml
index c7e18ddd82c..a799eb02964 100644
--- a/ansible/configs/osp-migration/requirements.yml
+++ b/ansible/configs/osp-migration/requirements.yml
@@ -1,4 +1,4 @@
---
collections:
- name: openstack.cloud
- version: 1.8.0
+ version: 2.1.0
diff --git a/ansible/configs/osp-migration/sample_vars.yml b/ansible/configs/osp-migration/sample_vars.yml
deleted file mode 100644
index 5f7593c5d5c..00000000000
--- a/ansible/configs/osp-migration/sample_vars.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-guid: gucore3
-env_type: osp-migration
-
-project: dev-ansible-tower-implementation-3.3-v6
-#project: EMEA-PC-azure-gold-image-bp
-
-cloud_provider: osp
-
-admin_user: gucore
-
-student_name: guillaume
-
-
-external_network: ee9d9e11-9f4b-4b78-8802-3d3e670ca0f0
-
-osp_cluster_dns_zone: red.osp.opentlc.com
-osp_cluster_dns_server: ddns01.opentlc.com
-
-heat_retries: 0
-
-repo_method: file
-
-common_install_basic_packages_retries: 0
-
-output_dir: /tmp/output_dir
diff --git a/ansible/configs/osp-sandbox/post_software.yml b/ansible/configs/osp-sandbox/post_software.yml
index 58d17a22f3e..55dd2c96a26 100644
--- a/ansible/configs/osp-sandbox/post_software.yml
+++ b/ansible/configs/osp-sandbox/post_software.yml
@@ -9,29 +9,42 @@
- name: Report user info
agnosticd_user_info:
- msg: "{{ item }}"
- loop:
- - "You can access your bastion via SSH:"
- - "ssh {{ student_name }}@bastion.{{ guid }}.{{ osp_cluster_dns_zone }}"
- - ""
- - "Make sure you use the username '{{ student_name }}' and the password '{{ hostvars.bastion.student_password }}' when prompted."
- - ""
- - "Your base domain is '{{ student_dns_zone | default(osp_cluster_dns_zone) }}'"
- - ""
- - "For reference, the data you need to create your clouds.yaml file is:"
- - ""
- - "clouds:"
- - " {{ osp_project_name }}:"
- - " auth:"
- - " auth_url: {{ osp_auth_url }}"
- - " username: {{ guid }}-user"
- - " project_name: {{ osp_project_name }}"
- - " project_id: {{ hostvars.localhost.osp_project_info[0].id }}"
- - " user_domain_name: Default"
- - " password: {{ hostvars.localhost.heat_user_password }}"
- - " region_name: regionOne"
- - " interface: public"
- - " identity_api_version: 3"
+ msg:
+ - "You can access your bastion via SSH:"
+ - "ssh {{ student_name }}@bastion.{{ guid }}.{{ osp_cluster_dns_zone }}"
+ - "Use password '{{ hostvars.bastion.student_password }}' when prompted."
+ - ""
+ - "You can access the Horizon dashboard at:"
+ - "https://{{ osp_auth_url | urlsplit('hostname') }}"
+ - ""
+ - "For reference, the data you need to create your clouds.yaml file is:"
+ - ""
+ - "
"
+ - "clouds:"
+ - " {{ osp_project_name }}:"
+ - " auth:"
+ - " auth_url: {{ osp_auth_url }}"
+ - " username: {{ guid }}-user"
+ - " project_name: {{ osp_project_name }}"
+ - " project_id: {{ hostvars.localhost.osp_project_info[0].id }}"
+ - " user_domain_name: Default"
+ - " password: {{ hostvars.localhost.heat_user_password }}"
+ - " region_name: regionOne"
+ - " interface: public"
+ - " identity_api_version: 3"
+ - "
"
+ - ""
+ - "Alternatively the environment variables that need to be set:"
+ - ""
+ - "export OS_AUTH_URL={{ osp_auth_url }}"
+ - "export OS_USERNAME={{ guid }}-user"
+ - "export OS_PASSWORD={{ hostvars.localhost.heat_user_password }}"
+ - "export OS_PROJECT_NAME={{ osp_project_name }}"
+ - "export OS_PROJECT_ID={{ hostvars.localhost.osp_project_info[0].id }}"
+ - "export OS_USER_DOMAIN_NAME=Default"
+ - "export OS_REGION_NAME=regionOne"
+ - ""
+ - "Your base domain is '{{ student_dns_zone | default(osp_cluster_dns_zone) }}'"
- debug:
msg: "Post-Software checks completed successfully"
diff --git a/ansible/configs/osp-satellite-vm/destroy_env.yml b/ansible/configs/osp-satellite-vm/destroy_env.yml
index 606779ed25f..48b97de22f6 100644
--- a/ansible/configs/osp-satellite-vm/destroy_env.yml
+++ b/ansible/configs/osp-satellite-vm/destroy_env.yml
@@ -1,6 +1,4 @@
---
-- import_playbook: ../../setup_runtime.yml
-
- name: Teardown OpenStack project and resources
hosts: localhost
connection: local
@@ -21,50 +19,69 @@
}}-{{ guid }}
- name: Check if project exists
- environment:
- OS_AUTH_URL: "{{ osp_auth_url }}"
- OS_USERNAME: "{{ osp_auth_username }}"
- OS_PASSWORD: "{{ osp_auth_password }}"
- OS_PROJECT_NAME: "admin"
- OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
- OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
- os_project_info:
+ openstack.cloud.project_info:
name: "{{ osp_project_name }}"
- register: project_exists
+ register: project_facts
- meta: end_play
- when: project_exists.openstack_projects | length == 0
+ when: project_facts.projects | length == 0
- name: Gather instance facts
environment:
OS_PROJECT_NAME: "{{ osp_project_name }}"
- os_server_info:
+ openstack.cloud.server_info:
+ all_projects: false
server: "*"
filters:
metadata:
guid: "{{ guid }}"
- register: r_osp_facts
+ register: r_osp_server_facts
+
+ - name: Iterate over all instances and delete DNS entries
+ loop: "{{ r_osp_server_facts.servers }}"
+ loop_control:
+ loop_var: _instance
+ vars:
+ _infra_osp_dns_default_ttl: 300
+ _dns_state: absent
+ include_tasks: instance_loop.yml
+
+ - name: Delete all servers inside the project
+ when: r_osp_server_facts.servers | length > 0
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ openstack.cloud.server:
+ name: "{{ item.id }}"
+ state: absent
+ wait: true
+ loop: "{{ r_osp_server_facts.servers }}"
- - name: Delete objects inside the project
+ - name: Gather volume facts
environment:
OS_PROJECT_NAME: "{{ osp_project_name }}"
- os_stack:
+ ansible.builtin.command:
+ openstack volume list --project {{ osp_project_name }} -f json
+ register: r_osp_volume_facts
+
+ - name: Detach all volumes
+ when:
+ - r_osp_volume_facts.stdout|from_json | length > 0
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ command:
+ openstack volume set --state available {{ item.ID }} --detached
+ loop: "{{ r_osp_volume_facts.stdout|from_json }}"
+
+ - name: Delete remaining objects inside the project
+ environment:
+ OS_PROJECT_NAME: "{{ osp_project_name }}"
+ openstack.cloud.stack:
name: "create-objects-{{ osp_project_name }}"
state: absent
wait: true
- name: Delete project and unassign permission
- os_stack:
+ openstack.cloud.stack:
name: "create-project-{{ osp_project_name }}"
state: absent
wait: true
-
- - name: Iterate over all instances and delete DNS entries
- loop: "{{ r_osp_facts.openstack_servers }}"
- loop_control:
- loop_var: _instance
- vars:
- _infra_osp_dns_default_ttl: 300
- _dns_state: absent
-
- include_tasks: instance_loop.yml
diff --git a/ansible/configs/osp-satellite-vm/infra.yml b/ansible/configs/osp-satellite-vm/infra.yml
index 342dce62560..b887c047cbd 100644
--- a/ansible/configs/osp-satellite-vm/infra.yml
+++ b/ansible/configs/osp-satellite-vm/infra.yml
@@ -147,9 +147,9 @@
filters:
metadata:
guid: "{{ guid }}"
- register: r_osp_facts
+ register: r_osp_server_facts
- - loop: "{{ r_osp_facts.openstack_servers }}"
+ - loop: "{{ r_osp_server_facts.openstack_servers }}"
loop_control:
loop_var: _server
@@ -168,10 +168,10 @@
- name: debug osp_facts
debug:
- var: r_osp_facts
+ var: r_osp_server_facts
- name: Iterate over all instances and create DNS entries
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.openstack_servers }}"
loop_control:
loop_var: _instance
when: _instance.public_v4 | default('') != ''
diff --git a/ansible/configs/osp-satellite-vm/requirements.yml b/ansible/configs/osp-satellite-vm/requirements.yml
index c7e18ddd82c..783fcc47b0f 100644
--- a/ansible/configs/osp-satellite-vm/requirements.yml
+++ b/ansible/configs/osp-satellite-vm/requirements.yml
@@ -1,4 +1,4 @@
---
collections:
- name: openstack.cloud
- version: 1.8.0
+ version: 1.10.0
diff --git a/ansible/configs/osp-satellite-vm/sample_vars.yml b/ansible/configs/osp-satellite-vm/sample_vars.yml
deleted file mode 100644
index a5d29f1d766..00000000000
--- a/ansible/configs/osp-satellite-vm/sample_vars.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-guid: gucore3
-env_type: osp-migration
-
-project: dev-ansible-tower-implementation-3.3-v6
-#project: EMEA-PC-azure-gold-image-bp
-
-cloud_provider: osp
-
-admin_user: gucore
-
-student_name: guillaume
-
-
-external_network: ee9d9e11-9f4b-4b78-8802-3d3e670ca0f0
-
-osp_cluster_dns_zone: red.osp.opentlc.com
-osp_cluster_dns_server: ddns01.opentlc.com
-
-heat_retries: 0
-
-repo_method: file
-
-common_install_basic_packages_retries: 0
-
-output_dir: /tmp/output_dir
-
-satellite_hosts_register_key: rhel-8_ak
-satellite_org_label: Red_Hat
-satellite_admin: admin
-satellite_admin_password: admin
diff --git a/ansible/configs/osp-stf/post_software.yml b/ansible/configs/osp-stf/post_software.yml
index b3b27805356..ece378b9888 100644
--- a/ansible/configs/osp-stf/post_software.yml
+++ b/ansible/configs/osp-stf/post_software.yml
@@ -141,7 +141,7 @@
gather_facts: false
vars:
crc_find_ip_query: ansible_facts.openstack_servers[?name=='stfcrc'].public_v4 | [0]
- crc_public_ip: "{{ r_osp_facts | json_query(crc_find_ip_query) }}"
+ crc_public_ip: "{{ r_osp_server_facts | json_query(crc_find_ip_query) }}"
tasks:
- name: Print labconsole information as user.info
agnosticd_user_info:
diff --git a/ansible/configs/osp17-director-deployment/files/cloud_providers/osp_cloud_template_master.j2 b/ansible/configs/osp17-director-deployment/files/cloud_providers/osp_cloud_template_master.j2
index 09d58e87d7b..634a8e7a58c 100644
--- a/ansible/configs/osp17-director-deployment/files/cloud_providers/osp_cloud_template_master.j2
+++ b/ansible/configs/osp17-director-deployment/files/cloud_providers/osp_cloud_template_master.j2
@@ -161,6 +161,8 @@ resources:
fip_association_{{ iname }}:
type: OS::Neutron::FloatingIPAssociation
+ depends_on:
+ - {{ network }}-router_private_interface
properties:
floatingip_id: {get_resource: fip_{{ network }}_{{ iname }}}
port_id: {get_resource: port_{{ network }}_{{ iname }}}
diff --git a/ansible/configs/osp17-director-deployment/pre_infra.yml b/ansible/configs/osp17-director-deployment/pre_infra.yml
index 489867de703..1efda1872a0 100644
--- a/ansible/configs/osp17-director-deployment/pre_infra.yml
+++ b/ansible/configs/osp17-director-deployment/pre_infra.yml
@@ -3,11 +3,52 @@
hosts: localhost
connection: local
become: false
- gather_facts: false
tags:
- - step001
- - pre_infrastructure
+ - step001
+ - pre_infrastructure
+ - osp_migration
tasks:
- - name: Pre-Infra
- debug:
- msg: "Pre-Infra work is done"
+ - name: Create migration host group
+ add_host:
+ name: "{{ import_host }}"
+ ansible_become: true
+ ansible_ssh_private_key_file: "{{ migration_key_path | default(omit) }}"
+ ansible_user: "opentlc-mgr"
+ bastion: "{{ import_host }}"
+ group: "migration"
+ output_dir: "{{ output_dir }}"
+ remote_user: "opentlc-mgr"
+
+- name: Step 001 Migrating blueprints
+ hosts: migration
+ become: true
+ remote_user: opentlc-mgr
+ gather_facts: true
+ tags:
+ - step001
+ - pre_infrastructure
+ - osp_migration
+ tasks:
+ - name: Download images from project
+ become: true
+ environment:
+ OS_AUTH_URL: "{{ osp_auth_url }}"
+ OS_USERNAME: "{{ osp_auth_username }}"
+ OS_PASSWORD: "{{ osp_auth_password }}"
+ OS_PROJECT_NAME: "admin"
+ OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
+ OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
+ OS_INTERFACE: "{{ osp_interface | default('internal') }}"
+ PATH: "/root/.local/bin:{{ ansible_env.PATH }}"
+ CEPH_CONF: "/etc/ceph/{{ ceph_cluster | default('red') }}.conf"
+ convert_blueprint:
+ ibm_endpoint: "{{ ibm_endpoint }}"
+ ibm_auth_endpoint: "{{ ibm_auth_endpoint }}"
+ ibm_api_key: "{{ ibm_api_key }}"
+ ibm_resource_id: "{{ ibm_resource_id }}"
+ bucket: "{{ ibm_bucket_name }}"
+ project: "{{ image_store }}"
+ output_dir: "{{ output_dir }}"
+ mode: "download"
+ glance_pool: "{{ ceph_cluster | default('red') }}-images"
+ overwrite: "{{ overwrite_image | default('false') }}"
diff --git a/ansible/configs/osp17-director-deployment/requirements.yml b/ansible/configs/osp17-director-deployment/requirements.yml
index cbd0c0482bc..7a550f58402 100644
--- a/ansible/configs/osp17-director-deployment/requirements.yml
+++ b/ansible/configs/osp17-director-deployment/requirements.yml
@@ -8,4 +8,6 @@ collections:
version: 4.0.2
- name: ansible.posix
version: 1.3.0
+- name: kubernetes.core
+ version: 2.4.0
...
diff --git a/ansible/configs/prp-binder/README.adoc b/ansible/configs/prp-binder/README.adoc
new file mode 100644
index 00000000000..f7c4da0d518
--- /dev/null
+++ b/ansible/configs/prp-binder/README.adoc
@@ -0,0 +1,72 @@
+== Overview
+
+*prp-binder* _config_ is an empty test config that does nothing other
+than call in sequence the default playbooks.
+image::topology.png[width=100%]
+
+== Supported Cloud Providers
+
+An empty test cloud provider has been created
+
+* `test`
+
+== Review the Env_Type variable file
+
+For further information on customizing images consult the link:../../../docs/Creating_a_config.adoc[Creating a Config Guide]
+
+== Review the `sample_vars.yml` variable file
+
+----
+
+---
+guid: test-config-00
+env_type: prp-binder
+cloud_provider: test
+...
+
+----
+
+== Deploying the `prp-binder`
+
+You can deploy this config by running the following command from the `ansible`
+directory.
+
+
+`ansible-playbook main.yml -e @configs/prp-binder/sample_vars.yml`
+
+== Force failing the `prp-binder`
+
+You can force this config to fail at any stage including the cloud provider stage
+by setting or passing the appropriate boolean value:
+
+[source,yaml]
+----
+fail_pre_infra
+fail_test_cloud_provider
+fail_post_infra
+fail_pre_software
+fail_software
+fail_post_software
+----
+
+`ansible-playbook main.yml -e @configs/prp-binder/sample_vars.yml -e '{ "fail_software" : true }'`
+
+== Controlling provision duration
+
+You can control how long it takes this config to complete by enabling a pause during the post-software stage.
+
+[source,yaml]
+----
+prp_binder_pause_post_software
+prp_binder_pause_post_software_seconds
+----
+
+`ansible-playbook main.yml -e @configs/prp-binder/sample_vars.yml -e '{"prp_binder_pause_post_software" : true, "prp_binder_pause_post_software_seconds": 600}'`
+
+=== To Delete an environment
+
+This step is unnecessary as nothing is actually created. However the following
+will simulate a deletion.
+
+
+`ansible-playbook destroy.yml -e @configs/prp-binder/sample_vars.yml`
diff --git a/ansible/configs/prp-binder/default_vars.yml b/ansible/configs/prp-binder/default_vars.yml
new file mode 100644
index 00000000000..ab1180b2cec
--- /dev/null
+++ b/ansible/configs/prp-binder/default_vars.yml
@@ -0,0 +1,10 @@
+---
+# To use bookbag, bookbag_deploy must be true and a value must be provided for
+# bookbag_git_repo
+bookbag_deploy: false
+#bookbag_git_repo: https://github.com/redhat-gpte-labs/bookbag-template.git
+
+# Control whether to simulate multi-user environment by reporting per-user info messages and data
+prp_binder_multi_user: false
+prp_binder_user_count: "{{ user_count | default(num_users) | default(10) }}"
+...
diff --git a/ansible/configs/prp-binder/default_vars_ec2.yml b/ansible/configs/prp-binder/default_vars_ec2.yml
new file mode 100644
index 00000000000..dd16960f718
--- /dev/null
+++ b/ansible/configs/prp-binder/default_vars_ec2.yml
@@ -0,0 +1,3 @@
+---
+# mandatory to run ansible/destroy.yml playbook
+aws_region: us-east-1
diff --git a/ansible/configs/prp-binder/destroy_env.yml b/ansible/configs/prp-binder/destroy_env.yml
new file mode 100644
index 00000000000..2b3277a9854
--- /dev/null
+++ b/ansible/configs/prp-binder/destroy_env.yml
@@ -0,0 +1,40 @@
+---
+- name: Destroy playbook
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tasks:
+
+ - name: Entering the prp-binder destroy.yml
+ debug:
+ msg:
+ - Entering the prp-binder destroy.yml
+
+ - name: Remove Bookbag
+ when:
+ - bookbag_git_repo is defined
+ include_role:
+ name: bookbag
+ vars:
+ ACTION: destroy
+
+ - when: pause_destroy | default(false) | bool
+ pause:
+ seconds: 30
+
+ - when: cloud_provider == 'osp'
+ name: Include OSP dry-run read-only role
+ include_role:
+ name: infra-osp-dry-run
+
+ - when: cloud_provider == 'ec2'
+ name: Include AWS dry-run read-only role
+ include_role:
+ name: infra-aws-dry-run
+
+ - name: Exiting the prp-binder destroy.yml
+ debug:
+ msg:
+ - Exiting the prp-binder destroy.yml
+...
diff --git a/ansible/configs/prp-binder/infra.yml b/ansible/configs/prp-binder/infra.yml
new file mode 100644
index 00000000000..d7459e6162c
--- /dev/null
+++ b/ansible/configs/prp-binder/infra.yml
@@ -0,0 +1,41 @@
+---
+- name: Step 001 infra
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step001
+ - infrastructure
+ tasks:
+
+ - name: Entering the prp-binder infra.yml
+ debug:
+ msg:
+ - Entering the prp-binder infra.yml
+
+ - when: fail_infra | default(false) | bool
+ name: Fail the prp-binder infra.yml if requested
+ fail:
+ msg: infra.yml failed as requested
+
+ - when: cloud_provider == 'osp'
+ name: Include OSP dry-run read-only role
+ include_role:
+ name: infra-osp-dry-run
+
+ - when: cloud_provider == 'ec2'
+ name: Include AWS dry-run read-only role
+ include_role:
+ name: infra-aws-dry-run
+
+ - when: cloud_provider == 'equinix_metal'
+ name: Include Equinix Metal dry-run read-only role
+ include_role:
+ name: infra-equinix-metal-dry-run
+
+ - name: Exiting the prp-binder infra.yml
+ debug:
+ msg:
+ - Exiting the prp-binder infra.yml
+...
diff --git a/ansible/configs/prp-binder/lifecycle.yml b/ansible/configs/prp-binder/lifecycle.yml
new file mode 100644
index 00000000000..1de28146ef2
--- /dev/null
+++ b/ansible/configs/prp-binder/lifecycle.yml
@@ -0,0 +1,20 @@
+- name: Step lifecycle
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tasks:
+ - when: cloud_provider == 'osp'
+ name: Include OSP dry-run read-only role
+ include_role:
+ name: infra-osp-dry-run
+
+ - when: cloud_provider == 'ec2'
+ name: Include AWS dry-run read-only role
+ include_role:
+ name: infra-aws-dry-run
+
+ - when: cloud_provider == 'equinix_metal'
+ name: Include Equinix Metal dry-run read-only role
+ include_role:
+ name: infra-equinix-metal-dry-run
diff --git a/ansible/configs/prp-binder/post_infra.yml b/ansible/configs/prp-binder/post_infra.yml
new file mode 100644
index 00000000000..d3f3c4936a8
--- /dev/null
+++ b/ansible/configs/prp-binder/post_infra.yml
@@ -0,0 +1,26 @@
+---
+- name: Step 002 Post Infrastructure
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step002
+ - post_infrastructure
+ tasks:
+
+ - name: Entering the prp-binder post_infra.yml
+ debug:
+ msg:
+ - Entering the prp-binder post_infra.yml
+
+ - when: fail_post_infra | default(false) | bool
+ name: Fail the prp-binder post_infra.yml if requested
+ fail:
+ msg: post_infra.yml failed as requested
+
+ - name: Exiting the prp-binder post_infra.yml
+ debug:
+ msg:
+ - Exiting the prp-binder post_infra.yml
+...
diff --git a/ansible/configs/prp-binder/post_software.yml b/ansible/configs/prp-binder/post_software.yml
new file mode 100644
index 00000000000..ca117dd4ad0
--- /dev/null
+++ b/ansible/configs/prp-binder/post_software.yml
@@ -0,0 +1,37 @@
+---
+- name: Step 005 Post Software
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step005
+ - post_software
+ environment:
+ K8S_AUTH_VERIFY_SSL: "false"
+ K8S_AUTH_HOST: "{{ prp_ocp_argo.openshift_api_server_url }}"
+ K8S_AUTH_USERNAME: "{{ prp_ocp_argo.openshift_cluster_admin_username }}"
+ K8S_AUTH_PASSWORD: "{{ prp_ocp_argo.openshift_cluster_admin_password }}"
+ tasks:
+
+ - name: Entering the prp-binder post_software.yml
+ debug:
+ msg:
+ - Entering the prp-binder post_software.yml
+
+ # must call this as a role to allow the collections to be updated.
+ # roles lazy evaluate, allowing time (and context?) for the requirements.yml
+ # to be processed
+ - name: Log in to OpenShift and run the gitops_bootstrapper
+ ansible.builtin.include_role:
+ name: ocp_auth_bootstrapper
+
+ - name: Print string expected by Cloudforms
+ debug:
+ msg: "Post-Software checks completed successfully"
+
+ - name: Exiting the prp-binder post_software.yml
+ debug:
+ msg:
+ - Exiting the prp-binder post_software.yml
+...
diff --git a/ansible/configs/prp-binder/pre_infra.yml b/ansible/configs/prp-binder/pre_infra.yml
new file mode 100644
index 00000000000..660f32a7d3a
--- /dev/null
+++ b/ansible/configs/prp-binder/pre_infra.yml
@@ -0,0 +1,28 @@
+---
+- name: Step 000 Pre Infrastructure
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+
+ tags:
+ - step001
+ - pre_infrastructure
+
+ tasks:
+
+ - name: Entering the prp-binder pre_infra.yml
+ debug:
+ msg:
+ - Entering the prp-binder pre_infra.yml
+
+ - when: fail_pre_infra | default(false) | bool
+ name: Fail the prp-binder pre_infra.yml if requested
+ fail:
+ msg: pre_infra.yml failed as requested
+
+ - name: Exiting the prp-binder pre_infra.yml
+ debug:
+ msg:
+ - Exiting the prp-binder pre_infra.yml
+...
diff --git a/ansible/configs/prp-binder/pre_software.yml b/ansible/configs/prp-binder/pre_software.yml
new file mode 100644
index 00000000000..74dccc69fd7
--- /dev/null
+++ b/ansible/configs/prp-binder/pre_software.yml
@@ -0,0 +1,28 @@
+---
+- name: Step 003 Pre Software
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step003
+ - pre_software
+ tasks:
+
+ - name: Entering the prp-binder pre_software.yml
+ debug:
+ msg:
+ - Entering the prp-binder pre_software.yml
+
+ - when: fail_pre_software | default(false) | bool
+ name: Fail the prp-binder pre_software.yml if requested
+ fail:
+ msg: pre_software.yml failed as requested
+
+ - name: Exiting the prp-binder pre_software.yml
+ debug:
+ msg:
+ - Exiting the prp-binder pre_software.yml
+ - debug:
+ msg: Pre-Software checks completed successfully
+...
diff --git a/ansible/configs/prp-binder/requirements.yml b/ansible/configs/prp-binder/requirements.yml
new file mode 100644
index 00000000000..30fb09e6a10
--- /dev/null
+++ b/ansible/configs/prp-binder/requirements.yml
@@ -0,0 +1,6 @@
+---
+collections:
+ - name: community.okd
+ version: 2.3.0
+ - name: kubernetes.core
+ version: 2.4.0
diff --git a/ansible/configs/prp-binder/roles/ocp_auth_bootstrapper/tasks/main.yml b/ansible/configs/prp-binder/roles/ocp_auth_bootstrapper/tasks/main.yml
new file mode 100644
index 00000000000..4eea5e9bde5
--- /dev/null
+++ b/ansible/configs/prp-binder/roles/ocp_auth_bootstrapper/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+- name: Log in obtain access token
+ #community.okd.openshift_auth:
+ community.okd.openshift_auth:
+ validate_certs: false
+ username: "{{ prp_ocp_argo.openshift_cluster_admin_username }}"
+ password: "{{ prp_ocp_argo.openshift_cluster_admin_password }}"
+ host: "{{ prp_ocp_argo.openshift_api_server_url }}"
+ register: _auth_results
+
+- name: |
+ Call role ocp4_workload_gitops_bootstrap with environment
+ ansible.builtin.include_role:
+ name: ocp4_workload_gitops_bootstrap
+ apply:
+ environment:
+ K8S_AUTH_VERIFY_SSL: "false"
+ K8S_AUTH_HOST: "{{ prp_ocp_argo.openshift_api_server_url }}"
+ K8S_AUTH_USERNAME: "{{ prp_ocp_argo.openshift_cluster_admin_username }}"
+ K8S_AUTH_API_KEY: "{{ _auth_results.openshift_auth.api_key }}"
diff --git a/ansible/configs/prp-binder/sample_vars.yml b/ansible/configs/prp-binder/sample_vars.yml
new file mode 100644
index 00000000000..775866816a9
--- /dev/null
+++ b/ansible/configs/prp-binder/sample_vars.yml
@@ -0,0 +1,9 @@
+---
+guid: test-config-00
+env_type: prp-binder
+cloud_provider: test
+
+prp_binder_passthrough_user_data: |
+ hello: world
+ foo: bar
+...
diff --git a/ansible/configs/prp-binder/software.yml b/ansible/configs/prp-binder/software.yml
new file mode 100644
index 00000000000..16ad73a0305
--- /dev/null
+++ b/ansible/configs/prp-binder/software.yml
@@ -0,0 +1,32 @@
+---
+- name: Step 004 Software
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tags:
+ - step004
+ - deploy_software
+ tasks:
+
+ - name: Entering the prp-binder software.yml
+ debug:
+ msg:
+ - Entering the prp-binder software.yml
+
+ - when: fail_software | default(false) | bool
+ name: Fail the prp-binder software.yml if requested
+ fail:
+ msg: software.yml failed as requested
+
+ - name: Exiting the prp-binder software.yml
+ debug:
+ msg:
+ - Exiting the prp-binder software.yml
+
+ - name: Test agnosticd_user_info with GUID message and data
+ agnosticd_user_info:
+ msg: GUID is {{ guid }}
+ data:
+ GUID: "{{ guid }}"
+...
diff --git a/ansible/configs/prp-binder/status.yml b/ansible/configs/prp-binder/status.yml
new file mode 100644
index 00000000000..f196b40a3ba
--- /dev/null
+++ b/ansible/configs/prp-binder/status.yml
@@ -0,0 +1,19 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: false
+ tasks:
+ - name: Report status data in user info
+ agnosticd_user_info:
+ data:
+ instances:
+ - name: fake-server
+ state: running
+ type: fake-type
+
+ - name: Report status messages in user info
+ agnosticd_user_info:
+ msg: |-
+ {{ "%-60s %-10s %s" | format("Instance", "State", "Type") }}
+ ----------------------------------------------------------------
+ {{ "%-60s %-10s %s" | format("fake-server", "running", "fake-type") }}
diff --git a/ansible/configs/prp-binder/update.yml b/ansible/configs/prp-binder/update.yml
new file mode 100644
index 00000000000..c5153e359e1
--- /dev/null
+++ b/ansible/configs/prp-binder/update.yml
@@ -0,0 +1,34 @@
+---
+- name: Update prp-binder
+ hosts: localhost
+ connection: local
+ become: false
+ gather_facts: false
+ tasks:
+ - name: Entering the prp-binder update.yml
+ debug:
+ msg:
+ - Entering the prp-binder update.yml
+
+ - name: Check presence of random_string in user info from initial provision
+ debug:
+ msg: "random_string: {{ lookup('agnosticd_user_data', 'random_string') }}"
+
+ - when: fail_update | default(false) | bool
+ name: Fail the prp-binder update.yml if requested
+ fail:
+ msg: update.yml failed as requested
+
+ - name: Test update agnosticd_user_info with current timestamp
+ agnosticd_user_info:
+ msg: Updated at {{ __timestamp }}
+ data:
+ test_update_timestamp: "{{ __timestamp }}"
+ vars:
+ __timestamp: "{{ now(utc=true, fmt='%FT%TZ') }}"
+
+ - name: Exiting the prp-binder update.yml
+ debug:
+ msg:
+ - Exiting the prp-binder update.yml
+...
diff --git a/ansible/configs/rhel8-base/post_software.yml b/ansible/configs/rhel8-base/post_software.yml
index 8725d9f13f2..643bdff9244 100644
--- a/ansible/configs/rhel8-base/post_software.yml
+++ b/ansible/configs/rhel8-base/post_software.yml
@@ -9,18 +9,22 @@
- name: Print out user infos
when: install_student_user
block:
+ - name: Set short hostname
+ set_fact:
+ rhel_host: "{{ groups['bastions'][0].split('.')[0] }}"
+
- name: print out user.info
agnosticd_user_info:
msg: "{{ item }}"
loop:
- "You can access your bastion via SSH:"
- ""
- - "SSH Access: ssh {{ student_name }}@rhel8.{{ guid }}{{ subdomain_base_suffix }}"
+ - "SSH Access: ssh {{ student_name }}@{{ rhel_host }}.{{ guid }}{{ subdomain_base_suffix }}"
- "SSH password: {{ hostvars[groups.bastions.0].student_password | d('The password is a myth.') }}"
- name: Save user data
agnosticd_user_info:
data:
- ssh_command: "ssh {{ student_name }}@rhel8.{{ guid }}{{ subdomain_base_suffix }}"
+ ssh_command: "ssh {{ student_name }}@{{ rhel_host }}.{{ guid }}{{ subdomain_base_suffix }}"
ssh_user: "{{ student_name }}"
ssh_password: "{{ hostvars[groups.bastions.0].student_password | d('The password is a myth.') }}"
diff --git a/ansible/configs/rhel8lab/infra-osp-create-inventory.yml b/ansible/configs/rhel8lab/infra-osp-create-inventory.yml
index 6579524cfc4..7cde7e225f3 100644
--- a/ansible/configs/rhel8lab/infra-osp-create-inventory.yml
+++ b/ansible/configs/rhel8lab/infra-osp-create-inventory.yml
@@ -32,7 +32,7 @@
image_id: "{{ server.image.id | default('') }}"
ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
# bastion: "{{ local_bastion | default('') }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.openstack_servers }}"
loop_control:
label: "{{ server | json_query(_name_selector) | default(server.name) }}"
loop_var: server
@@ -43,7 +43,7 @@
- add_host:
name: "{{ server | json_query(_name_selector) | default(server.name) }}"
groups: "{{ server.metadata.AnsibleGroup }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.openstack_servers }}"
loop_control:
label: "{{ server | json_query(_name_selector) | default(server.name) }}"
loop_var: server
diff --git a/ansible/configs/rhel9-workshop/default_vars_ec2.yml b/ansible/configs/rhel9-workshop/default_vars_ec2.yml
index bd8d7b68458..22724970624 100644
--- a/ansible/configs/rhel9-workshop/default_vars_ec2.yml
+++ b/ansible/configs/rhel9-workshop/default_vars_ec2.yml
@@ -42,6 +42,14 @@ security_groups:
- name: NodeSG
rules:
+ - name: NodeHTTPSPorts
+ description: "General HTTPS Public"
+ from_port: 443
+ to_port: 443
+ protocol: tcp
+ cidr: "0.0.0.0/0"
+ rule_type: Ingress
+
- name: FromNodeSGtcp
description: "Allow everything from HostSG nodes"
from_port: 0
@@ -151,7 +159,7 @@ instances:
- name: "node"
count: 3
- public_dns: false
+ public_dns: true
image_id: "{{ node_instance_image | default(aws_default_image) }}"
image: "{{ node_instance_image | default(aws_default_image) }}"
flavor:
diff --git a/ansible/configs/rhel9-workshop/pre_software.yml b/ansible/configs/rhel9-workshop/pre_software.yml
index e49058448ba..626d18666ea 100644
--- a/ansible/configs/rhel9-workshop/pre_software.yml
+++ b/ansible/configs/rhel9-workshop/pre_software.yml
@@ -97,6 +97,30 @@
## While debugging things, ignore if this fails
ignore_errors: yes
+# - name: Create gpte-targethost config file
+# vars:
+# targethost: "{{ groups['bastions'][0] | regex_replace('\\..*$') }}.{{ guid }}{{ subdomain_base_suffix }}"
+# copy:
+# dest: "/root/RHEL9-Workshop/config/gpte-targethost.txt"
+# mode: "400"
+# content: "{{ targethost }}"
+# ## While debugging things, ignore if this fails
+# ignore_errors: yes
+
+ - name: Create gpte-pub-fqdn-shortname config files
+ vars:
+ shortname: "{{ item | regex_replace('\\..*$') }}"
+ hostname: "{{ item | regex_replace('\\..*$') }}.{{ guid }}{{ subdomain_base_suffix }}"
+ copy:
+ dest: "/root/RHEL9-Workshop/config/gpte-pub-fqdn-{{ shortname }}.txt"
+ mode: "400"
+ content: "{{ hostname }}"
+ ## While debugging things, ignore if this fails
+ ignore_errors: yes
+ with_items:
+ - "{{ groups['bastions'][0] }}"
+ - "{{ groups['nodes'] }}"
+
- name: "rhel9-prep : RUN the workshop installer"
shell:
chdir: "/root/RHEL9-Workshop"
diff --git a/ansible/configs/rhel9-workshop/start.yml b/ansible/configs/rhel9-workshop/start.yml
deleted file mode 100644
index b3b7934b374..00000000000
--- a/ansible/configs/rhel9-workshop/start.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- import_playbook: ../../include_vars.yml
-
-- name: Start instances
- hosts: localhost
- gather_facts: false
- become: false
- environment:
- AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
- AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
- tasks:
- - debug:
- msg: "Step 002 Post Infrastructure"
-
- - name: Start instances
- ec2:
- instance_tags:
- "aws:cloudformation:stack-name": "{{ project_tag }}"
- state: running
- region: "{{ aws_region }}"
-
diff --git a/ansible/configs/rhel9-workshop/stop.yml b/ansible/configs/rhel9-workshop/stop.yml
deleted file mode 100644
index 00703a412d1..00000000000
--- a/ansible/configs/rhel9-workshop/stop.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- import_playbook: ../../include_vars.yml
-
-- name: Stop instances
- hosts: localhost
- gather_facts: false
- become: false
- environment:
- AWS_ACCESS_KEY_ID: "{{aws_access_key_id}}"
- AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
- tasks:
- - debug:
- msg: "Step 002 Post Infrastructure"
-
- - name: Stop instances
- ec2:
- instance_tags:
- "aws:cloudformation:stack-name": "{{ project_tag }}"
- state: stopped
- region: "{{ aws_region }}"
-
diff --git a/ansible/configs/rosa-manual/default_vars.yml b/ansible/configs/rosa-manual/default_vars.yml
index 71b0eaf30c3..bf41a5e704e 100644
--- a/ansible/configs/rosa-manual/default_vars.yml
+++ b/ansible/configs/rosa-manual/default_vars.yml
@@ -28,11 +28,6 @@ cloud_tags:
course_name: "{{ course_name | default( 'unknown' ) }}"
platform: "{{ platform | default( 'unknown' ) }}"
-set_env_authorized_key: true
-env_authorized_key: "{{guid}}key"
-key_name: "rosa_key"
-ansible_ssh_private_key_file: ~/.ssh/{{key_name}}.pem
-
bastion_user_name: rosa
bastion_user_enable_sudo: false
bastion_user_use_password: false
diff --git a/ansible/configs/rosa-manual/destroy_env.yml b/ansible/configs/rosa-manual/destroy_env.yml
index 8d61756de7e..13a38a7328c 100644
--- a/ansible/configs/rosa-manual/destroy_env.yml
+++ b/ansible/configs/rosa-manual/destroy_env.yml
@@ -1,6 +1,29 @@
---
+- name: Destroy ROSA clusters (if any)
+ hosts: bastions
+ gather_facts: false
+ become: false
+ environment:
+ AWS_DEFAULT_REGION: "{{ aws_region }}"
+ tasks:
+ - name: Check for ROSA binary
+ ansible.builtin.stat:
+ path: /usr/local/bin/rosa
+ register: rosa_check
+ ignore_errors: true
+
+ - name: Get a list of ROSA clusters
+ when: rosa_check.stat.exists
+ ansible.builtin.command: "/usr/local/bin/rosa list cluster -o json"
+ register: r_rosa_list
+
+ - name: Try to gracefully uninstall ROSA cluster
+ when: rosa_check.stat.exists
+ include_tasks: uninstall_rosa.yml
+ loop: "{{ r_rosa_list.stdout | from_json }}"
+
- name: Import cloud provider specific destroy playbook
- import_playbook: "../../cloud_providers/{{ cloud_provider }}_destroy_env.yml"
+ ansible.builtin.import_playbook: "../../cloud_providers/{{ cloud_provider }}_destroy_env.yml"
- name: Bookbag
hosts: localhost
@@ -8,9 +31,9 @@
gather_facts: false
become: false
tasks:
- - name: Destroy Bookbag
- when: deploy_bookbag | bool
- include_role:
- name: bookbag
- vars:
- ACTION: destroy
+ - name: Destroy Bookbag
+ when: deploy_bookbag | bool
+ ansible.builtin.include_role:
+ name: bookbag
+ vars:
+ ACTION: destroy
diff --git a/ansible/configs/rosa-manual/ec2_instances_start.yaml b/ansible/configs/rosa-manual/ec2_instances_start.yaml
new file mode 100644
index 00000000000..3969c2b0e5e
--- /dev/null
+++ b/ansible/configs/rosa-manual/ec2_instances_start.yaml
@@ -0,0 +1,32 @@
+---
+- name: Get all EC2 instances
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: stopped
+ register: r_stopped_instances
+
+# Wk: Don't wait for instances to be running. Otherwise this is
+# a very sequential task. Just start the instances.
+# The next task will wait until all instances are running - but
+# this happens now in parallel instead of sequentially.
+- name: Ensure EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ item.instance_id }}"
+ state: started
+ wait: false
+ loop: "{{ r_stopped_instances.instances }}"
+
+- name: Wait until all EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: running
+ register: r_running_instances
+ until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
+ delay: 10
+ retries: 60
diff --git a/ansible/configs/rosa-manual/files/cloud_providers/ec2_cloud_template.j2 b/ansible/configs/rosa-manual/files/cloud_providers/ec2_cloud_template.j2
index b7f704b987d..f7055cc370f 100644
--- a/ansible/configs/rosa-manual/files/cloud_providers/ec2_cloud_template.j2
+++ b/ansible/configs/rosa-manual/files/cloud_providers/ec2_cloud_template.j2
@@ -204,7 +204,7 @@ Resources:
- {{ instance.image | default(aws_default_image) }}
{% endif %}
InstanceType: "{{instance['flavor'][cloud_provider]}}"
- KeyName: "{{instance.key_name | default(key_name)}}"
+ KeyName: "{{instance.key_name | default(ssh_provision_key_name) | default(key_name)}}"
{% if instance['UserData'] is defined %}
{{instance['UserData']}}
{% endif %}
diff --git a/ansible/configs/rosa-manual/pre_infra_ec2.yml b/ansible/configs/rosa-manual/pre_infra_ec2.yml
index f2458e21ff9..126f0eb5929 100644
--- a/ansible/configs/rosa-manual/pre_infra_ec2.yml
+++ b/ansible/configs/rosa-manual/pre_infra_ec2.yml
@@ -2,8 +2,9 @@
- name: Set rosa console password
set_fact:
rosa_console_password: >-
- {{ lookup('password', '/dev/null length=12') -}}
- {{- lookup('password', '/dev/null length=1 chars=digits') }}
+ {{ lookup('community.general.random_string',
+ length=12, min_lower=1, min_upper=1, special=false,
+ min_numeric=1) }}
- name: Get the current caller identity information
environment:
diff --git a/ansible/configs/rosa-manual/pre_software.yml b/ansible/configs/rosa-manual/pre_software.yml
index 4f526d70192..06b7e4667d8 100644
--- a/ansible/configs/rosa-manual/pre_software.yml
+++ b/ansible/configs/rosa-manual/pre_software.yml
@@ -1,23 +1,4 @@
---
-- name: Step 003 - Pre Software
- hosts: localhost
- connection: local
- gather_facts: false
- become: false
- tags:
- - step003
- - generate_env_keys
- tasks:
- - name: Generate SSH keys
- when: set_env_authorized_key | bool
- openssh_keypair:
- state: present
- path: "{{ output_dir }}/{{ env_authorized_key }}"
- comment: "{{ key_name }}"
- size: 4096
- type: rsa
- mode: 0400
-
# Cloudformation or Heat template or equivalent should tag all hosts with Project:{{ env_type }}-{{ guid }}
- name: Configure all hosts with Repositories, Common Files and Set environment key
hosts: all
@@ -29,8 +10,6 @@
roles:
- role: common
when: install_common | default( true ) | bool
- - role: set_env_authorized_key
- when: set_env_authorized_key | bool
tasks:
- name: Add GUID to /etc/skel/.bashrc
lineinfile:
diff --git a/ansible/configs/rosa-manual/software.yml b/ansible/configs/rosa-manual/software.yml
index 3f94e73bb51..d99ba296626 100644
--- a/ansible/configs/rosa-manual/software.yml
+++ b/ansible/configs/rosa-manual/software.yml
@@ -235,6 +235,36 @@
owner: "{{ bastion_user_name }}"
remote_src: true
+ - when: install_tektoncd_cli | default(false) | bool
+ become: true
+ block:
+ - name: Enable dnf copr chmouel/tektoncd-cli repository
+ ansible.builtin.command: >-
+ dnf copr enable chmouel/tektoncd-cli -y
+
+ - name: Install tektoncd-cli
+ ansible.builtin.package:
+ name: tektoncd-cli
+ state: present
+
+ - when: install_github_cli | default(false) | bool
+ become: true
+ block:
+ - name: Packages for the GitHub CLI
+ ansible.builtin.yum_repository:
+ name: github-cli
+ description: Packages for the GitHub CLI
+ file: github-cli
+ baseurl: https://cli.github.com/packages/rpm
+ gpgkey: https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x23F3D4EA75716059
+ gpgcheck: true
+ enabled: true
+
+ - name: Install gh-cli
+ ansible.builtin.package:
+ name: gh
+ state: present
+
- block:
- name: Set ROSA token warning boolean true
when: rosa_token == gpte_rosa_token
@@ -256,6 +286,7 @@
rosa_subdomain_base: "{{ subdomain_base }}"
rosa_user_password: "{{ rosa_user_password }}"
rosa_token_warning: "{{ rosa_token_warning }}"
+ rosa_console_url: "none"
- name: Print ROSA admin credentials as user.info
when: print_agnosticd_user_info | bool
diff --git a/ansible/configs/rosa-manual/uninstall_rosa.yml b/ansible/configs/rosa-manual/uninstall_rosa.yml
new file mode 100644
index 00000000000..c81fc347180
--- /dev/null
+++ b/ansible/configs/rosa-manual/uninstall_rosa.yml
@@ -0,0 +1,22 @@
+---
+- name: Destroy ROSA Cluster
+ ansible.builtin.command: >-
+ /usr/local/bin/rosa delete cluster -y --cluster={{ item.name }}
+ register: r_rosa_delete
+ failed_when: >-
+ r_rosa_delete.rc != 0 and 'ERR: There is no cluster with identifier or name' not in r_rosa_delete.stderr
+
+- name: Wait for ROSA deletion to complete
+ ansible.builtin.command: >-
+ /usr/local/bin/rosa describe cluster -c {{ item.name }}
+ register: rosa_cluster_status
+ ignore_errors: true
+ until: rosa_cluster_status.rc != 0
+ retries: 60
+ delay: 60
+
+- name: Make sure ROSA cluster is gone
+ ansible.builtin.fail:
+ msg: >
+ The ROSA cluster still exists after one hour of trying to delete. Please look at it manually.
+ when: rosa_cluster_status.rc == 0
diff --git a/ansible/configs/rosa/default_vars.yml b/ansible/configs/rosa/default_vars.yml
index 1b963916304..f27ee2eabe6 100644
--- a/ansible/configs/rosa/default_vars.yml
+++ b/ansible/configs/rosa/default_vars.yml
@@ -52,3 +52,5 @@ rosa_token: ""
# REQUIRES Ansible 2.7+ on the deployer host
# Empty by default - to be set by specific configurations
infra_workloads: []
+
+deploy_bookbag: false
diff --git a/ansible/configs/rosa/destroy_env.yml b/ansible/configs/rosa/destroy_env.yml
index 5ceeee2366c..266f1d42d9d 100644
--- a/ansible/configs/rosa/destroy_env.yml
+++ b/ansible/configs/rosa/destroy_env.yml
@@ -9,30 +9,22 @@
AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
tasks:
- - name: Get fact for cloudformation stack
- cloudformation_facts:
- stack_name: "{{ project_tag }}"
- register: stack_facts
+ - name: Run infra-ec2-create-inventory role
+ include_role:
+ name: infra-ec2-create-inventory
- - name: Grab and set stack creation time
- when: project_tag in stack_facts.ansible_facts.cloudformation
- vars:
- _stack_description: "{{ stack_facts.ansible_facts.cloudformation[project_tag].stack_description }}"
- set_fact:
- stack_creation_time: >-
- {{ _stack_description.creation_time | default(_stack_description.CreationTime) }}
- stack_status: >-
- {{ _stack_description.stack_status | default(_stack_description.StackStatus) }}
+ - name: Create local ssh provision facts (key already exists)
+ include_role:
+ name: create_ssh_provision_key
+ when:
+ - ssh_provision_key_name is undefined
- - name: Run infra-ec2-create-inventory role
- include_role:
- name: infra-ec2-create-inventory
- - name: SSH config setup
- when:
- - groups["bastions"] is defined
- - groups["bastions"] | length > 0
- include_role:
- name: infra-common-ssh-config-generate
+ - name: SSH config setup
+ when:
+ - groups["bastions"] is defined
+ - groups["bastions"] | length > 0
+ include_role:
+ name: infra-common-ssh-config-generate
- name: Set ssh extra args for all hosts, use ssh_config just created
hosts: all
@@ -40,9 +32,9 @@
any_errors_fatal: true
ignore_errors: false
tasks:
- - name: add -F option ansible_ssh_extra_args
- set_fact:
- ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{ hostvars['localhost'].ansible_ssh_config }}"
+ - name: add -F option ansible_ssh_extra_args
+ set_fact:
+ ansible_ssh_extra_args: "{{ ansible_ssh_extra_args|d() }} -F {{ hostvars['localhost'].ansible_ssh_config }}"
- name: Start all EC2 instances if they are stopped
hosts: localhost
@@ -54,33 +46,7 @@
AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
tasks:
- - name: Get all EC2 instances
- ec2_instance_info:
- filters:
- "tag:guid": "{{ guid }}"
- "tag:env_type": "{{ env_type }}"
- instance-state-name: stopped
- register: r_stopped_instances
-
- - name: Ensure EC2 instances are running
- when: r_stopped_instances.instances | length > 0
- ec2_instance:
- instance_ids: "{{ item.instance_id }}"
- state: started
- wait: false
- loop: "{{ r_stopped_instances.instances }}"
-
- - name: Wait until all EC2 instances are running
- when: r_stopped_instances.instances | length > 0
- ec2_instance_info:
- filters:
- "tag:guid": "{{ guid }}"
- "tag:env_type": "{{ env_type }}"
- instance-state-name: running
- register: r_running_instances
- until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
- delay: 10
- retries: 60
+ - include_tasks: ec2_instances_start.yaml
- name: Destroy ROSA
hosts: bastions
@@ -89,37 +55,21 @@
environment:
AWS_DEFAULT_REGION: "{{ aws_region }}"
tasks:
- - name: Check for ROSA binary
- stat:
- path: /usr/local/bin/rosa
- register: rosa_check
- ignore_errors: true
-
- - name: Try to gracefully uninstall ROSA if binary is installed, otherwise just nuke the sandbox
- when: rosa_check.stat.exists
- block:
- - set_fact:
- rosa_cluster_name: "rosa-{{ guid }}"
-
- - name: Destroy ROSA Cluster
- command: "/usr/local/bin/rosa delete cluster -y --cluster={{ rosa_cluster_name }}"
- register: r_rosa_delete
- failed_when: >-
- r_rosa_delete.rc != 0
- and 'ERR: There is no cluster with identifier or name' not in r_rosa_delete.stderr
-
- - name: Wait for ROSA deletion to complete
- command: "/usr/local/bin/rosa describe cluster -c {{ rosa_cluster_name }}"
- register: rosa_cluster_status
+ - name: Check for ROSA binary
+ stat:
+ path: /usr/local/bin/rosa
+ register: rosa_check
ignore_errors: true
- until: rosa_cluster_status.rc != 0
- retries: 60
- delay: 60
- - name: Make sure ROSA cluster is gone
- fail:
- msg: "The ROSA cluster still exists after one hour of trying to delete. Please look at it manually."
- when: rosa_cluster_status.rc == 0
+ - name: Get a list of ROSA clusters
+ when: rosa_check.stat.exists
+ ansible.builtin.command: "/usr/local/bin/rosa list cluster -o json"
+ register: r_rosa_list
+
+ - name: Try to gracefully uninstall ROSA cluster
+ when: rosa_check.stat.exists
+ include_tasks: uninstall_rosa.yml
+ loop: "{{ r_rosa_list.stdout | from_json }}"
- name: Import cloud provider specific destroy playbook
import_playbook: "../../cloud_providers/{{ cloud_provider }}_destroy_env.yml"
diff --git a/ansible/configs/rosa/ec2_instances_start.yaml b/ansible/configs/rosa/ec2_instances_start.yaml
new file mode 100644
index 00000000000..3969c2b0e5e
--- /dev/null
+++ b/ansible/configs/rosa/ec2_instances_start.yaml
@@ -0,0 +1,32 @@
+---
+- name: Get all EC2 instances
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: stopped
+ register: r_stopped_instances
+
+# Wk: Don't wait for instances to be running. Otherwise this is
+# a very sequential task. Just start the instances.
+# The next task will wait until all instances are running - but
+# this happens now in parallel instead of sequentially.
+- name: Ensure EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ item.instance_id }}"
+ state: started
+ wait: false
+ loop: "{{ r_stopped_instances.instances }}"
+
+- name: Wait until all EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: running
+ register: r_running_instances
+ until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
+ delay: 10
+ retries: 60
diff --git a/ansible/configs/rosa/post_software.yml b/ansible/configs/rosa/post_software.yml
index 48b03ddeb21..606ca7a6fd1 100644
--- a/ansible/configs/rosa/post_software.yml
+++ b/ansible/configs/rosa/post_software.yml
@@ -1,15 +1,47 @@
---
- name: Step 005 Post Software
hosts: bastions
- become: true
+ become: false
gather_facts: false
+ environment:
+ K8S_AUTH_VERIFY_SSL: "false"
tasks:
- debug:
msg: "Post-Software Steps starting"
-- name: deploy workloads
- when: infra_workloads | default("") | length > 0
- ansible.builtin.import_playbook: workloads.yml
+ - name: Download oc openshift-client via rosa cli
+ ansible.builtin.shell:
+ cmd: |
+ /usr/local/bin/rosa download oc 2>&1 | sed -ne 's/.* downloaded \(.*\)/\1/p'
+ register: _oc_archive_filename
+
+ - name: Unpack openshift-client
+ ansible.builtin.unarchive:
+ src: "/home/{{ ansible_user }}/{{ _oc_archive_filename.stdout }}"
+ dest: '/usr/local/bin'
+ remote_src: true
+ become: true
+
+ - name: Deploy workloads
+ when: infra_workloads | default("") | length > 0
+ ansible.builtin.include_tasks: workloads.yml
+
+ # - name: Install bookbag
+ #   when: deploy_bookbag | bool
+ #   ansible.builtin.include_tasks: install-bookbag.yaml
+
+- name: Bookbag
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ become: false
+ tasks:
+ - name: Deploy Bookbag
+ when: deploy_bookbag | bool
+ include_role:
+ name: bookbag
+ vars:
+ ACTION: create
- name: PostSoftware flight-check
hosts: localhost
diff --git a/ansible/configs/rosa/pre_infra_ec2.yml b/ansible/configs/rosa/pre_infra_ec2.yml
index f2458e21ff9..126f0eb5929 100644
--- a/ansible/configs/rosa/pre_infra_ec2.yml
+++ b/ansible/configs/rosa/pre_infra_ec2.yml
@@ -2,8 +2,9 @@
- name: Set rosa console password
set_fact:
rosa_console_password: >-
- {{ lookup('password', '/dev/null length=12') -}}
- {{- lookup('password', '/dev/null length=1 chars=digits') }}
+ {{ lookup('community.general.random_string',
+ length=12, min_lower=1, min_upper=1, special=false,
+ min_numeric=1) }}
- name: Get the current caller identity information
environment:
diff --git a/ansible/configs/rosa/requirements.yml b/ansible/configs/rosa/requirements.yml
index e0f10c64c47..762c4fe0d0e 100644
--- a/ansible/configs/rosa/requirements.yml
+++ b/ansible/configs/rosa/requirements.yml
@@ -8,3 +8,5 @@ collections:
version: 4.6.1
- name: ansible.posix
version: 1.3.0
+- name: community.okd
+ version: 2.3.0
diff --git a/ansible/configs/rosa/software.yml b/ansible/configs/rosa/software.yml
index 257580e9a6d..9b6fb46db8c 100644
--- a/ansible/configs/rosa/software.yml
+++ b/ansible/configs/rosa/software.yml
@@ -153,6 +153,13 @@
retries: 120
delay: 60
+ - tags:
+ - get_rosa_api_url
+ block:
+ - name: Get ROSA API URL
+ shell: "/usr/local/bin/rosa describe cluster -c {{ rosa_cluster_name }} |grep '^API URL:'|awk '{print $3}'"
+ register: rosa_api_url
+
- tags:
- get_rosa_console_url
block:
@@ -201,7 +208,9 @@
owner: "{{ bastion_user_name }}"
remote_src: true
- - when:
+ - tags:
+ - emit_rosa_user_data
+ when:
- rosa_admin_result is defined
- rosa_console_url is defined
block:
@@ -225,6 +234,7 @@
rosa_subdomain_base: "{{ subdomain_base }}"
rosa_user_password: "{{ rosa_user_password }}"
rosa_console_url: "{{ rosa_console_url.stdout }}"
+ rosa_api_url: "{{ rosa_api_url.stdout }}"
rosa_admin_password: "{{ rosa_admin_result.stdout }}"
rosa_token_warning: "{{ rosa_token_warning }}"
@@ -264,3 +274,9 @@
https://console.redhat.com/openshift. It is recommended that you generate and provide your own ROSA token when deploying
this catalog item so that you have full functionality and control of your cluster. You can generate a rosa token from
your Red Hat console account here: https://console.redhat.com/openshift/token/rosa
+
+ - name: Emit a rosa_console_url=none when undefined
+ when: rosa_console_url is not defined
+ agnosticd_user_info:
+ data:
+ rosa_console_url: "none"
diff --git a/ansible/configs/rosa/uninstall_rosa.yml b/ansible/configs/rosa/uninstall_rosa.yml
new file mode 100644
index 00000000000..c81fc347180
--- /dev/null
+++ b/ansible/configs/rosa/uninstall_rosa.yml
@@ -0,0 +1,22 @@
+---
+- name: Destroy ROSA Cluster
+ ansible.builtin.command: >-
+ /usr/local/bin/rosa delete cluster -y --cluster={{ item.name }}
+ register: r_rosa_delete
+ failed_when: >-
+ r_rosa_delete.rc != 0 and 'ERR: There is no cluster with identifier or name' not in r_rosa_delete.stderr
+
+- name: Wait for ROSA deletion to complete
+ ansible.builtin.command: >-
+ /usr/local/bin/rosa describe cluster -c {{ item.name }}
+ register: rosa_cluster_status
+ ignore_errors: true
+ until: rosa_cluster_status.rc != 0
+ retries: 60
+ delay: 60
+
+- name: Make sure ROSA cluster is gone
+ ansible.builtin.fail:
+ msg: >
+ The ROSA cluster still exists after one hour of trying to delete. Please look at it manually.
+ when: rosa_cluster_status.rc == 0
diff --git a/ansible/configs/rosa/workloads.yml b/ansible/configs/rosa/workloads.yml
index 713d84feba3..8657b60d3c5 100644
--- a/ansible/configs/rosa/workloads.yml
+++ b/ansible/configs/rosa/workloads.yml
@@ -1,14 +1,12 @@
---
-- name: install workloads
+- name: Install workloads
hosts: bastions
gather_facts: false
run_once: true
become: false
- environment:
- K8S_AUTH_VERIFY_SSL: false
tasks:
- - name: setup k8s virtualenv
+ - name: Setup k8s virtualenv
vars:
ansible_become: true
host_virtualenv_path: /opt/virtualenvs/k8s
@@ -16,16 +14,17 @@
ansible.builtin.include_role:
name: host_virtualenv
- - name: set ansible python interpreter to k8s virtualenv
+ - name: Set ansible python interpreter to k8s virtualenv
ansible.builtin.set_fact:
ansible_python_interpreter: /opt/virtualenvs/k8s/bin/python
- - name: generate cluster api
+ - name: Generate cluster api
ansible.builtin.set_fact:
rosa_api_server_url: "https://api{{ rosa_console_url.stdout | regex_search('(?<=\\.apps).*') }}:6443"
- - name: run authentication
- k8s_auth:
+ - name: Run authentication
+ community.okd.openshift_auth:
+ validate_certs: false
host: "{{ rosa_api_server_url }}"
username: cluster-admin
password: "{{ rosa_admin_result.stdout }}"
@@ -37,18 +36,18 @@
- _r_kube_auth.k8s_auth is defined
- _r_kube_auth.k8s_auth.api_key is defined
- - name: create a directory if it does not exist
+ - name: Create a directory if it does not exist
ansible.builtin.file:
path: ~/.kube
state: directory
mode: 0755
- - name: generate kubeconfig
+ - name: Generate kubeconfig
ansible.builtin.template:
src: templates/kubeconfig.j2
dest: ~/.kube/config
- - name: install ocp-infra-workloads
+ - name: Install ocp-infra-workloads
vars:
ACTION: "provision"
ocp_username: "system:admin"
diff --git a/ansible/configs/sap-integration/custom_workloads.yml b/ansible/configs/sap-integration/custom_workloads.yml
index a1472200a74..8b3743a5546 100644
--- a/ansible/configs/sap-integration/custom_workloads.yml
+++ b/ansible/configs/sap-integration/custom_workloads.yml
@@ -48,7 +48,6 @@
loop:
- ocs-storagecluster-cephfs
- ocs-storagecluster-ceph-rbd
- - openshift-storage.noobaa.io
- name: Patch Storage Class standard to remove it as the default storage class
k8s:
@@ -214,7 +213,7 @@
resource_definition: "{{ lookup( 'template', './files/k8s/camelk_subscription.j2' ) | from_yaml }}"
- name: Wait for the status of the Camel-K subscription to not be empty
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: red-hat-camel-k
diff --git a/ansible/configs/sap-integration/default_vars.yml b/ansible/configs/sap-integration/default_vars.yml
index 82dfe3e2f77..c35231c8be0 100644
--- a/ansible/configs/sap-integration/default_vars.yml
+++ b/ansible/configs/sap-integration/default_vars.yml
@@ -35,7 +35,7 @@ student_name: lab-user
# TODO: What does this really do besides run the role?
set_env_authorized_key: true
env_authorized_key: "{{guid}}key"
-ssh_provision_key_name: "{{ env_authorized_key }}"
+#ssh_provision_key_name: "{{ env_authorized_key }}"
# Run the bastion-lite role
install_bastion: true
diff --git a/ansible/configs/sap-integration/default_vars_ec2.yml b/ansible/configs/sap-integration/default_vars_ec2.yml
index 3a76c9d0815..de51a0da438 100644
--- a/ansible/configs/sap-integration/default_vars_ec2.yml
+++ b/ansible/configs/sap-integration/default_vars_ec2.yml
@@ -68,7 +68,7 @@ sap_extra_device: vdb
# Bastion Configuration
bastion_instance_type: "t3a.medium"
-bastion_instance_image: RHEL81GOLD
+bastion_instance_image: RHEL86GOLD-latest
# Root Filesystem Size
bastion_rootfs_size: 30
diff --git a/ansible/configs/sap-integration/destroy_env.yml b/ansible/configs/sap-integration/destroy_env.yml
index 38e16f0d534..c1b075108d2 100644
--- a/ansible/configs/sap-integration/destroy_env.yml
+++ b/ansible/configs/sap-integration/destroy_env.yml
@@ -1,3 +1,3 @@
---
- name: Import cloud provider specific destroy playbook
- import_playbook: "./destroy_env_{{ cloud_provider }}.yml"
\ No newline at end of file
+ import_playbook: "./destroy_env_{{ cloud_provider }}.yml"
diff --git a/ansible/configs/sap-integration/destroy_env_ec2.yml b/ansible/configs/sap-integration/destroy_env_ec2.yml
index efae72124e2..29967fa50e6 100644
--- a/ansible/configs/sap-integration/destroy_env_ec2.yml
+++ b/ansible/configs/sap-integration/destroy_env_ec2.yml
@@ -11,31 +11,16 @@
AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
tasks:
- - name: Create infra key
- include_role:
- name: infra-ec2-ssh-key
- when:
- - install_infra_ssh_key | default(false) | bool
-
- - name: Get fact for cloudformation stack
- cloudformation_facts:
- stack_name: "{{ project_tag }}"
- register: stack_facts
-
- - name: Grab and set stack creation time
- when: project_tag in stack_facts.ansible_facts.cloudformation
- vars:
- _stack_description: "{{ stack_facts.ansible_facts.cloudformation[project_tag].stack_description }}"
- set_fact:
- stack_creation_time: >-
- {{ _stack_description.creation_time | default(_stack_description.CreationTime) }}
- stack_status: >-
- {{ _stack_description.stack_status | default(_stack_description.StackStatus) }}
-
- name: Run infra-ec2-create-inventory role
include_role:
name: infra-ec2-create-inventory
+ - name: Create local ssh provision facts (key already exists)
+ include_role:
+ name: create_ssh_provision_key
+ when:
+ - ssh_provision_key_name is undefined
+
- name: SSH config setup
when:
- groups["bastions"] is defined
@@ -63,37 +48,7 @@
AWS_SECRET_ACCESS_KEY: "{{aws_secret_access_key}}"
AWS_DEFAULT_REGION: "{{aws_region_final|d(aws_region)}}"
tasks:
- - name: Get all EC2 instances
- ec2_instance_info:
- filters:
- "tag:guid": "{{ guid }}"
- "tag:env_type": "{{ env_type }}"
- instance-state-name: stopped
- register: r_stopped_instances
-
- # Wk: Don't wait for instances to be running. Otherwise this is
- # a very sequential task. Just start the instances.
- # The next task will wait until all instances are running - but
- # this happens now in parallel instead of sequentially.
- - name: Ensure EC2 instances are running
- when: r_stopped_instances.instances | length > 0
- ec2_instance:
- instance_ids: "{{ item.instance_id }}"
- state: started
- wait: false
- loop: "{{ r_stopped_instances.instances }}"
-
- - name: Wait until all EC2 instances are running
- when: r_stopped_instances.instances | length > 0
- ec2_instance_info:
- filters:
- "tag:guid": "{{ guid }}"
- "tag:env_type": "{{ env_type }}"
- instance-state-name: running
- register: r_running_instances
- until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
- delay: 10
- retries: 60
+ - include_tasks: ec2_instances_start.yaml
- name: Have the OpenShift installer cleanup what it did
hosts: bastions
diff --git a/ansible/configs/sap-integration/ec2_instances_start.yaml b/ansible/configs/sap-integration/ec2_instances_start.yaml
new file mode 100644
index 00000000000..3969c2b0e5e
--- /dev/null
+++ b/ansible/configs/sap-integration/ec2_instances_start.yaml
@@ -0,0 +1,32 @@
+---
+- name: Get all EC2 instances
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: stopped
+ register: r_stopped_instances
+
+# Wk: Don't wait for instances to be running. Otherwise this is
+# a very sequential task. Just start the instances.
+# The next task will wait until all instances are running - but
+# this happens now in parallel instead of sequentially.
+- name: Ensure EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ item.instance_id }}"
+ state: started
+ wait: false
+ loop: "{{ r_stopped_instances.instances }}"
+
+- name: Wait until all EC2 instances are running
+ when: r_stopped_instances.instances | length > 0
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:guid": "{{ guid }}"
+ "tag:env_type": "{{ env_type }}"
+ instance-state-name: running
+ register: r_running_instances
+ until: r_running_instances.instances | length | int >= r_stopped_instances.instances | length | int
+ delay: 10
+ retries: 60
diff --git a/ansible/configs/multi-cloud-capsule/files/cloud_providers/osp_cloud_template_master.j2 b/ansible/configs/sap-integration/files/cloud_providers/osp_cloud_template_master.j2
similarity index 91%
rename from ansible/configs/multi-cloud-capsule/files/cloud_providers/osp_cloud_template_master.j2
rename to ansible/configs/sap-integration/files/cloud_providers/osp_cloud_template_master.j2
index 64ee358fd86..de6aea54e61 100644
--- a/ansible/configs/multi-cloud-capsule/files/cloud_providers/osp_cloud_template_master.j2
+++ b/ansible/configs/sap-integration/files/cloud_providers/osp_cloud_template_master.j2
@@ -43,6 +43,10 @@ resources:
name: "{{ guid }}-{{ network['name'] }}-router"
external_gateway_info:
network: "{{ provider_network }}"
+{% if osp_public_subnet is defined %}
+ external_fixed_ips:
+ - subnet: "{{ osp_public_subnet }}"
+{% endif %}
{{ network['name'] }}-router_private_interface:
type: OS::Neutron::RouterInterface
@@ -55,7 +59,8 @@ resources:
###################
# Security groups #
###################
-{% for security_group in security_groups | list %}
+{% for security_group in security_groups | list + default_security_groups | list
+ if security_group.name in used_security_groups %}
{{ security_group['name'] }}:
type: OS::Neutron::SecurityGroup
properties:
@@ -124,6 +129,9 @@ resources:
type: OS::Neutron::FloatingIP
properties:
floating_network: {{ provider_network }}
+{% if osp_public_subnet is defined %}
+ floating_subnet: "{{ osp_public_subnet }}"
+{% endif %}
depends_on:
- {{ instance['network'] | default('default') }}-router_private_interface
@@ -141,11 +149,18 @@ resources:
flavor: {{ instance.flavor.osp }}
key_name: {get_resource: {{ guid }}-infra_key}
+ config_drive: True
block_device_mapping_v2:
- image: {{ instance.image_id | default(instance.image) }}
delete_on_termination: true
volume_size: {{ instance['rootfs_size'] | default(osp_default_rootfs_size) }}
boot_index: 0
+ {% if iname == "bastion-" + guid %}
+ - image: {{ instance.sofware_image_id | default("software-sap") }}
+ delete_on_termination: true
+ volume_size: {{ instance['softwarefs_size'] }}
+ boot_index: -1
+ {% endif %}
user_data: |
#cloud-config
@@ -199,6 +214,7 @@ resources:
{% endfor %}
{% endfor %}
+
outputs:
{{ guid }}-infra_key:
diff --git a/ansible/configs/sap-integration/files/requirements_k8s.txt b/ansible/configs/sap-integration/files/requirements_k8s.txt
index d9d822f79e5..9855bf7a124 100644
--- a/ansible/configs/sap-integration/files/requirements_k8s.txt
+++ b/ansible/configs/sap-integration/files/requirements_k8s.txt
@@ -25,6 +25,7 @@ MarkupSafe==2.0.1
oauthlib==3.1.1
openshift==0.13.1
paramiko==2.7.1
+passlib==1.7.4
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.19
diff --git a/ansible/configs/sap-integration/lifecycle_hook_post_start.yml b/ansible/configs/sap-integration/lifecycle_hook_post_start.yml
index 0dc39e8662f..2f3b8a94481 100644
--- a/ansible/configs/sap-integration/lifecycle_hook_post_start.yml
+++ b/ansible/configs/sap-integration/lifecycle_hook_post_start.yml
@@ -7,38 +7,33 @@
gather_facts: false
become: false
tasks:
- - when: cloud_provider == 'ec2'
- name: Run infra-ec2-create-inventory Role
- include_role:
- name: infra-ec2-create-inventory
-
- - when: cloud_provider == 'osp'
- name: Run infra-osp-create-inventory Role
- include_role:
- name: infra-osp-create-inventory
+ - when: cloud_provider == 'ec2'
+ name: Run infra-ec2-create-inventory Role
+ include_role:
+ name: infra-ec2-create-inventory
- - when: cloud_provider == 'azure'
- name: Run infra-azure-create-inventory Role
- include_role:
- name: infra-azure-create-inventory
+ - when: cloud_provider == 'osp'
+ name: Run infra-osp-create-inventory Role
+ include_role:
+ name: infra-osp-create-inventory
- - name: Run Common SSH Config Generator Role
- include_role:
- name: infra-common-ssh-config-generate
- when: "'bastions' in groups"
+ - name: Azure post start actions
+ when: cloud_provider == 'azure'
+ include_role:
+ name: infra-azure-create-inventory
- name: Set ansible_ssh_extra_args
hosts:
- - all:!windows:!network
+ - all:!windows:!network
gather_facts: false
any_errors_fatal: true
ignore_errors: false
tasks:
- - name: Set facts for remote access
- set_fact:
- ansible_ssh_extra_args: >-
- {{ ansible_ssh_extra_args|d() }}
- -F {{hostvars.localhost.output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf
+ - name: Set facts for remote access
+ set_fact:
+ ansible_ssh_extra_args: >-
+ {{ ansible_ssh_extra_args|d() }}
+ -F {{hostvars.localhost.output_dir}}/{{ env_type }}_{{ guid }}_ssh_conf
- name: Run recover cluster actions
hosts: bastions
@@ -46,17 +41,21 @@
become: false
gather_facts: false
tasks:
- - name: Set Ansible Python interpreter to k8s virtualenv
- set_fact:
- ansible_python_interpreter: /opt/virtualenvs/k8s/bin/python
-
- - name: Perform actions on start
- when: ACTION == 'start'
- block:
- - name: Approve CertificateSigningRequests
- include_role:
- name: ocp4_approve_certificate_signing_requests
-
- - name: Cleanup failed pods
- include_role:
- name: ocp_cleanup_failed_pods
+ - name: Set Ansible Python interpreter to k8s virtualenv
+ set_fact:
+ ansible_python_interpreter: /opt/virtualenvs/k8s/bin/python
+
+ - name: Perform actions on start
+ when: ACTION == 'start'
+ block:
+ - name: Test the bastion host is available, if not skip approve csr and pod cleanup
+ wait_for_connection:
+ timeout: 60
+ register: bwait
+ ignore_errors: true
+
+ - when: bwait is successful
+ block:
+ - name: Approve CertificateSigningRequests
+ include_role:
+ name: ocp4_approve_certificate_signing_requests
diff --git a/ansible/configs/sap-integration/pre_software.yml b/ansible/configs/sap-integration/pre_software.yml
index f667ad49ce1..04bce787fb3 100644
--- a/ansible/configs/sap-integration/pre_software.yml
+++ b/ansible/configs/sap-integration/pre_software.yml
@@ -9,25 +9,48 @@
tasks:
- debug:
msg: "Step 003 - Pre Software"
+
+- name: Remove satellite registration
+ hosts: nodes
+ gather_facts: false
+ become: true
+ tags:
+ - step004
+ tasks:
+ - name: unregister
+ redhat_subscription:
+ state: absent
+
+  - name: remove katello package
+ yum:
+ name: katello-ca-consumer*
+ state: absent
+
+- name: Configure all hosts with Repositories
+ hosts: all
+ become: true
+ gather_facts: false
+ tags:
+ - step004
+ - common_tasks
+ tasks:
+ - import_role:
+ name: set-repositories
+ when: repo_method is defined
-- name: Configure all hosts with repositories, common files and set environment key
+- name: Install common packages and set environment key
hosts:
- - all:!windows
+ - all
become: true
- gather_facts: False
+ gather_facts: false
tags:
- - step003
- - common_tasks
+ - step004
+ - common_tasks
roles:
- - { role: "set-repositories", when: 'repo_method is defined' }
- - { role: "common", when: 'install_common | bool' }
- - { role: "set_env_authorized_key", when: 'set_env_authorized_key | bool' }
- tasks:
- - name: Add GUID to /etc/skel/.bashrc
- lineinfile:
- path: "/etc/skel/.bashrc"
- regexp: "^export GUID"
- line: "export GUID={{ guid }}"
+ - role: common
+ when: install_common
+ - role: set_env_authorized_key
+ when: set_env_authorized_key
- name: Step 003.1 - Configuring Bastion Hosts
hosts: bastions
@@ -39,35 +62,43 @@
- step003.1
- bastion_tasks
tasks:
- - name: Setup Student SSH Key
- when:
- - install_student_user | bool
- - student_name is defined
- - env_authorized_key is defined
- block:
- - name: Copy SSH private key to student user .ssh directory
- copy:
- src: "/root/.ssh/{{env_authorized_key}}.pem"
- dest: "/home/{{ student_name }}/.ssh/{{env_authorized_key}}.pem"
- mode: 0600
- owner: "{{ student_name }}"
- remote_src: true
-
- - name: Copy SSH public key to student user .ssh directory
- copy:
- src: "/root/.ssh/{{env_authorized_key}}.pub"
- dest: "/home/{{ student_name }}/.ssh/{{env_authorized_key}}.pub"
- mode: 0600
- owner: "{{ student_name }}"
- remote_src: true
+ - include_role:
+ name: bastion
+ when: install_bastion | bool
- - name: Copy SSH config to student user .ssh directory
- copy:
- src: "/root/.ssh/config"
- dest: "/home/{{ student_name }}/.ssh/config"
- mode: 0600
- owner: "{{ student_name }}"
- remote_src: true
+ - include_role:
+ name: bastion-student-user
+ when: install_student_user | bool
+
+ - name: Setup Student SSH Key
+ when:
+ - install_student_user | bool
+ - student_name is defined
+ - env_authorized_key is defined
+ block:
+ - name: Copy SSH private key to student user .ssh directory
+ copy:
+ src: "/root/.ssh/{{env_authorized_key}}.pem"
+ dest: "/home/{{ student_name }}/.ssh/{{env_authorized_key}}.pem"
+ mode: 0600
+ owner: "{{ student_name }}"
+ remote_src: true
+
+ - name: Copy SSH public key to student user .ssh directory
+ copy:
+ src: "/root/.ssh/{{env_authorized_key}}.pub"
+ dest: "/home/{{ student_name }}/.ssh/{{env_authorized_key}}.pub"
+ mode: 0600
+ owner: "{{ student_name }}"
+ remote_src: true
+
+ - name: Copy SSH config to student user .ssh directory
+ copy:
+ src: "/root/.ssh/config"
+ dest: "/home/{{ student_name }}/.ssh/config"
+ mode: 0600
+ owner: "{{ student_name }}"
+ remote_src: true
- name: Create a Python3 VirtualEnv for use in the k8s Ansible tasks
hosts: bastions
diff --git a/ansible/configs/sap-integration/requirements.yml b/ansible/configs/sap-integration/requirements.yml
index b3f8089855c..ca81d5990a8 100644
--- a/ansible/configs/sap-integration/requirements.yml
+++ b/ansible/configs/sap-integration/requirements.yml
@@ -7,9 +7,13 @@ roles:
version: v0.17
collections:
+- name: kubernetes.core
+ version: 2.3.0
- name: amazon.aws
version: 2.2.0
- name: ansible.posix
version: 1.3.0
- name: openstack.cloud
- version: 1.7.2
+ version: 2.1.0
+- name: community.general
+ version: 4.6.1
diff --git a/ansible/configs/sap-integration/sample_vars.yml b/ansible/configs/sap-integration/sample_vars.yml
deleted file mode 100644
index 07fdf6a448c..00000000000
--- a/ansible/configs/sap-integration/sample_vars.yml
+++ /dev/null
@@ -1,68 +0,0 @@
-cloud_provider: osp
-env_type: sap-integration
-output_dir: /output
-
-
-guid: sapi
-
-repo_method: satellite
-satellite_org: MY_SATELLITE_ORG
-satellite_activationkey: MY_ACTIVATION_KEY
-satellite_url: MY_SATELLITE_URL
-use_content_view: true
-
-ocp4_pull_secret: 'MY OCP PULL SECRET'
-
-# Authenication credentials for OpenStack in order to create the things.
-# These should be included with your secrets, but are listed here for reference
-# osp_auth_url:
-# osp_auth_username:
-# osp_auth_password:
-# osp_auth_cloud:
-# osp_auth_project_domain: #usually set to "default"
-# osp_auth_user_domain: #usually set to "default"
-# osp_project_name:
-# osp_project_id:
-# osp_project_create: false (this must be false when tested in sandbox)
-
-# These should be included with your secrets, but are listed here for reference
-osp_cluster_dns_server: FROMSECRET
-osp_cluster_dns_zone: FROMSECRET
-ddns_key_name: FROMSECRET
-ddns_key_secret: FROMSECRET
-
-osp_use_swift: false
-software_to_deploy: openshift4
-
-# -------------------------------------------------------------------
-# Infra Workload Variables
-# -------------------------------------------------------------------
-
-# Authentication (HT Passwd)
-ocp4_workload_authentication_idm_type: htpasswd
-ocp4_workload_authentication_admin_user: admin
-# When no password specified it is generated
-ocp4_workload_authentication_htpasswd_admin_password: PASSWORD_FOR_ADMIN
-ocp4_workload_authentication_htpasswd_user_base: integration
-# When no password specified it is generated
-# ocp4_workload_authentication_htpasswd_user_password: ocp_student
-
-# Create 1 student users
-ocp4_workload_authentication_htpasswd_user_count: 1
-
-# Remove the standard kubeadmin user
-ocp4_workload_authentication_remove_kubeadmin: true
-
-
-email: 'myredhatemail@redhat.com'
-rh_internal: true
-
-pull_secret_token: 'TOKEN FOR PULLING IMAGES FROM registry.redhat.io'
-
-# Variables required for the side by side microservices
-
-s4hana_host: FROMSECRET
-s4hana_port: FROMSECRET
-s4hana_portjco: FROMSECRET
-s4hana_user: FROMSECRET
-s4hana_password: FROMSECRET
\ No newline at end of file
diff --git a/ansible/configs/service-interconnect-binder/patch_instruction.yml b/ansible/configs/service-interconnect-binder/patch_instruction.yml
new file mode 100644
index 00000000000..beb2e698352
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/patch_instruction.yml
@@ -0,0 +1,98 @@
+---
+
+- name: Evaluate namespace if not exists -> solution-explorer
+ kubernetes.core.k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ webapp_namespace }}"
+ state: present
+ api_key: "{{ __r_aws_cluster.k8s_auth.api_key }}"
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ validate_certs: false
+
+- name: Set temp dir
+ ansible.builtin.set_fact:
+ webapp_operator_tmp: "/tmp/webapp-operator"
+
+
+- name: Ensure example directory exists
+ ansible.builtin.file:
+ path: "{{ webapp_operator_tmp }}"
+ state: directory
+ mode: "u+rwx"
+
+- name: Download example files
+ ansible.builtin.unarchive:
+ src: "https://github.com/RedHat-Middleware-Workshops/tutorial-web-app-operator/archive/v0.0.63-workshop-1.zip"
+ dest: "{{ webapp_operator_tmp }}"
+ remote_src: true
+
+- name: Create WebApp Operator Resources
+ kubernetes.core.k8s:
+ state: present
+ namespace: "{{ webapp_namespace }}"
+ src: "{{ webapp_operator_tmp }}/tutorial-web-app-operator-0.0.63-workshop-1/deploy/{{ item }}"
+ api_key: "{{ __r_aws_cluster.k8s_auth.api_key }}"
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ validate_certs: false
+ loop: "{{ ocp4_workload_service_interconnect_webapp_operator_resource_items }}"
+
+- name: Add additional walkthrough locations in the default list
+ ansible.builtin.set_fact:
+ ocp4_workload_service_interconnect_webapp_walkthrough_locations: "https://github.com/RedHat-Middleware-Workshops/service-interconnect-lab-instructions.git"
+
+- name: Retrieve route subdomains
+ ansible.builtin.set_fact:
+ aws_route_subdomain: "{{ aws_a_provision_data.openshift_console_url | replace('https://console-openshift-console.','') }}"
+ aws_console_url: "{{ aws_a_provision_data.openshift_console_url }}"
+ azure_route_subdomain: "{{ azure_a_provision_data.openshift_console_url | replace('https://console-openshift-console.','') }}"
+ azure_console_url: "{{ azure_a_provision_data.openshift_console_url }}"
+ rhel_hostname: "{{ rhel_a_provision_data.hostname }}"
+
+- name: Retrieve additional services
+ ansible.builtin.set_fact:
+ solution_explorer_services: '{{ lookup("template", "instructions-services.json.j2") }}'
+
+- name: Create WebApp custom resource
+ kubernetes.core.k8s:
+ state: present
+ resource_definition: "{{ lookup('template', 'instructions-webapp.yaml.j2') }}"
+ api_key: "{{ __r_aws_cluster.k8s_auth.api_key }}"
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ validate_certs: false
+
+- name: Get webapp secure route
+ kubernetes.core.k8s_info:
+ kind: Route
+ name: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ namespace: "{{ webapp_namespace }}"
+ api_version: route.openshift.io/v1
+ api_key: "{{ __r_aws_cluster.k8s_auth.api_key }}"
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ validate_certs: false
+ register: webapp_secure_route
+ until:
+ - webapp_secure_route.resources is defined
+ - webapp_secure_route.resources | length > 0
+ retries: 10
+ delay: 30
+
+- name: Retrieve Route
+ ansible.builtin.set_fact:
+ webapp_secure_route: "{{ webapp_secure_route.resources[0].spec.host }}"
+
+- name: Create OpenShift OAuth client
+ kubernetes.core.k8s:
+ state: present
+ resource_definition: "{{ lookup('template', 'oauthclient.yaml.j2') }}"
+ api_key: "{{ __r_aws_cluster.k8s_auth.api_key }}"
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ validate_certs: false
+
+- name: Create OpenShift Group
+ kubernetes.core.k8s:
+ state: present
+ resource_definition: "{{ lookup('template', 'instructions-group.yaml.j2') }}"
+ api_key: "{{ __r_aws_cluster.k8s_auth.api_key }}"
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ validate_certs: false
diff --git a/ansible/configs/service-interconnect-binder/post_software.yml b/ansible/configs/service-interconnect-binder/post_software.yml
index 096d10c5450..f83a3972227 100644
--- a/ansible/configs/service-interconnect-binder/post_software.yml
+++ b/ansible/configs/service-interconnect-binder/post_software.yml
@@ -16,7 +16,7 @@
## -------------------------------------------
## Setup AWS Cluster Connections
## -------------------------------------------
- - name: Add rhel_a host to inventory
+ - name: Add AWS cluster host to inventory
ansible.builtin.add_host:
name: "{{ aws_a_provision_data.bastion_public_hostname }}"
groups: aws_bastion
@@ -25,10 +25,44 @@
ansible_user: "ec2-user"
remote_user: "ec2-user"
+ - name: Log into OpenShift Cluster on AWS
+ k8s_auth:
+ host: "{{ aws_a_provision_data.openshift_api_url }}"
+ username: "{{ aws_a_provision_data.openshift_cluster_admin_username }}"
+ password: "{{ aws_a_provision_data.openshift_cluster_admin_password }}"
+ validate_certs: false
+ register: __r_aws_cluster
+ retries: 240
+ delay: 15
+ until:
+ - __r_aws_cluster.k8s_auth.api_key is defined
+
+ - name: Patch Instructions
+ ansible.builtin.include_tasks:
+ file: patch_instruction.yml
+ vars:
+ webapp_namespace: "solution-explorer"
+ ocp_username: '{{ aws_a_provision_data.openshift_cluster_admin_username | default("admin", True)}}'
+ ocp4_workload_service_interconnect_webapp_operator_tag: 0.0.63-workshop-1
+ ocp4_workload_service_interconnect_webapp_client_id: tutorial-web-app
+ ocp4_workload_service_interconnect_webapp_group_name: dedicated-admins
+ ocp4_workload_service_interconnect_webapp_operator_template_path: /home/tutorial-web-app-operator/deploy/template/tutorial-web-app.yml
+ ocp4_workload_service_interconnect_webapp_operator_resources: >-
+ https://github.com/RedHat-Middleware-Workshops/tutorial-web-app-operator/archive/v{{ocp4_workload_service_interconnect_webapp_operator_tag}}.zip
+ ocp4_workload_service_interconnect_webapp_operator_resource_items:
+ - rbac.yaml
+ - sa.yaml
+ - crd.yaml
+ - operator.yaml
+ ocp4_workload_service_interconnect_webapp_walkthrough_locations:
+ - "https://github.com/RedHat-Middleware-Workshops/service-interconnect-lab-instructions.git"
+
+
+
## -------------------------------------------
## Setup Azure Cluster Connections
## -------------------------------------------
- - name: Add rhel_a host to inventory
+ - name: Add Azure host to inventory
ansible.builtin.add_host:
name: "{{ azure_a_provision_data.bastion_public_hostname }}"
groups: azure_bastion
@@ -37,6 +71,18 @@
ansible_user: "ec2-user"
remote_user: "ec2-user"
+ - name: Log into OpenShift Cluster on Azure
+ k8s_auth:
+ host: "{{ azure_a_provision_data.openshift_api_url }}"
+ username: "{{ azure_a_provision_data.openshift_cluster_admin_username }}"
+ password: "{{ azure_a_provision_data.openshift_cluster_admin_password }}"
+ validate_certs: false
+ register: __r_azure_cluster
+ retries: 240
+ delay: 15
+ until:
+ - __r_azure_cluster.k8s_auth.api_key is defined
+
## -------------------------------------------
## Setup RHEL Host Connections
## -------------------------------------------
@@ -50,34 +96,118 @@
remote_user: "ec2-user"
## -----------------------------------------------
-## Deploy Application pods on RHEL bastion
+## Deploy Skupper on AWS OpenShift Cluster
+## -----------------------------------------------
+- name: Login to AWS bastion
+ hosts: aws_bastion
+ tasks:
+ - name: Automating skupper steps for event
+ when: purpose == "event"
+ block:
+      - name: Skupper install block
+ become: true
+ vars:
+ skupper_cli:
+ force: "True"
+ block:
+ - name: CLI Install
+ ansible.builtin.include_role:
+ name: skupper.network.skupper_cli_install
+
+ - name: Include skupper tasks
+ ansible.builtin.include_tasks:
+ file: skupper_aws_cluster.yml
+
+## -----------------------------------------------
+## Deploy Skupper on Azure OpenShift Cluster
## -----------------------------------------------
+- name: Login to azure bastion
+ hosts: azure_bastion
+ tasks:
+ - name: Automating skupper steps for event
+ when: purpose == "event"
+ block:
+ - name: Set fact
+ ansible.builtin.set_fact:
+ student_name: "{{ rhel_a_provision_data.ssh_username }}"
+ student_group: "{{ rhel_a_provision_data.ssh_username }}"
+
+      - name: Skupper install block
+ become: true
+ vars:
+ skupper_cli:
+ force: "True"
+ block:
+ - name: CLI Install
+ ansible.builtin.include_role:
+ name: skupper.network.skupper_cli_install
+ - name: Include skupper tasks
+ ansible.builtin.include_tasks:
+ file: skupper_azure_cluster.yml
+
+## -----------------------------------------------
+## Deploy Application pods on RHEL bastion
+## -----------------------------------------------
- name: Login to RHEL bastion
hosts: rhel_bastion
become: true
tasks:
- - name: Set up application pods on RHEL
- vars:
+ - name: Set fact
+ ansible.builtin.set_fact:
student_name: "{{ rhel_a_provision_data.ssh_username }}"
student_group: "{{ rhel_a_provision_data.ssh_username }}"
+
+ - name: Set up application pods on RHEL
ansible.builtin.include_tasks:
file: pod_deployer.yml
- # - name: Enable lingering is needed
- # ansible.builtin.command: >-
- # loginctl enable-linger {{ rhel_a_provision_data.ssh_username }}
+ - name: Copy secret_aws_vm_token
+ when: purpose == "event"
+ become_user: "{{ student_name }}"
+ block:
+ - name: Copy token from aws
+ ansible.builtin.copy:
+ content: "{{ hostvars[groups['aws_bastion'][0]].secret_aws_vm_token }}"
+ dest: /home/{{ student_name }}/secret_aws_vm.token
- - name: Download and Install Skupper on Host
- become_user: "{{ rhel_a_provision_data.ssh_username }}"
- ansible.builtin.shell:
- cmd: curl https://skupper.io/install.sh | sh
+      - name: Copy token from azure
+ ansible.builtin.copy:
+ content: "{{ hostvars[groups['azure_bastion'][0]].secret_azure_vm_token }}"
+ dest: /home/{{ student_name }}/secret_azure_vm.token
+
+ - name: Install skupper
+ ansible.builtin.include_role:
+ name: skupper.network.skupper_cli_install
+ vars:
+ skupper_cli:
+ force: "True"
+
+ - name: Export bash variable
+ ansible.builtin.blockinfile:
+ path: /etc/profile
+ marker: "# skupper platform variabler"
+ block: "export SKUPPER_PLATFORM=podman"
- name: Reboot required for pod serivce
ansible.builtin.reboot:
connect_timeout: 300
msg: "Rebooting now.."
+    - name: Enable lingering if needed
+ ansible.builtin.command:
+ argv:
+ - loginctl
+ - enable-linger
+ - "{{ student_name }}"
+
+ - name: Switch skupper platform
+ when: purpose == "event"
+ ansible.builtin.command:
+ argv:
+ - /usr/local/bin/skupper
+ - switch
+
- name: Step 005 Post Software
hosts: localhost
diff --git a/ansible/configs/service-interconnect-binder/requirements.yml b/ansible/configs/service-interconnect-binder/requirements.yml
index 29183e11c89..013e4b55588 100644
--- a/ansible/configs/service-interconnect-binder/requirements.yml
+++ b/ansible/configs/service-interconnect-binder/requirements.yml
@@ -4,8 +4,6 @@
# src: https://github.com/redhat-gpte-devopsautomation/ftl-injector
# version: v0.17
collections:
-- name: kubernetes.core
- version: 2.3.1
- name: amazon.aws
version: 2.2.0
- name: community.general
@@ -14,3 +12,7 @@ collections:
version: 1.3.0
- name: ansible.utils
version: 2.7.0
+- name: skupper.network
+ version: 1.0.1
+- name: kubernetes.core
+ version: 2.4.0
diff --git a/ansible/configs/service-interconnect-binder/skupper_aws_cluster.yml b/ansible/configs/service-interconnect-binder/skupper_aws_cluster.yml
new file mode 100644
index 00000000000..0178f399e0c
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/skupper_aws_cluster.yml
@@ -0,0 +1,64 @@
+---
+- name: Set common vars for skupper
+ set_fact:
+ platform: kubernetes
+ namespace: aws
+
+- name: Skupper install
+ become: true
+ block:
+ - include_role:
+ name: skupper.network.skupper_cli_install
+ vars:
+ skupper_cli:
+ force: "True"
+
+- name: Initialize skupper
+ include_role:
+ name: skupper.network.skupper_init
+ vars:
+ init:
+ enableConsole: "True"
+ enableFlowCollector: "True"
+ consoleAuth: unsecured
+
+- name: Create skupper service
+ include_role:
+ name: skupper.network.skupper_service
+ vars:
+ services:
+ database:
+ ports:
+ - 5432
+ payment-processor:
+ ports:
+ - 8080
+ protocol: http
+
+- name: Generate token secret-aws-azure-token
+ include_role:
+ name: skupper.network.skupper_token
+ vars:
+ token:
+ name: secret-aws-azure-token
+ type: claim
+ expiry: "7200m0s"
+ uses: 10
+
+- name: Get token secret-aws-azure-token
+ set_fact:
+ secret_aws_azure_token: "{{ generatedToken }}"
+
+- name: Generate token secret-aws-vm-token
+ include_role:
+ name: skupper.network.skupper_token
+ vars:
+ token:
+ name: secret-aws-vm-token
+ type: claim
+ expiry: "7200m0s"
+ uses: 10
+
+- name: Get token secret-aws-vm-token
+ set_fact:
+ secret_aws_vm_token: "{{ generatedToken }}"
diff --git a/ansible/configs/service-interconnect-binder/skupper_azure_cluster.yml b/ansible/configs/service-interconnect-binder/skupper_azure_cluster.yml
new file mode 100644
index 00000000000..d97d62e6389
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/skupper_azure_cluster.yml
@@ -0,0 +1,54 @@
+- name: Set common vars for skupper
+ set_fact:
+ platform: kubernetes
+ namespace: azure
+
+- name: Skupper install
+ become: true
+ block:
+ - include_role:
+ name: skupper.network.skupper_cli_install
+ vars:
+ skupper_cli:
+ force: "True"
+
+- name: Initialize skupper
+ include_role:
+ name: skupper.network.skupper_init
+
+- name: Generate token secret-azure-vm-token
+ include_role:
+ name: skupper.network.skupper_token
+ vars:
+ token:
+ name: secret-azure-vm-token
+ type: claim
+ expiry: "7200m0s"
+ uses: 10
+
+- name: Get token secret-azure-vm-token
+ set_fact:
+ secret_azure_vm_token: "{{ generatedToken }}"
+
+- name: Execute link block
+ become: true
+ become_user: "{{ student_name }}"
+ block:
+ - name: Copy secret_aws_azure_token
+ ansible.builtin.copy:
+ content: "{{ hostvars[groups['aws_bastion'][0]].secret_aws_azure_token }}"
+ dest: /home/{{ student_name }}/secret_aws_azure.token
+
+ # - name: Execute link command
+ # ansible.builtin.command:
+ # argv:
+ # - /usr/local/bin/skupper
+ # - link
+ # - create
+ # - /home/{{ student_name }}/secret_aws_azure.token
+ # - --name
+ # - aws-to-azure
+ # - --namespace
+ # - azure
+ # - --platform
+ # - kubernetes
diff --git a/ansible/configs/service-interconnect-binder/templates/instructions-group.yaml.j2 b/ansible/configs/service-interconnect-binder/templates/instructions-group.yaml.j2
new file mode 100644
index 00000000000..b7de4cfb80e
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/templates/instructions-group.yaml.j2
@@ -0,0 +1,6 @@
+kind: Group
+apiVersion: user.openshift.io/v1
+metadata:
+ name: '{{ocp4_workload_service_interconnect_webapp_group_name}}'
+users:
+ - "{{ocp4_workload_authentication_admin_user|default(ocp_username,true)}}"
\ No newline at end of file
diff --git a/ansible/configs/service-interconnect-binder/templates/instructions-services.json.j2 b/ansible/configs/service-interconnect-binder/templates/instructions-services.json.j2
new file mode 100644
index 00000000000..469f929c3bf
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/templates/instructions-services.json.j2
@@ -0,0 +1,36 @@
+{
+ "3scale": {
+ "Host":"https://3scale-admin.{{ aws_route_subdomain }}",
+ "Version":"2.7.0.GA"
+ },
+ "codeready":{
+ "Host":"https://devspaces.{{ aws_route_subdomain }}",
+ "Version":"3.4.0"
+ },
+ "AWS": {
+ "Attributes": {
+ "aws-subdomain": "{{ aws_route_subdomain }}",
+ "aws-console": "{{ aws_console_url }}",
+ "aws-admin": "{{ aws_a_provision_data.openshift_cluster_admin_username }}",
+ "aws-password": "{{ aws_a_provision_data.openshift_cluster_admin_password }}"
+ },
+ "Host": "{{ azure_console_url | replace('https://', '') }}"
+ },
+ "Azure": {
+ "Attributes": {
+ "azure-subdomain": "{{ azure_route_subdomain }}",
+ "azure-console": "{{ azure_console_url }}",
+ "azure-admin": "{{ azure_a_provision_data.openshift_cluster_admin_username }}",
+ "azure-password": "{{ azure_a_provision_data.openshift_cluster_admin_password}}"
+ },
+ "Host": "{{ azure_console_url | replace('https://', '') }}"
+ },
+ "RHEL9": {
+ "Attributes": {
+ "rhel-hostname": "{{ rhel_hostname }}",
+ "rhel-admin": "{{ rhel_a_provision_data.ssh_username }}"
+ },
+ "Host": "{{ rhel_hostname }}",
+ "Version": "9"
+ }
+}
diff --git a/ansible/configs/service-interconnect-binder/templates/instructions-webapp.yaml.j2 b/ansible/configs/service-interconnect-binder/templates/instructions-webapp.yaml.j2
new file mode 100644
index 00000000000..e1a2c152e0b
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/templates/instructions-webapp.yaml.j2
@@ -0,0 +1,22 @@
+apiVersion: "integreatly.org/v1alpha1"
+kind: "WebApp"
+metadata:
+ name: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ namespace: "{{ webapp_namespace }}"
+ labels:
+ app: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+spec:
+ app_label: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ template:
+ path: "{{ ocp4_workload_service_interconnect_webapp_operator_template_path }}"
+ parameters:
+ IMAGE: quay.io/redhatintegration/tutorial-web-app:latest
+ OPENSHIFT_OAUTHCLIENT_ID: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ OPENSHIFT_OAUTH_HOST: "oauth-openshift.{{ aws_route_subdomain }}"
+ OPENSHIFT_HOST: "console-openshift-console.{{ aws_route_subdomain }}"
+ INSTALLED_SERVICES: |-
+ {{ solution_explorer_services }}
+ OPENSHIFT_VERSION: "4"
+{% if ocp4_workload_service_interconnect_webapp_walkthrough_locations is defined %}
+ WALKTHROUGH_LOCATIONS: "{{ ocp4_workload_service_interconnect_webapp_walkthrough_locations|join(',') }}"
+{% endif %}
\ No newline at end of file
diff --git a/ansible/configs/service-interconnect-binder/templates/oauthclient.yaml.j2 b/ansible/configs/service-interconnect-binder/templates/oauthclient.yaml.j2
new file mode 100644
index 00000000000..5c488f541f2
--- /dev/null
+++ b/ansible/configs/service-interconnect-binder/templates/oauthclient.yaml.j2
@@ -0,0 +1,8 @@
+apiVersion: oauth.openshift.io/v1
+grantMethod: auto
+kind: OAuthClient
+metadata:
+ name: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ namespace: "{{ webapp_namespace }}"
+redirectURIs:
+ - "https://{{ webapp_secure_route }}"
\ No newline at end of file
diff --git a/ansible/roles-infra/infra-aws-open-environment/defaults/main.yaml b/ansible/roles-infra/infra-aws-open-environment/defaults/main.yaml
index ec248e6f3a2..e443a37f066 100644
--- a/ansible/roles-infra/infra-aws-open-environment/defaults/main.yaml
+++ b/ansible/roles-infra/infra-aws-open-environment/defaults/main.yaml
@@ -1,10 +1,8 @@
---
admin_console_password_gen: >-
- {{- lookup('password', '/dev/null length=1 chars=letters') | upper -}}
- {{- lookup('password', '/dev/null length=1 chars=letters') | lower -}}
- {{- lookup('password', '/dev/null length=1 chars=punctuation') -}}
- {{- lookup('password', '/dev/null length=9') -}}
- {{- lookup('password', '/dev/null length=1 chars=digits') -}}
+ {{ lookup('community.general.random_string',
+ length=12, min_lower=1, min_upper=1, special=false,
+ min_numeric=1) }}
sandbox_enable_ui: false
diff --git a/ansible/roles-infra/infra-common-ssh-config-generate/tasks/main.yml b/ansible/roles-infra/infra-common-ssh-config-generate/tasks/main.yml
index 22a71a42729..16af43d0f84 100644
--- a/ansible/roles-infra/infra-common-ssh-config-generate/tasks/main.yml
+++ b/ansible/roles-infra/infra-common-ssh-config-generate/tasks/main.yml
@@ -69,7 +69,7 @@
{% endif %}
User {{ remote_user }}
IdentityFile {{ ssh_provision_key_path | default(ssh_key) | default(infra_ssh_key) | default(ansible_ssh_private_key_file) | default(default_key_name) }}
- {% if hostvars[item].bastion != '' %}
+ {% if 'bastion' in hostvars[item] and hostvars[item].bastion != '' %}
ProxyCommand ssh -F {{ ansible_ssh_config }} {{ hostvars[item].bastion }} -W %h:%p
{% else %}
ProxyCommand ssh -F {{ ansible_ssh_config }} {{ bastion_hostname }} -W %h:%p
@@ -81,7 +81,7 @@
ControlPersist 5m
when:
- item not in [bastion_hostname, 'localhost', '127.0.0.1']
- - item != hostvars[item].bastion
+ - ('bastion' in hostvars[item] and item != hostvars[item].bastion)
with_items: "{{ groups['all'] }}"
tags:
- bastion_proxy_config_hosts
diff --git a/ansible/roles-infra/infra-dns/defaults/main.yml b/ansible/roles-infra/infra-dns/defaults/main.yml
index 4fce275367a..23865406b69 100644
--- a/ansible/roles-infra/infra-dns/defaults/main.yml
+++ b/ansible/roles-infra/infra-dns/defaults/main.yml
@@ -6,7 +6,7 @@ infra_dns_num_format: '%d'
infra_dns_inventory_var: >-
{%- if cloud_provider == 'osp' -%}
- r_osp_facts
+ r_osp_server_facts
{%- elif cloud_provider == 'equinix_metal' -%}
r_equinix_metal_devices
{%- elif cloud_provider == 'vmc' -%}
diff --git a/ansible/roles-infra/infra-dns/tasks/nested_loop.yml b/ansible/roles-infra/infra-dns/tasks/nested_loop.yml
index 409c75a51f2..01a393b2646 100644
--- a/ansible/roles-infra/infra-dns/tasks/nested_loop.yml
+++ b/ansible/roles-infra/infra-dns/tasks/nested_loop.yml
@@ -3,7 +3,7 @@
set_fact:
find_ip_query: >-
{%- if cloud_provider == 'osp' -%}
- ansible_facts.openstack_servers[?name=='{{ _instance_name }}'].public_v4 | [0]
+ openstack_servers[?name=='{{ _instance_name }}'].public_v4 | [0]
{%- elif cloud_provider == 'equinix_metal' -%}
results[].devices[?hostname=='{{ _instance_name }}'].public_ipv4[]|[0]
{%- elif cloud_provider == 'vmc' -%}
diff --git a/ansible/roles-infra/infra-images/defaults/main.yaml b/ansible/roles-infra/infra-images/defaults/main.yaml
index f9352c95417..f02afd3c258 100644
--- a/ansible/roles-infra/infra-images/defaults/main.yaml
+++ b/ansible/roles-infra/infra-images/defaults/main.yaml
@@ -8,6 +8,14 @@ infra_images_redhat_owner_id: 309956199498
infra_images_predefined:
+ RHEL92GOLD-latest:
+ owner: "{{ infra_images_redhat_owner_id }}"
+ name: RHEL-9.2.*_HVM-*Access*
+ architecture: x86_64
+ aws_filters:
+ is-public: false
+
+
RHEL91GOLD-latest:
owner: "{{ infra_images_redhat_owner_id }}"
name: RHEL-9.1.*_HVM-*Access*
diff --git a/ansible/roles-infra/infra-osp-create-inventory/tasks/main.yml b/ansible/roles-infra/infra-osp-create-inventory/tasks/main.yml
index d6245fd9457..aef461e583a 100644
--- a/ansible/roles-infra/infra-osp-create-inventory/tasks/main.yml
+++ b/ansible/roles-infra/infra-osp-create-inventory/tasks/main.yml
@@ -21,7 +21,7 @@
when:
- server.status != 'terminated'
- '"bastions" in server.metadata.AnsibleGroup | default("")'
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.servers }}"
loop_control:
label: "{{ server | json_query(_name_selector) | default(server.name) }}"
loop_var: server
@@ -44,16 +44,21 @@
state: "{{ server.status }}"
instance_id: "{{ server.id }}"
isolated: "{{ server.metadata.isolated | default(false) }}"
- private_ip_address: "{{ server.private_v4 }}"
- public_ip_address: "{{ server.public_v4 | default('') }}"
+ private_ip_address: "{{ server.addresses | json_query(private_ip_query) | first }}"
+ public_ip_address: "{{ server.addresses | json_query(public_ip_query) | first }}"
image_id: "{{ server.image.id | default('') }}"
ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
ansible_python_interpreter: "{{ server.metadata.ansible_python_interpreter | default(omit) }}"
bastion: "{{ local_bastion | default('') }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.servers }}"
loop_control:
label: "{{ server | json_query(_name_selector) | default(server.name) }}"
loop_var: server
+ vars:
+ private_ip_query: >-
+ *[?"OS-EXT-IPS:type"=='fixed'] | [].addr || ['']
+ public_ip_query: >-
+ *[?"OS-EXT-IPS:type"=='floating'] | [].addr || ['']
tags:
- create_inventory
- must
@@ -66,7 +71,7 @@
add_host:
name: "{{ server | json_query(_name_selector) | default(server.name) }}"
private_ip_address: "{{ server.addresses[multi_network_primary] | json_query(private_ip_query) }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.servers }}"
loop_control:
label: "{{ server | json_query(_name_selector) | default(server.name) }}"
loop_var: server
@@ -77,7 +82,7 @@
- add_host:
name: "{{ server | json_query(_name_selector) | default(server.name) }}"
groups: "{{ server.metadata.AnsibleGroup }}"
- loop: "{{ r_osp_facts.openstack_servers }}"
+ loop: "{{ r_osp_server_facts.servers }}"
loop_control:
label: "{{ server | json_query(_name_selector) | default(server.name) }}"
loop_var: server
@@ -95,19 +100,12 @@
loop_var: host
when: hostvars[host].public_ip_address != ''
-- debug:
- var: hostvars[local_bastion].public_ip_address
-
-- debug:
- msg: >-
- bastion IP is {{ lookup('dig', hostvars[local_bastion].public_dns_name) }}
- ignore_errors: true
-
- name: Verify that DNS matches bastion host_var
assert:
that:
# Requires dnspython library
- - lookup('dig', hostvars[local_bastion].public_dns_name) == hostvars[local_bastion].public_ip_address
+ - lookup('community.general.dig',
+ hostvars[local_bastion].public_dns_name + ".") == hostvars[local_bastion].public_ip_address
- name: debug hostvars
debug:
diff --git a/ansible/roles-infra/infra-osp-dns/tasks/nested_loop.yml b/ansible/roles-infra/infra-osp-dns/tasks/nested_loop.yml
index 6362afebdd9..1ae01fb5256 100644
--- a/ansible/roles-infra/infra-osp-dns/tasks/nested_loop.yml
+++ b/ansible/roles-infra/infra-osp-dns/tasks/nested_loop.yml
@@ -1,7 +1,7 @@
---
- name: Set the query to find the public IPv4 IP of the instance
set_fact:
- find_ip_query: openstack_servers[?name=='{{ _instance_name }}'].public_v4 | [0]
+ find_ip_query: servers[?name=='{{ _instance_name }}'].access_ipv4|[0]
- when: _dns_state == 'present'
block:
@@ -9,20 +9,20 @@
debug:
msg: >-
The floating IP for {{ _instance_name }}
- is {{ r_osp_facts | json_query(find_ip_query) }}
+ is {{ r_osp_server_facts | json_query(find_ip_query) }}
- name: DNS entry ({{ _dns_state | default('present') }})
- nsupdate:
+ community.general.nsupdate:
server: >-
{{ osp_cluster_dns_server
- | ipaddr
- | ternary(osp_cluster_dns_server, lookup('dig', osp_cluster_dns_server))
+ | ansible.utils.ipaddr
+ | ternary(osp_cluster_dns_server, lookup('community.general.dig', osp_cluster_dns_server + "."))
}}
zone: "{{ osp_cluster_dns_zone }}"
record: "{{ _instance_name }}.{{ guid }}"
type: A
ttl: "{{ infra_osp_dns_default_ttl }}"
- value: "{{ r_osp_facts | json_query(find_ip_query) }}"
+ value: "{{ r_osp_server_facts | json_query(find_ip_query) }}"
port: "{{ osp_cluster_dns_port | d('53') }}"
key_name: "{{ ddns_key_name }}"
key_algorithm: "{{ ddns_key_algorithm | d('hmac-md5') }}"
@@ -38,11 +38,11 @@
loop: "{{ _alt_names }}"
loop_control:
loop_var: _alt_name
- nsupdate:
+ community.general.nsupdate:
server: >-
{{ osp_cluster_dns_server
- | ipaddr
- | ternary(osp_cluster_dns_server, lookup('dig', osp_cluster_dns_server))
+ | ansible.utils.ipaddr
+ | ternary(osp_cluster_dns_server, lookup('community.general.dig', osp_cluster_dns_server + "."))
}}
zone: "{{ osp_cluster_dns_zone | default(cluster_dns_zone) }}"
record: "{{ _alt_name }}{{_index}}.{{ guid }}"
@@ -54,15 +54,15 @@
key_algorithm: "{{ ddns_key_algorithm | d('hmac-md5') }}"
key_secret: "{{ ddns_key_secret }}"
-# When state == absent, don't use r_osp_facts (should not be needed)
+# When state == absent, don't use r_osp_server_facts (should not be needed)
- when: _dns_state == 'absent'
block:
- name: DNS entry ({{ _dns_state | default('present') }})
- nsupdate:
+ community.general.nsupdate:
server: >-
{{ osp_cluster_dns_server
- | ipaddr
- | ternary(osp_cluster_dns_server, lookup('dig', osp_cluster_dns_server))
+ | ansible.utils.ipaddr
+ | ternary(osp_cluster_dns_server, lookup('community.general.dig', osp_cluster_dns_server + "."))
}}
zone: "{{ osp_cluster_dns_zone }}"
record: "{{ _instance_name }}.{{ guid }}"
@@ -79,11 +79,11 @@
loop: "{{ _alt_names }}"
loop_control:
loop_var: _alt_name
- nsupdate:
+ community.general.nsupdate:
server: >-
{{ osp_cluster_dns_server
- | ipaddr
- | ternary(osp_cluster_dns_server, lookup('dig', osp_cluster_dns_server))
+ | ansible.utils.ipaddr
+ | ternary(osp_cluster_dns_server, lookup('community.general.dig', osp_cluster_dns_server + "."))
}}
zone: "{{ osp_cluster_dns_zone | default(cluster_dns_zone) }}"
record: "{{ _alt_name }}{{_index}}.{{ guid }}"
diff --git a/ansible/roles-infra/infra-osp-dry-run/tasks/main.yml b/ansible/roles-infra/infra-osp-dry-run/tasks/main.yml
index 4257797543a..be33979b7ab 100644
--- a/ansible/roles-infra/infra-osp-dry-run/tasks/main.yml
+++ b/ansible/roles-infra/infra-osp-dry-run/tasks/main.yml
@@ -13,7 +13,7 @@
src: basic_heat_template.yml.j2
- name: Delete Heat stack that doesn't exist
- os_stack:
+ openstack.cloud.stack:
name: "dry-run-stack-{{ 999999 | random }}"
state: absent
wait: true
@@ -34,6 +34,6 @@
- validate_heat_template
- name: Gather instance facts
- os_server_info:
+ openstack.cloud.server_info:
server: "*"
- register: r_osp_facts
+ register: r_osp_server_facts
diff --git a/ansible/roles-infra/infra-osp-project-create/tasks/main.yml b/ansible/roles-infra/infra-osp-project-create/tasks/main.yml
index 252afaa2983..5c88bd773d9 100644
--- a/ansible/roles-infra/infra-osp-project-create/tasks/main.yml
+++ b/ansible/roles-infra/infra-osp-project-create/tasks/main.yml
@@ -67,7 +67,7 @@
when: osp_project_create
block:
- name: Create project for user
- os_project:
+ openstack.cloud.project:
name: "{{ osp_project_name }}"
state: present
description: "{{ env_type }} {{ guid }}"
@@ -100,14 +100,14 @@
{{ osp_project_name | quote }}
- name: Grant access to admin account on new project
- os_user_role:
+ openstack.cloud.role_assignment:
state: present
user: "{{ admin_user }}"
role: "admin"
project: "{{ osp_project_name }}"
- name: Set quotas on new project
- os_quota:
+ openstack.cloud.quota:
name: "{{ osp_project_name }}"
instances: "{{ quota_num_instances }}"
cores: "{{ quota_num_cores }}"
@@ -124,7 +124,7 @@
security_group_rule: "{{ quota_sg_rules }}"
- name: Create user in new project
- os_user:
+ openstack.cloud.identity_user:
state: present
name: "{{ osp_auth_username_member }}"
password: "{{ heat_user_password }}"
@@ -132,13 +132,13 @@
default_project: "{{ osp_project_name }}"
- name: Add member role to user
- os_user_role:
+ openstack.cloud.role_assignment:
user: "{{ osp_auth_username_member }}"
role: _member_
project: "{{ osp_project_name }}"
- name: Add Swift role to user
- os_user_role:
+ openstack.cloud.role_assignment:
user: "{{ osp_auth_username_member }}"
role: swiftoperator
project: "{{ osp_project_name }}"
@@ -148,13 +148,12 @@
when: osp_project_id is not defined
block:
- name: Get project info
- # This changes to os_project_info in Ansible 2.9
- os_project_facts:
+ openstack.cloud.project_info:
name: "{{ osp_project_name }}"
register: r_osp_project
- set_fact:
- osp_project_info: "{{ r_osp_project.ansible_facts.openstack_projects }}"
+ osp_project_info: "{{ r_osp_project.projects }}"
- when: osp_project_info | length == 0
fail:
@@ -164,7 +163,7 @@
# when: osp_create_sandbox
# block:
# - name: Create sandbox user in project
- # os_user:
+ # openstack.cloud.identity_user:
# state: present
# name: sandbox-{{ guid }}-user
# password: "{{ heat_user_password }}"
@@ -172,7 +171,7 @@
# default_project: "{{ osp_project_name }}"
# - name: Add member role to user
- # os_user_role:
+ # openstack.cloud.role_assignment:
# user: sandbox-{{ guid }}-user
# role: _member_
# project: "{{ osp_project_name }}"
diff --git a/ansible/roles-infra/infra-osp-resources-destroy/tasks/detect_project.yml b/ansible/roles-infra/infra-osp-resources-destroy/tasks/detect_project.yml
index 36880647ba9..cb7a9cc2f33 100644
--- a/ansible/roles-infra/infra-osp-resources-destroy/tasks/detect_project.yml
+++ b/ansible/roles-infra/infra-osp-resources-destroy/tasks/detect_project.yml
@@ -2,10 +2,10 @@
- name: Get project information
environment: >-
{{ __infra_osp_resources_destroy_environment | combine({"OS_PROJECT_NAME": "admin"}) }}
- os_project_info:
+ openstack.cloud.project_info:
name: "{{ osp_project_name }}"
register: r_osp_project
- name: Set osp_project_info
set_fact:
- osp_project_info: "{{ r_osp_project.openstack_projects }}"
+ osp_project_info: "{{ r_osp_project.projects }}"
diff --git a/ansible/roles-infra/infra-osp-resources-destroy/tasks/keypairs.yml b/ansible/roles-infra/infra-osp-resources-destroy/tasks/keypairs.yml
index 24f2f722320..45fd9cd5a7a 100644
--- a/ansible/roles-infra/infra-osp-resources-destroy/tasks/keypairs.yml
+++ b/ansible/roles-infra/infra-osp-resources-destroy/tasks/keypairs.yml
@@ -9,18 +9,18 @@
environment: "{{ __infra_osp_resources_destroy_environment }}"
block:
- name: Get user info
- os_user_info:
+ openstack.cloud.identity_user_info:
name: "{{ _keypair_owner }}"
domain: default
register: r_osp_user_info
- - when: r_osp_user_info.openstack_users | length > 0
+ - when: r_osp_user_info.users | length > 0
block:
- name: Get UUID of user
set_fact:
osp_user_uuid: "{{ r_osp_user_info | json_query(uuid_query) }}"
vars:
- uuid_query: openstack_users[].id|[0]
+ uuid_query: users[].id|[0]
- name: List keypairs for user
command: nova keypair-list --user {{ osp_user_uuid | quote }}
@@ -37,6 +37,6 @@
command: nova keypair-delete --user {{ osp_user_uuid | quote }} {{ __key_name | quote }}
- name: Delete user
- os_user:
+ openstack.cloud.identity_user:
state: absent
name: "{{ _keypair_owner }}"
diff --git a/ansible/roles-infra/infra-osp-resources-destroy/tasks/project.yml b/ansible/roles-infra/infra-osp-resources-destroy/tasks/project.yml
index 6eb1a0be958..6868cd8d282 100644
--- a/ansible/roles-infra/infra-osp-resources-destroy/tasks/project.yml
+++ b/ansible/roles-infra/infra-osp-resources-destroy/tasks/project.yml
@@ -1,7 +1,7 @@
---
- name: Delete project
environment: "{{ __infra_osp_resources_destroy_environment }}"
- os_project:
+ openstack.cloud.project:
name: "{{ osp_project_name }}"
state: absent
tags:
diff --git a/ansible/roles-infra/infra-osp-resources-destroy/tasks/project_resources.yml b/ansible/roles-infra/infra-osp-resources-destroy/tasks/project_resources.yml
index 6a260bbee26..9c71aa0cef2 100644
--- a/ansible/roles-infra/infra-osp-resources-destroy/tasks/project_resources.yml
+++ b/ansible/roles-infra/infra-osp-resources-destroy/tasks/project_resources.yml
@@ -1,4 +1,20 @@
---
+- name: Ensure manager has access to project
+ environment:
+ OS_AUTH_URL: "{{ osp_auth_url }}"
+ OS_USERNAME: "{{ osp_auth_username }}"
+ OS_PASSWORD: "{{ osp_auth_password }}"
+ OS_PROJECT_NAME: "admin"
+ OS_PROJECT_DOMAIN_ID: "{{ osp_auth_project_domain }}"
+ OS_USER_DOMAIN_NAME: "{{ osp_auth_user_domain }}"
+ block:
+ - name: Set admin role for manager user on project
+ openstack.cloud.role_assignment:
+ state: present
+ user: "{{ osp_auth_username }}"
+ role: "admin"
+ project: "{{ osp_project_id | default(osp_project_name) }}"
+
- name: Remove OpenStack resources from project
environment: >-
{{ __infra_osp_resources_destroy_environment
@@ -105,7 +121,8 @@
when: __all_ports | length > 0
command: openstack network trunk delete {{ __all_ports | map('quote') | join(' ') }}
- - name: Purge network resources
- command: |
- neutron purge
- --project {{ osp_project_info[0].id | default(osp_project_id) | quote }}
+ # deprecated
+ #- name: Purge network resources
+ # command: |
+ # neutron purge
+ # --project {{ osp_project_info[0].id | default(osp_project_id) | quote }}
diff --git a/ansible/roles-infra/infra-osp-save-images/tasks/stop_vms.yml b/ansible/roles-infra/infra-osp-save-images/tasks/stop_vms.yml
index 649b9681cd7..909026c0f66 100644
--- a/ansible/roles-infra/infra-osp-save-images/tasks/stop_vms.yml
+++ b/ansible/roles-infra/infra-osp-save-images/tasks/stop_vms.yml
@@ -1,14 +1,14 @@
---
- name: Get list of the instances
environment: "{{ __infra_osp_save_images_authentication }}"
- os_server_info:
+ openstack.cloud.server_info:
register: os_instances
- name: Stop VM instances
environment: "{{ __infra_osp_save_images_authentication }}"
- os_server_action:
+ openstack.cloud.server_action:
action: stop
server: "{{ instance.name }}"
- loop: "{{ os_instances.openstack_servers }}"
+ loop: "{{ os_instances.servers }}"
loop_control:
loop_var: instance
diff --git a/ansible/roles-infra/infra-vmc-resources/tasks/create_additional_public_ips.yaml b/ansible/roles-infra/infra-vmc-resources/tasks/create_additional_public_ips.yaml
index 3dbc631736b..303b788ff95 100644
--- a/ansible/roles-infra/infra-vmc-resources/tasks/create_additional_public_ips.yaml
+++ b/ansible/roles-infra/infra-vmc-resources/tasks/create_additional_public_ips.yaml
@@ -2,14 +2,14 @@
uri:
url: "{{ nsxt_proxy_url }}/cloud-service/api/v1/infra/public-ips/{{ env_type }}-{{ guid }}-{{ _additional.name }}"
method: GET
- status_code: [200,500]
+ status_code: [200,404,500]
headers:
csp-auth-token: "{{ _nsxt_token }}"
return_content: yes
register: _public_ip_exists
- name: Request a Public IP
- when: _public_ip_exists.status == 500
+ when: _public_ip_exists.status in [404,500]
uri:
url: "{{ nsxt_proxy_url }}/cloud-service/api/v1/infra/public-ips/{{ env_type }}-{{ guid }}-{{ _additional.name }}"
method: PUT
@@ -26,7 +26,7 @@
_additional_public_ip: "{{ _public_ip_request.json.ip | default(_public_ip_exists.json.ip)}}"
- name: Create a NAT configuration
- when: _public_ip_exists.status == 500
+ when: _public_ip_exists.status in [404,500]
uri:
url: "{{ nsxt_proxy_url }}/policy/api/v1/infra/tier-1s/cgw/nat/USER/nat-rules/nat-{{ env_type }}-{{ guid }}-{{ _additional.name }}"
method: PUT
diff --git a/ansible/roles-infra/infra-vmc-resources/tasks/create_public_ip_and_nat.yaml b/ansible/roles-infra/infra-vmc-resources/tasks/create_public_ip_and_nat.yaml
index c0bb728f33d..034a2af36e1 100644
--- a/ansible/roles-infra/infra-vmc-resources/tasks/create_public_ip_and_nat.yaml
+++ b/ansible/roles-infra/infra-vmc-resources/tasks/create_public_ip_and_nat.yaml
@@ -2,7 +2,7 @@
uri:
url: "{{ nsxt_proxy_url }}/cloud-service/api/v1/infra/public-ips/{{ env_type }}-{{ guid }}-{{ item.instance.hw_name }}"
method: GET
- status_code: [200,500]
+ status_code: [200,404,500]
headers:
csp-auth-token: "{{ _nsxt_token }}"
return_content: yes
@@ -21,7 +21,7 @@
- name: Request a Public IP
- when: _public_ip_exists.status == 500
+ when: _public_ip_exists.status in [404,500]
uri:
url: "{{ nsxt_proxy_url }}/cloud-service/api/v1/infra/public-ips/{{ env_type }}-{{ guid }}-{{ item.instance.hw_name }}"
method: PUT
@@ -69,7 +69,7 @@
return_content: yes
- name: Create a NAT configuration
- when: _public_ip_exists.status == 500
+ when: _public_ip_exists.status in [404,500]
uri:
url: "{{ nsxt_proxy_url }}/policy/api/v1/infra/tier-1s/cgw/nat/USER/nat-rules/nat-{{ env_type }}-{{ guid }}-{{ item.instance.hw_name }}"
method: PUT
diff --git a/ansible/roles-infra/infra-vmc-resources/tasks/delete_additional_public_ips.yaml b/ansible/roles-infra/infra-vmc-resources/tasks/delete_additional_public_ips.yaml
index a9e68476327..e24779ebf72 100644
--- a/ansible/roles-infra/infra-vmc-resources/tasks/delete_additional_public_ips.yaml
+++ b/ansible/roles-infra/infra-vmc-resources/tasks/delete_additional_public_ips.yaml
@@ -2,7 +2,7 @@
uri:
url: "{{ nsxt_proxy_url }}/cloud-service/api/v1/infra/public-ips/{{ env_type }}-{{ guid }}-{{ _additional.name }}"
method: GET
- status_code: [200,500]
+ status_code: [200,404,500]
headers:
csp-auth-token: "{{ _nsxt_token }}"
return_content: yes
diff --git a/ansible/roles-infra/infra-vmc-resources/tasks/delete_public_ip_and_nat.yaml b/ansible/roles-infra/infra-vmc-resources/tasks/delete_public_ip_and_nat.yaml
index 49d118f9335..3a62565cd7f 100644
--- a/ansible/roles-infra/infra-vmc-resources/tasks/delete_public_ip_and_nat.yaml
+++ b/ansible/roles-infra/infra-vmc-resources/tasks/delete_public_ip_and_nat.yaml
@@ -2,7 +2,7 @@
uri:
url: "{{ nsxt_proxy_url }}/cloud-service/api/v1/infra/public-ips/{{ env_type }}-{{ guid }}-{{ item.guest_name }}"
method: GET
- status_code: [200,500]
+ status_code: [200,404,500]
headers:
csp-auth-token: "{{ _nsxt_token }}"
return_content: yes
@@ -17,11 +17,6 @@
return_content: yes
register: _lab_public_ips
-
-
-
-
-
- name: Remove a NAT configuration
when: _public_ip_exists.status == 200
uri:
@@ -46,13 +41,10 @@
return_content: yes
register: _public_ip_request
-
-
- name: Set the IP in a variable
set_fact:
_vm_public_ip: "{{ _public_ip_request.json.ip | default(_public_ip_exists.json.ip)}}"
-
- name: Set a new variable removing the IP to the lab public ips
set_fact:
_lab_public_ips_new: "{{ _lab_public_ips.json.expression.0.ip_addresses|default([])|difference([_vm_public_ip]) }}"
diff --git a/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/add-bastion-to-openstack-network.yml b/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/add-bastion-to-openstack-network.yml
index 2b1d951793f..c1d59933b46 100644
--- a/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/add-bastion-to-openstack-network.yml
+++ b/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/add-bastion-to-openstack-network.yml
@@ -1,15 +1,15 @@
---
- name: Get OpenStack bastion server info
- os_server_info:
+ openstack.cloud.server_info:
auth: "{{ __infra_osp_bastion_on_openshift_network_osp_auth }}"
filters:
name: bastion
project_id: "{{ __os_project_id }}"
register: r_server_info
- failed_when: r_server_info.openstack_servers | length == 0
+ failed_when: r_server_info.servers | length == 0
- name: Get OpenStack network info
- os_networks_info:
+ openstack.cloud.networks_info:
auth: "{{ __infra_osp_bastion_on_openshift_network_osp_auth }}"
filters:
project_id: "{{ __os_project_id }}"
@@ -17,10 +17,10 @@
- name: Add bastion to openshift network
vars:
- __bastion_info: "{{ r_server_info.openstack_servers[0] }}"
+ __bastion_info: "{{ r_server_info.servers[0] }}"
__bastion_networks: "{{ __bastion_info.addresses.keys() }}"
__openshift_network: >-
- {{ r_networks_info.openstack_networks | to_json | from_json
+ {{ r_networks_info.networks | to_json | from_json
| json_query("[?ends_with(name, '-openshift')]|[0].name")
}}
when: __openshift_network not in __bastion_networks
diff --git a/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/main.yml b/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/main.yml
index 841106140dd..8aa20fac71d 100644
--- a/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/main.yml
+++ b/ansible/roles-infra/infra_osp_bastion_on_openshift_network/tasks/main.yml
@@ -4,17 +4,17 @@
that: osp_project_name is defined
- name: Get project information
- os_project_info:
+ openstack.cloud.project_info:
auth: "{{ __infra_osp_bastion_on_openshift_network_osp_auth }}"
name: "{{ osp_project_name }}"
register: r_os_project_info
failed_when: >-
r_os_project_info is failed or
- r_os_project_info.openstack_projects | length == 0
+ r_os_project_info.projects | length == 0
- name: Add bastion to openstack network
vars:
- __os_project_id: "{{ r_os_project_info.openstack_projects[0].id }}"
+ __os_project_id: "{{ r_os_project_info.projects[0].id }}"
include_tasks:
file: add-bastion-to-openstack-network.yml
...
diff --git a/ansible/roles-infra/infra_osp_lifecycle/tasks/get-servers.yml b/ansible/roles-infra/infra_osp_lifecycle/tasks/get-servers.yml
index fee09112ff6..46864204669 100644
--- a/ansible/roles-infra/infra_osp_lifecycle/tasks/get-servers.yml
+++ b/ansible/roles-infra/infra_osp_lifecycle/tasks/get-servers.yml
@@ -3,16 +3,16 @@
- name: Get server info using guid & env_type
openstack.cloud.server_info:
all_projects: false
- register: r_osp_facts
+ register: r_osp_server_facts
- name: Debug openstack.cloud.server_info var, use -v to display
debug:
verbosity: 3
- var: r_osp_facts
+ var: r_osp_server_facts
- name: Create openstack_servers fact
set_fact:
- openstack_servers: "{{ r_osp_facts.openstack_servers }}"
+ openstack_servers: "{{ r_osp_server_facts.servers }}"
- name: Debug osp_servers fact, use -v to display
debug:
diff --git a/ansible/roles/ansible_bu_gitea/defaults/main.yml b/ansible/roles/ansible_bu_gitea/defaults/main.yml
new file mode 100644
index 00000000000..d5d15e435b4
--- /dev/null
+++ b/ansible/roles/ansible_bu_gitea/defaults/main.yml
@@ -0,0 +1,32 @@
+---
+
+# -------------------------------------------------
+# Default Variables
+# -------------------------------------------------
+# FQDN
+ansible_bu_gitea_fqdn: "{{ groups['gitlab'][0].split('.')[0] }}.{{ subdomain_base }}"
+
+# List of gitea dependencies
+ansible_bu_gitea_certbot_dependencies:
+ - python3-pip
+ - python3-devel
+ - certbot
+
+# -------------------------------------------------
+# Role: ansible.workshops.vendor_do1jlr_gitea
+# -------------------------------------------------
+gitea_root_url: 'https://{{ ansible_bu_gitea_fqdn }}'
+gitea_http_listen: '0.0.0.0'
+gitea_http_port: '443'
+gitea_protocol: 'https'
+gitea_start_ssh: false
+gitea_systemd_cap_net_bind_service: true
+gitea_group: 'root'
+gitea_db_password: "{{ common_password }}"
+gitea_server_extra_config: |
+ CERT_FILE = /etc/letsencrypt/live/{{ ansible_bu_gitea_fqdn }}/fullchain.pem
+ KEY_FILE = /etc/letsencrypt/live/{{ ansible_bu_gitea_fqdn }}/privkey.pem
+ LANDING_PAGE = login
+gitea_repository_extra_config: |
+ ENABLE_PUSH_CREATE_USER = true
+ DEFAULT_BRANCH = main
diff --git a/ansible/roles/ansible_bu_gitea/meta/main.yml b/ansible/roles/ansible_bu_gitea/meta/main.yml
new file mode 100644
index 00000000000..c06debd6e79
--- /dev/null
+++ b/ansible/roles/ansible_bu_gitea/meta/main.yml
@@ -0,0 +1,11 @@
+---
+galaxy_info:
+ role_name: ansible_bu_gitea
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: Setup gitea
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - workshop
+dependencies: []
diff --git a/ansible/roles/ansible_bu_gitea/tasks/main.yml b/ansible/roles/ansible_bu_gitea/tasks/main.yml
new file mode 100644
index 00000000000..d88301f4b32
--- /dev/null
+++ b/ansible/roles/ansible_bu_gitea/tasks/main.yml
@@ -0,0 +1,57 @@
+---
+
+- name: Include role from ansible.workshops collections
+ ansible.builtin.include_role:
+ name: ansible.workshops.vendor_do1jlr_gitea
+
+- name: Install base packages
+ ansible.builtin.dnf:
+ name: "{{ ansible_bu_gitea_certbot_dependencies }}"
+ state: present
+
+- name: Get letsencrypt certs
+ ansible.builtin.command: >-
+ certbot certonly
+ --standalone
+ --no-bootstrap
+ --email ansible-network@redhat.com
+ --agree-tos
+ -d {{ ansible_bu_gitea_fqdn }}
+ --noninteractive
+ register: r_gitea_cert
+ until: r_gitea_cert is not failed
+ retries: 5
+ delay: 30
+
+- name: set permissions on cert directory
+ ansible.builtin.file:
+ path: "{{ item }}"
+ recurse: true
+ mode: '0755'
+ loop:
+ - /etc/letsencrypt/live
+ - /etc/letsencrypt/archive
+
+- name: Restart gitea service
+ ansible.builtin.service:
+ name: gitea
+ state: restarted
+
+- name: Waits for port 443 on gitea to listen
+ ansible.builtin.wait_for:
+ host: "{{ ansible_bu_gitea_fqdn }}"
+ port: 443
+ timeout: 60
+
+- name: Use gitea cli to create user
+ ansible.builtin.command: >
+ /usr/local/bin/gitea -c /etc/gitea/gitea.ini admin user create
+ --username "{{ student_name }}"
+ --password "{{ common_password }}"
+ --email {{ student_name }}@example.com
+ --must-change-password=false
+ become_user: gitea
+ register: gitearesult
+ failed_when:
+ - '"successfully created" not in gitearesult.stdout'
+ - '"user already exists" not in gitearesult.stdout'
diff --git a/ansible/roles/ansible_bu_run_time_inventory/defaults/main.yml b/ansible/roles/ansible_bu_run_time_inventory/defaults/main.yml
new file mode 100644
index 00000000000..aa02f74312c
--- /dev/null
+++ b/ansible/roles/ansible_bu_run_time_inventory/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# Default variables
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_run_time_inventory/meta/main.yml b/ansible/roles/ansible_bu_run_time_inventory/meta/main.yml
new file mode 100644
index 00000000000..34e48cb0856
--- /dev/null
+++ b/ansible/roles/ansible_bu_run_time_inventory/meta/main.yml
@@ -0,0 +1,12 @@
+---
+galaxy_info:
+ role_name: ansible_bu_run_time_inventory
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: |
+ Creates run time inventory for automationcontroller group
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - inventory
+dependencies: []
diff --git a/ansible/roles/ansible_bu_run_time_inventory/tasks/main.yml b/ansible/roles/ansible_bu_run_time_inventory/tasks/main.yml
new file mode 100644
index 00000000000..aca782afd0f
--- /dev/null
+++ b/ansible/roles/ansible_bu_run_time_inventory/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Add control host in automationcontroller group
+ ansible.builtin.add_host:
+ name: "{{ groups['bastions'][0] }}"
+ groups: automationcontroller
diff --git a/ansible/roles/ansible_bu_setup_workshop/defaults/main.yml b/ansible/roles/ansible_bu_setup_workshop/defaults/main.yml
new file mode 100644
index 00000000000..46ccefe77a8
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/defaults/main.yml
@@ -0,0 +1,30 @@
+---
+
+# -------------------------------------------------
+# Default Variables
+# -------------------------------------------------
+workshop_type: rhel
+workshop_version: 1.0.18
+
+# Ansible BU exercise base directory
+ansible_bu_setup_workshop_exercise_src: ansible_rhel
+# Destination directory where exercise will be copied
+# /home/{{ student_name }}/[Destination directory]
+ansible_bu_setup_workshop_exercise_dest: rhel-workshop
+
+ansible_bu_setup_workshop_ee_image:
+ - name: registry.redhat.io/ansible-automation-platform-20-early-access/ee-29-rhel8
+ tag: 2.0.0
+ - name: registry.redhat.io/ansible-automation-platform-20-early-access/ee-supported-rhel8
+ tag: 2.0.0
+ - name: registry.redhat.io/ansible-automation-platform-20-early-access/ee-minimal-rhel8
+ tag: 2.0.0
+
+# -------------------------------------------------
+# Role: ansible.workshops.gitlab_client
+# -------------------------------------------------
+username: "{{ student_name }}"
+student: "{{ student_name }}"
+admin_password: "{{ common_password }}"
+ec2_name_prefix: "{{ guid }}"
+workshop_dns_zone: "{{ sandbox_zone }}"
diff --git a/ansible/roles/ansible_bu_setup_workshop/files/settings.json b/ansible/roles/ansible_bu_setup_workshop/files/settings.json
new file mode 100644
index 00000000000..79e51836b5b
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/files/settings.json
@@ -0,0 +1,17 @@
+{
+ "git.ignoreLegacyWarning": true,
+ "terminal.integrated.experimentalRefreshOnResume": true,
+ "window.menuBarVisibility": "visible",
+ "git.enableSmartCommit": true,
+ "workbench.tips.enabled": false,
+ "workbench.startupEditor": "readme",
+ "telemetry.enableTelemetry": false,
+ "search.smartCase": true,
+ "git.confirmSync": false,
+ "workbench.colorTheme": "Visual Studio Dark",
+ "ansible.ansibleLint.enabled": false,
+ "ansible.ansible.useFullyQualifiedCollectionNames": true,
+ "files.associations": {
+ "*.yml": "ansible"
+ }
+}
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/files/setup.yml b/ansible/roles/ansible_bu_setup_workshop/files/setup.yml
new file mode 100644
index 00000000000..66066b0254b
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/files/setup.yml
@@ -0,0 +1,14 @@
+---
+- name: Capture Setup
+ hosts: node1
+
+ tasks:
+
+ - name: Collect only facts returned by facter
+ ansible.builtin.setup:
+ gather_subset:
+ - 'all'
+ register: setup
+
+ - ansible.builtin.debug:
+ var: setup
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/files/vscode_nginx.conf b/ansible/roles/ansible_bu_setup_workshop/files/vscode_nginx.conf
new file mode 100644
index 00000000000..e6a83d031a1
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/files/vscode_nginx.conf
@@ -0,0 +1,8 @@
+ location /editor/ {
+ proxy_pass http://127.0.0.1:8080/;
+ proxy_set_header Host $host;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection upgrade;
+ proxy_set_header Accept-Encoding gzip;
+ proxy_redirect off;
+ }
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/meta/main.yml b/ansible/roles/ansible_bu_setup_workshop/meta/main.yml
new file mode 100644
index 00000000000..f2d76a7f673
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/meta/main.yml
@@ -0,0 +1,12 @@
+---
+galaxy_info:
+ role_name: ansible_bu_setup_workshop
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: |
+ Setup Ansible BU workshops
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - workshop
+dependencies: []
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/common/ansible-navigator.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/common/ansible-navigator.yml
new file mode 100644
index 00000000000..c7fccb969e0
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/common/ansible-navigator.yml
@@ -0,0 +1,77 @@
+---
+- name: Enable offline automation controller repo
+ community.general.ini_file:
+ path: "/etc/yum.repos.d/ansible-automation-platform.repo"
+ section: ansible-automation-platform
+ option: enabled
+ value: 1
+
+- name: Install ansible core & navigator
+ ansible.builtin.dnf:
+ name:
+ - ansible-core
+ - ansible-navigator
+ state: present
+
+- name: Install ansible.cfg in /etc/ansible
+ ansible.builtin.template:
+ src: ./templates/ansible.cfg.j2
+ dest: "/etc/ansible/ansible.cfg"
+
+- name: Create workshop inventory directories
+ ansible.builtin.file:
+ path: "/home/{{ student_name }}/lab_inventory/"
+ state: directory
+ mode: '0755'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+- name: Generate ansible inventory from template
+ ansible.builtin.template:
+ src: ./templates/hosts/{{ workshop_type }}.j2
+ dest: "/home/{{ student_name }}/lab_inventory/hosts"
+ mode: '0644'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+- name: Copy ansible-navigator file
+ ansible.builtin.template:
+ src: ./templates/ansible-navigator.yml.j2
+ dest: "/home/{{ student_name }}/.ansible-navigator.yml"
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0644'
+
+- name: Pull images for student
+ become: true
+ become_user: "{{ student_name }}"
+ block:
+ - name: Login to registry.redhat.io
+ containers.podman.podman_login:
+ registry: registry.redhat.io
+ username: "{{ registry_username }}"
+ password: "{{ registry_password }}"
+
+ - name: Pull images for student
+ become_user: "{{ student_name }}"
+ containers.podman.podman_image:
+ name: "{{ item.name }}"
+ pull: true
+ tag: "{{ item.tag }}"
+ retries: 5
+ loop: "{{ ansible_bu_setup_workshop_ee_image }}"
+
+- name: print out user.info
+ agnosticd_user_info:
+ msg: |
+ Automation Controller URL: https://{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}
+ Automation Controller User: {{ student_name }}
+ Automation Controller User Password: {{ student_password }}
+
+- name: Save user data
+ agnosticd_user_info:
+ data:
+ automationcontroller_url: "https://{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}"
+ automationcontroller_user_name: "{{ student_name }}"
+ automationcontroller_user_password: "{{ student_password }}"
+
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/common/automation-controller.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/common/automation-controller.yml
new file mode 100644
index 00000000000..aa22e311d03
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/common/automation-controller.yml
@@ -0,0 +1,20 @@
+---
+- name: Create automation user
+ environment:
+ CONTROLLER_HOST: "{{ aap_auth.controller_host | default(aap_controller_web_url) }}"
+ CONTROLLER_USERNAME: "{{ aap_auth.controller_username | default(aap_controller_admin_user) | default('admin') }}"
+ CONTROLLER_PASSWORD: "{{ aap_auth.controller_password | default(aap_controller_admin_password) }}"
+ CONTROLLER_VERIFY_SSL: "{{ aap_auth.controller_verify_ssl | default('true') }}"
+ awx.awx.user:
+ username: "{{ student_name }}"
+ password: "{{ student_password }}"
+ is_superuser: true
+ state: present
+
+- name: Clean up
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "/tmp/automationcontroller_installer"
+ - "/tmp/automationcontroller.tar.gz"
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/common/code-server.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/common/code-server.yml
new file mode 100644
index 00000000000..946deafcf7c
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/common/code-server.yml
@@ -0,0 +1,31 @@
+---
+- name: Clean up
+ ansible.builtin.file:
+ path: "/tmp/code-server.rpm"
+ state: absent
+
+- name: Apply code server defaults
+ ansible.builtin.template:
+ src: ./files/settings.json
+ dest: "/home/{{ student_name }}/.local/share/code-server/User/settings.json"
+ owner: "{{ student_name }}"
+
+- name: Copy coder.json template
+ ansible.builtin.template:
+ src: ./templates/coder.json.j2
+ dest: "/home/{{ student_name }}/.local/share/code-server/coder.json"
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0644'
+
+- name: print out user.info
+ agnosticd_user_info:
+ msg: |
+ VScode Server URL: https://{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}/editor/
+ VScode Server User Password: {{ student_password }}
+
+- name: Save user data
+ agnosticd_user_info:
+ data:
+ vscode_server_url: "https://{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}/editor/"
+ vscode_server_password: "{{ student_password }}"
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/main.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/main.yml
new file mode 100644
index 00000000000..75e200cef64
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Put ssh-key in proper spot for student
+ ansible.builtin.copy:
+ src: "/home/{{ student_name }}/.ssh/{{ guid }}key.pem"
+ dest: "/home/{{ student_name }}/.ssh/id_rsa"
+ remote_src: true
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0400'
+
+- name: Include rhel tasks
+ when: workshop_type == "rhel"
+ ansible.builtin.include_tasks:
+ file: ./rhel.yml
+
+- name: Include rhel 90 tasks
+ when: workshop_type == "rhel_90"
+ ansible.builtin.include_tasks:
+ file: ./rhel_90.yml
+
+- name: Include windows tasks
+ when: workshop_type == "windows"
+ ansible.builtin.include_tasks:
+ file: ./windows.yml
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/rhel.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/rhel.yml
new file mode 100644
index 00000000000..63c12a48070
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/rhel.yml
@@ -0,0 +1,41 @@
+---
+
+- name: Include ansible-navigator tasks
+ ansible.builtin.include_tasks:
+ file: ./common/ansible-navigator.yml
+
+- name: Include code-server tasks
+ ansible.builtin.include_tasks:
+ file: ./common/code-server.yml
+
+- name: template out motd
+ ansible.builtin.template:
+ src: ./templates/motd.j2
+ dest: /etc/motd
+
+- name: copy setup.yml playbook
+ ansible.builtin.copy:
+ src: ./files/setup.yml
+ dest: "/home/{{ student_name }}/setup.yml"
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+- name: Clone rhel-workshop
+ ansible.builtin.git:
+ repo: https://github.com/ansible/workshops.git
+ dest: /tmp/workshops
+ version: devel
+
+- name: Copy rhel-workshop to users home
+ ansible.builtin.copy:
+ src: "/tmp/workshops/exercises/{{ ansible_bu_setup_workshop_exercise_src }}/"
+ dest: "/home/{{ student_name }}/rhel-workshop/{{ ansible_bu_setup_workshop_exercise_dest }}/"
+ remote_src: true
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0755'
+
+- name: Clean workshop clone directory
+ ansible.builtin.file:
+ path: "/tmp/workshops"
+ state: absent
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/rhel_90.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/rhel_90.yml
new file mode 100644
index 00000000000..4acd51092f7
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/rhel_90.yml
@@ -0,0 +1,66 @@
+---
+- name: Include ansible-navigator tasks
+ ansible.builtin.include_tasks:
+ file: ./common/ansible-navigator.yml
+
+- name: Include code-server tasks
+ ansible.builtin.include_tasks:
+ file: ./common/code-server.yml
+
+- name: Include automation-controller tasks
+ ansible.builtin.include_tasks:
+ file: ./common/automation-controller.yml
+
+- name: template out motd
+ ansible.builtin.template:
+ src: ./templates/motd.j2
+ dest: /etc/motd
+
+- name: copy setup.yml playbook
+ ansible.builtin.copy:
+ src: ./files/setup.yml
+ dest: "/home/{{ student_name }}/setup.yml"
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+- name: Clone rhel-workshop
+ ansible.builtin.git:
+ repo: https://github.com/ansible/workshops.git
+ dest: /tmp/workshops
+ version: devel
+
+- name: Copy rhel-workshop to users home
+ ansible.builtin.copy:
+ src: "/tmp/workshops/exercises/{{ ansible_bu_setup_workshop_exercise_src }}/"
+ dest: "/home/{{ student_name }}/{{ ansible_bu_setup_workshop_exercise_dest }}/"
+ remote_src: true
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0755'
+
+- name: Copy selected workshop exercises to users home
+ ansible.builtin.copy:
+ src: "/tmp/workshops/exercises/{{ item.src }}/"
+ dest: "/home/{{ student_name }}/{{ ansible_bu_setup_workshop_exercise_dest }}/{{ item.dest }}"
+ remote_src: true
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0755'
+ loop:
+ - src: ansible_rhel/0.0-support-docs
+ dest: 0.0-support-docs
+ - src: ansible_rhel/1.1-setup
+ dest: 1-setup
+ - src: ansible_rhel/1.2-thebasics
+ dest: 2-thebasics
+ - src: ansible_rhel/1.3-playbook
+ dest: 3-playbook
+ - src: ansible_rhel/1.4-variables
+ dest: 4-variables
+ - src: ansible_rhel/2.4-surveys
+ dest: 5-surveys
+
+- name: Clean workshop clone directory
+ ansible.builtin.file:
+ path: "/tmp/workshops"
+ state: absent
diff --git a/ansible/roles/ansible_bu_setup_workshop/tasks/windows.yml b/ansible/roles/ansible_bu_setup_workshop/tasks/windows.yml
new file mode 100644
index 00000000000..0e0bdcc67a3
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/tasks/windows.yml
@@ -0,0 +1,37 @@
+---
+- name: Include ansible-navigator tasks
+ ansible.builtin.include_tasks:
+ file: ./common/ansible-navigator.yml
+
+- name: Include code-server tasks
+ ansible.builtin.include_tasks:
+ file: ./common/code-server.yml
+
+- name: template out motd
+ ansible.builtin.template:
+ src: ./templates/motd.j2
+ dest: /etc/motd
+
+- name: Configure git client
+ ansible.builtin.include_role:
+ name: ansible.workshops.gitlab_client
+
+- name: Clone rhel-workshop
+ ansible.builtin.git:
+ repo: https://github.com/ansible/workshops.git
+ dest: /tmp/workshops
+ version: devel
+
+- name: Copy rhel-workshop to users home
+ ansible.builtin.copy:
+ src: "/tmp/workshops/exercises/ansible_windows/"
+ dest: "/home/{{ student_name }}/windows-workshop/"
+ remote_src: true
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ mode: '0755'
+
+- name: Clean workshop clone directory
+ ansible.builtin.file:
+ path: "/tmp/workshops"
+ state: absent
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/ansible-navigator.yml.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/ansible-navigator.yml.j2
new file mode 100644
index 00000000000..06503d22397
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/ansible-navigator.yml.j2
@@ -0,0 +1,16 @@
+---
+ansible-navigator:
+ ansible:
+ inventory:
+ entries:
+ - /home/{{ student_name }}/lab_inventory/hosts
+
+ execution-environment:
+ image: {{ ansible_bu_setup_workshop_ee_image.0.name }}:{{ ansible_bu_setup_workshop_ee_image.0.tag }}
+ enabled: true
+ container-engine: podman
+ pull:
+ policy: missing
+ volume-mounts:
+ - src: "/etc/ansible/"
+ dest: "/etc/ansible/"
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/ansible.cfg.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/ansible.cfg.j2
new file mode 100644
index 00000000000..a2c9a5c5eff
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/ansible.cfg.j2
@@ -0,0 +1,16 @@
+[defaults]
+stdout_callback = yaml
+connection = smart
+timeout = 60
+deprecation_warnings = False
+action_warnings = False
+system_warnings = False
+devel_warning = False
+host_key_checking = False
+collections_on_ansible_version_mismatch = ignore
+retry_files_enabled = False
+interpreter_python = auto_silent
+inventory = /home/{{ student_name }}/lab_inventory/hosts
+[persistent_connection]
+connect_timeout = 200
+command_timeout = 200
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/coder.json.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/coder.json.j2
new file mode 100644
index 00000000000..b394393b359
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/coder.json.j2
@@ -0,0 +1,15 @@
+{
+ "query": {
+ {% if workshop_type == "rhel" %}
+ "folder": "/home/{{ student_name }}/rhel-workshop/{{ ansible_bu_setup_workshop_exercise_dest }}/"
+ {% elif workshop_type == "rhel_90" %}
+ "folder": "/home/{{ student_name }}/{{ ansible_bu_setup_workshop_exercise_dest }}/"
+ {% elif workshop_type == "windows" %}
+ "folder": "/home/{{ student_name }}/windows-workshop/"
+ {% endif %}
+ },
+ "update": {
+ "checked": 1688360316288,
+ "version": "4.14.1"
+ }
+}
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/hosts/rhel.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/hosts/rhel.j2
new file mode 100644
index 00000000000..fb401d9e947
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/hosts/rhel.j2
@@ -0,0 +1,15 @@
+
+[web]
+{% for host in groups['nodes'] %}
+{{ host.split('.')[0] }} ansible_host={{ host }}
+{% endfor %}
+
+[control]
+ansible-1 ansible_host={{ groups['bastions'][0] }}
+
+[all:vars]
+timeout=60
+ansible_user={{ remote_user }}
+ansible_ssh_private_key_file="~/.ssh/{{ guid }}key.pem"
+ansible_ssh_common_args="-o StrictHostKeyChecking=no"
+ansible_become=true
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/hosts/rhel_90.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/hosts/rhel_90.j2
new file mode 100644
index 00000000000..fb401d9e947
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/hosts/rhel_90.j2
@@ -0,0 +1,15 @@
+
+[web]
+{% for host in groups['nodes'] %}
+{{ host.split('.')[0] }} ansible_host={{ host }}
+{% endfor %}
+
+[control]
+ansible-1 ansible_host={{ groups['bastions'][0] }}
+
+[all:vars]
+timeout=60
+ansible_user={{ remote_user }}
+ansible_ssh_private_key_file="~/.ssh/{{ guid }}key.pem"
+ansible_ssh_common_args="-o StrictHostKeyChecking=no"
+ansible_become=true
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/hosts/windows.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/hosts/windows.j2
new file mode 100644
index 00000000000..c06a313bed3
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/hosts/windows.j2
@@ -0,0 +1,27 @@
+
+[windows:vars]
+ansible_connection=winrm
+ansible_winrm_transport=credssp
+ansible_winrm_server_cert_validation=ignore
+ansible_port=5986
+
+[control_nodes:vars]
+ansible_port=22
+ansible_ssh_user=ec2-user
+ansible_ssh_private_key_file="~/.ssh/{{ guid }}key.pem"
+
+[student1]
+{{ student_name }}-{{ groups['windows'][0].split('.')[0] }} ansible_host={{ groups['windows'][0].split('.')[0] }}.{{ guid }}.{{ sandbox_zone }} ansible_user=Administrator ansible_password="{{ windows_password }}"
+{{ student_name }}-{{ groups['bastions'][0].split('.')[0] }} ansible_host={{ groups['bastions'][0] }}
+
+[all]
+{{ student_name }}-{{ groups['bastions'][0].split('.')[0] }}
+{{ student_name }}-{{ groups['windows'][0].split('.')[0] }}
+
+[attendance]
+
+[control_nodes]
+{{ student_name }}-{{ groups['bastions'][0].split('.')[0] }}
+
+[windows]
+{{ student_name }}-{{ groups['windows'][0].split('.')[0] }}
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/motd.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/motd.j2
new file mode 100644
index 00000000000..9c32010c298
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/motd.j2
@@ -0,0 +1,22 @@
+#### This workbench is for {{ student_name | default('student') }} ####
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@@@@@@@@@@ ############ m@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@@@@@@@@@ ################ m@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@@@@@@@@ ################# m@@@@@@@@@@@@ @@@@@@@@@@@@@@@@@@@@ @@@@ @@@@@ @@@@@@@@@@@@@@@@@@@
+@@@@@@@@@ ################## @@@@@@@@@@@ @@@ #@@@@@@@@@@@@@@@@@ @@@@ @@@@@ @@@@@@@@@@@ @@@@@
+@@@ ##### @@############### #m@@@@@@ @@@@ @ @@@ @@@@ @@@@@ @ @@ @@
+@@ ####### ########### m@@@@@@ @ @@@ @ @@@@ @@@@ @@@@@@ @@ @@@@@
+@@@ ######### ######## #m@@ @@ @@ ....@ @@@@ @@@@ @@@@@ @ @@ @@ @@@@@
+@@@@@ ###########@ ####### #m@ @@@ @@ @@@ @@@@ @@@@@ @ @@@ @@@
+@@@@@@@ ######################### m@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@@@@@@@@@@ #################### m@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@@@@@@@@@@@@@@@ ############# m@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+#### This workbench is for {{ student_name | default('student') }} ####
+- Public FQDN: {{ student_name | default('student') }}.{{ ec2_name_prefix|default("ansible") }}.{{ workshop_dns_zone|default("demo") }}
+— Local FQDN: {{ ansible_fqdn }}
+— Distro: {{ ansible_distribution }} {{ ansible_distribution_version }} {{ ansible_distribution_release }}
+— Virtual: {{ 'YES' if ansible_virtualization_role == 'guest' else 'NO' }}
+— CPUs: {{ ansible_processor_vcpus }}
+— RAM: {{ (ansible_memtotal_mb / 1000) | round(1) }}GB
+- Workshop Version {{ workshop_version }}
\ No newline at end of file
diff --git a/ansible/roles/ansible_bu_setup_workshop/templates/ssh_config.j2 b/ansible/roles/ansible_bu_setup_workshop/templates/ssh_config.j2
new file mode 100644
index 00000000000..60f3da720fa
--- /dev/null
+++ b/ansible/roles/ansible_bu_setup_workshop/templates/ssh_config.j2
@@ -0,0 +1,10 @@
+Host *
+ User ec2-user
+ IdentityFile ~/.ssh/{{ guid }}key.pem
+ ForwardAgent yes
+ StrictHostKeyChecking no
+ ConnectTimeout 600
+ ConnectionAttempts 10
+ ControlMaster auto
+ ControlPath /tmp/%h-%r
+ ControlPersist 5m
diff --git a/ansible/roles/bastion-lite/tasks/main.yml b/ansible/roles/bastion-lite/tasks/main.yml
index b9d732bf3c8..917c370dc30 100644
--- a/ansible/roles/bastion-lite/tasks/main.yml
+++ b/ansible/roles/bastion-lite/tasks/main.yml
@@ -1,6 +1,7 @@
---
-# Generate an SSH key on the Bastion and configure access on all the hosts
-- ansible.builtin.include_tasks: ./create_bastion_ssh_key_and_access.yml
+- name: Generate an SSH key on the Bastion and configure access on all the hosts
+ ansible.builtin.include_tasks:
+ file: ./create_bastion_ssh_key_and_access.yml
- name: Generate .ssh/config
ansible.builtin.template:
diff --git a/ansible/roles/bookbag/tasks/workload.yaml b/ansible/roles/bookbag/tasks/workload.yaml
index 5f5d439cd74..6a6970571c5 100644
--- a/ansible/roles/bookbag/tasks/workload.yaml
+++ b/ansible/roles/bookbag/tasks/workload.yaml
@@ -1,6 +1,6 @@
---
- name: Get bookbag namespace
- k8s_info:
+ kubernetes.core.k8s_info:
kubeconfig: "{{ _bookbag_kubeconfig | default(omit) }}"
api_version: project.openshift.io/v1
kind: Project
@@ -10,7 +10,7 @@
- name: Create bookbag namespace
when: r_get_bookbag_namespace.resources | default([]) | length == 0
- k8s:
+ kubernetes.core.k8s:
kubeconfig: "{{ _bookbag_kubeconfig | default(omit) }}"
definition:
apiVersion: project.openshift.io/v1
@@ -18,30 +18,35 @@
metadata:
name: "{{ bookbag_namespace }}"
register: r_create_bookbag_namespace
+ # Work around https://github.com/ansible-collections/kubernetes.core/issues/623
+ failed_when: >-
+ r_create_bookbag_namespace is failed and
+ 'AlreadyExists' not in r_create_bookbag_namespace.msg | default('')
until: r_create_bookbag_namespace is successful
retries: 10
delay: 5
+ ignore_errors: true
- name: Create temporary directory for bookbag source
- tempfile:
+ ansible.builtin.tempfile:
prefix: bookbag-{{ guid }}-
state: directory
register: r_bookbag_tmp
- name: Clone bookbag repository to output dir
- git:
+ ansible.builtin.git:
repo: "{{ bookbag_git_repo }}"
version: "{{ bookbag_git_version | default(omit) }}"
dest: "{{ r_bookbag_tmp.path }}"
- name: Process bookbag build template
- command: >-
+ ansible.builtin.command: >-
oc process --local -f {{ (r_bookbag_tmp.path ~ '/build-template.yaml') | quote }} -o json
--param GIT_REPO={{ bookbag_git_repo | quote }}
register: r_process_build_template
- name: Apply resources from build template
- k8s:
+ kubernetes.core.k8s:
kubeconfig: "{{ _bookbag_kubeconfig | default(omit) }}"
namespace: "{{ bookbag_namespace }}"
definition: "{{ item }}"
@@ -54,7 +59,7 @@
delay: 5
- name: Build bookbag image
- command: >-
+ ansible.builtin.command: >-
oc start-build bookbag --follow --wait
{% if _bookbag_kubeconfig is defined %}--kubeconfig={{ _bookbag_kubeconfig | quote }}{% endif %}
--namespace={{ bookbag_namespace | quote }}
@@ -69,26 +74,26 @@
delay: 10
- name: Read user-data.yaml
- slurp:
+ ansible.builtin.slurp:
src: "{{ hostvars.localhost.output_dir ~ '/user-data.yaml' }}"
delegate_to: localhost
register: r_user_data
- name: Read user-info.yaml
- slurp:
+ ansible.builtin.slurp:
src: "{{ hostvars.localhost.output_dir ~ '/user-info.yaml' }}"
delegate_to: localhost
register: r_user_info
- name: Set fact for user data and info
- set_fact:
+ ansible.builtin.set_fact:
_bookbag_user_data: "{{ r_user_data.content | b64decode | from_yaml | default({}, true) }}"
_bookbag_user_info: '{{ r_user_info.content | b64decode | from_yaml | default([], true) | join("\n") }}'
- name: Deploy bookbag for environment
when:
- _bookbag_user_data.users is undefined
- include_tasks:
+ ansible.builtin.include_tasks:
file: deploy-bookbag.yaml
vars:
_bookbag_instance_name: "{{ bookbag_name }}"
@@ -103,7 +108,7 @@
{{ _bookbag_user_data.users | dict2items }}
loop_control:
loop_var: _bookbag_users_item
- include_tasks:
+ ansible.builtin.include_tasks:
file: deploy-bookbag.yaml
vars:
_bookbag_instance_name: "{{ bookbag_name }}-{{ _bookbag_user }}"
@@ -113,7 +118,7 @@
{{ _bookbag_users_item.value | combine({'guid': guid, 'user': _bookbag_user}) }}
# Leave this as the last task in the playbook.
-- name: workload tasks complete
- debug:
+- name: Workload tasks complete
+ ansible.builtin.debug:
msg: "Workload Tasks completed successfully."
when: not silent|bool
diff --git a/ansible/roles/eda_controller_config/README.md b/ansible/roles/eda_controller_config/README.md
new file mode 100644
index 00000000000..93bfaa61ba8
--- /dev/null
+++ b/ansible/roles/eda_controller_config/README.md
@@ -0,0 +1,4 @@
+# eda-controller-config
+Configures EDA controller
+
+
diff --git a/ansible/roles/eda_controller_config/defaults/main.yml b/ansible/roles/eda_controller_config/defaults/main.yml
new file mode 100644
index 00000000000..1744e099854
--- /dev/null
+++ b/ansible/roles/eda_controller_config/defaults/main.yml
@@ -0,0 +1,79 @@
+---
+# --------------------------------------------------------
+# Demo git repository for EDA Controller rulebooks
+# --------------------------------------------------------
+eda_controller_config_clone_demo_repo_enable: true
+eda_controller_config_clone_demo_repo_url: >-
+ https://github.com/redhat-gpte-devopsautomation/demo-event-driven-ansible.git
+
+# --------------------------------------------------------
+# EDA Controller URL, Username and Password
+# --------------------------------------------------------
+# eda_controller_config_url: [required https://example.com]
+# eda_controller_config_username: [required]
+# eda_controller_config_password: [required]
+
+# --------------------------------------------------------
+# List of AWX Tokens to be created if defined
+# --------------------------------------------------------
+# eda_controller_config_awx_tokens:
+# - name: [required]
+# token: [required]
+# description: [optional]
+
+# --------------------------------------------------------
+# List of EDA credentials to be created if defined
+# --------------------------------------------------------
+# eda_controller_config_credentials:
+# - name: [required]
+# description: [optional]
+# username: [required]
+# token: [required]
+# credential_type: [required options
+ # "GitHub Personal Access Token" or
+ # "GitLab Personal Access Token" or
+ # "Container registry"
+ # ]
+
+# --------------------------------------------------------
+# List of Decision Environments to be created if defined
+# --------------------------------------------------------
+eda_controller_config_decision_envs:
+ - name: "de-for-ocp"
+ image_url: "quay.io/mitsharm/eda/de-for-ocp"
+ # description: [optional "Decision Environment for OpenShift"]
+ # credential: [optional]
+
+# --------------------------------------------------------
+# List of Projects to be created if defined
+# --------------------------------------------------------
+eda_controller_config_projects:
+ - name: "OpenShift events"
+ repo_url: "https://github.com/miteshget/eda-test.git"
+ # description: [optional "Event-driven Ansible rulebooks"]
+ # credential: [optional]
+
+# --------------------------------------------------------
+# List of Rulebook Activations to be created if defined
+# --------------------------------------------------------
+eda_controller_config_rulebook_activations:
+ - name: "Patch Route"
+ project: "OpenShift events"
+ rulebook: "patch_route.yml"
+ decision_env: "de-for-ocp"
+ # restart_policy: [optional default="always"]
+ # description: [optional "Patch OpenShift Routes"]
+ # enabled: [optional Default=true]
+ - name: "Resource Quota Set"
+ project: "OpenShift events"
+ rulebook: "resource_quota.yml"
+ decision_env: "de-for-ocp"
+ restart_policy: "always"
+ # restart_policy: [optional default="always"]
+ # enabled: [optional Default=true]
+ - name: "Create Volume Snapshot"
+ project: "OpenShift events"
+ rulebook: "volume_snapshot.yml"
+ decision_env: "de-for-ocp"
+ # restart_policy: [optional default="always"]
+ # enabled: [optional Default=true]
diff --git a/ansible/roles/eda_controller_config/meta/main.yml b/ansible/roles/eda_controller_config/meta/main.yml
new file mode 100644
index 00000000000..15c323ae3b0
--- /dev/null
+++ b/ansible/roles/eda_controller_config/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Mitesh Sharma
+ description: Event-driven Ansible Configuration Role
+ company: Red Hat
+ license: GPLv3
+ min_ansible_version: "2.9"
+ galaxy_tags:
+ - eda
+ - eventdrivenansible
+ - eventdriven
+ - event
+ - driven
+ - ansible
+dependencies: []
diff --git a/ansible/roles/eda_controller_config/tasks/create/create_awx_token.yml b/ansible/roles/eda_controller_config/tasks/create/create_awx_token.yml
new file mode 100644
index 00000000000..9662061e0f2
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/create/create_awx_token.yml
@@ -0,0 +1,27 @@
+---
+- name: Set up display
+ ansible.builtin.set_fact:
+ used_for: "AWX Token"
+
+- name: Find credential id
+ ansible.builtin.include_tasks: list/list_awx_token.yml
+ vars:
+ awx_token_name: "{{ item.name }}"
+
+- name: Create EDA AWX Token
+ when: _token_id | length == 0
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/users/me/awx-tokens/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: POST
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 201
+ body: >-
+ {
+ "name": "{{ item.name }}",
+ "description": "{{ item.description | default(item.name) }}",
+ "token": "{{ item.token }}"
+ }
diff --git a/ansible/roles/eda_controller_config/tasks/create/create_credential.yml b/ansible/roles/eda_controller_config/tasks/create/create_credential.yml
new file mode 100644
index 00000000000..c40e21df603
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/create/create_credential.yml
@@ -0,0 +1,29 @@
+---
+- name: Set up display
+ ansible.builtin.set_fact:
+ used_for: "Credential"
+
+- name: Find credential id
+ ansible.builtin.include_tasks: list/list_credential.yml
+ vars:
+ credential_name: "{{ item.name }}"
+
+- name: Create EDA Credential
+ when: _credential_id | length == 0
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/credentials/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: POST
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 201
+ body: >-
+ {
+ "name": "{{ item.name }}",
+ "description": "{{ item.description | default(item.name) }}",
+ "username": "{{ item.username }}",
+ "secret": "{{ item.token }}",
+ "credential_type": "{{ item.credential_type }}"
+ }
diff --git a/ansible/roles/eda_controller_config/tasks/create/create_decision_env.yml b/ansible/roles/eda_controller_config/tasks/create/create_decision_env.yml
new file mode 100644
index 00000000000..a12b2c4df1f
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/create/create_decision_env.yml
@@ -0,0 +1,37 @@
+---
+- name: Set up display
+ ansible.builtin.set_fact:
+ used_for: "Decision Environment"
+
+- name: Find credential id
+ when: item.credential is defined
+ ansible.builtin.include_tasks: list/list_credential.yml
+ vars:
+ credential_name: "{{ item.credential }}"
+
+- name: Find decision_env id
+ ansible.builtin.include_tasks: list/list_decision_env.yml
+ vars:
+ decision_env_name: "{{ item.name }}"
+
+- name: Create Decision Environment
+ when:
+ - _decision_env_id | length == 0
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/decision-environments/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: POST
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 201
+ body: >-
+ {
+ "name": "{{ item.name }}",
+ "description": "{{ item.description | default(item.name) }}",
+ {%if item.credential is defined %}
+ "credential_id": "{{ _credential_id }}",
+ {%endif%}
+ "image_url": "{{ item.image_url }}"
+ }
diff --git a/ansible/roles/eda_controller_config/tasks/create/create_project.yml b/ansible/roles/eda_controller_config/tasks/create/create_project.yml
new file mode 100644
index 00000000000..0ac35e6e904
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/create/create_project.yml
@@ -0,0 +1,36 @@
+---
+- name: Set up display
+ set_fact:
+ used_for: "Project"
+
+- name: Find credential id
+ when: item.credential is defined
+ ansible.builtin.include_tasks: list/list_credential.yml
+ vars:
+ credential_name: "{{ item.credential }}"
+
+- name: Find project id
+ ansible.builtin.include_tasks: list/list_project.yml
+ vars:
+ project_name: "{{ item.name }}"
+
+- name: Create EDA Project
+ when: _project_id | length == 0
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/projects/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: POST
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 201
+ body: >-
+ {
+ "name": "{{ item.name }}",
+ "description": "{{ item.description | default(item.name) }}",
+ {%if item.credential is defined %}
+ "credential_id": "{{ _credential_id }}",
+ {%endif%}
+ "url": "{{ item.repo_url }}"
+ }
diff --git a/ansible/roles/eda_controller_config/tasks/create/create_rulebook_activation.yml b/ansible/roles/eda_controller_config/tasks/create/create_rulebook_activation.yml
new file mode 100644
index 00000000000..0c2b0877d29
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/create/create_rulebook_activation.yml
@@ -0,0 +1,50 @@
+---
+- name: Set up display
+ set_fact:
+ used_for: "Rulebook Activation"
+
+- name: Find decision_env id
+ ansible.builtin.include_tasks: list/list_decision_env.yml
+ vars:
+ decision_env_name: "{{ item.decision_env }}"
+
+- name: Find project id
+ ansible.builtin.include_tasks: list/list_project.yml
+ vars:
+ project_name: "{{ item.project }}"
+
+- name: Find rulebook id
+ ansible.builtin.include_tasks: list/list_rulebook.yml
+ vars:
+ rulebook_name: "{{ item.rulebook }}"
+
+- name: Find activation id
+ ansible.builtin.include_tasks: list/list_activation.yml
+ vars:
+ rulebook_activation_name: "{{ item.name }}"
+
+- name: Create Rulebook Activation
+ when:
+ - _activation_id | length == 0
+ - _project_id | length > 0
+ - _rulebook_id | length > 0
+ - _decision_env_id | length > 0
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/activations/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: POST
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 201
+ body: >-
+ {
+ "name": "{{ item.name }}",
+ "description": "{{ item.description | default(item.name) }}",
+ "project_id": "{{ _project_id }}",
+ "rulebook_id": "{{ _rulebook_id }}",
+ "decision_environment_id": "{{ _decision_env_id }}",
+ "restart_policy": "{{ item.restart_policy | default('always') }}",
+ "is_enabled": "{{ item.enabled | default(true) }}"
+ }
diff --git a/ansible/roles/eda_controller_config/tasks/list/list_activation.yml b/ansible/roles/eda_controller_config/tasks/list/list_activation.yml
new file mode 100644
index 00000000000..588ac0b01ce
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/list/list_activation.yml
@@ -0,0 +1,27 @@
+---
+- name: "[{{ used_for | d('') }}] Fetch Activations"
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/activations/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: _r_list_activation
+
+- name: List Activations
+ ansible.builtin.debug:
+ msg: "{{ _r_list_activation }}"
+ verbosity: 2
+
+- name: "[{{ used_for | d('') }}] Find Activation ID"
+ vars:
+ _query: '[?name == `{{ rulebook_activation_name }}`].id'
+ ansible.builtin.set_fact:
+ _activation_id: "{{ _r_list_activation.json.results | json_query(_query) | join }}"
+
+- name: "[{{ used_for | d('') }}] Activation ID"
+ ansible.builtin.debug:
+ msg: "{{ _activation_id }}"
diff --git a/ansible/roles/eda_controller_config/tasks/list/list_awx_token.yml b/ansible/roles/eda_controller_config/tasks/list/list_awx_token.yml
new file mode 100644
index 00000000000..79fa9da4763
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/list/list_awx_token.yml
@@ -0,0 +1,27 @@
+---
+- name: "[{{ used_for | d('') }}] Fetch Tokens"
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/users/me/awx-tokens/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: _r_list_token
+
+- name: "[{{ used_for | d('') }}] List Tokens"
+ ansible.builtin.debug:
+ msg: "{{ _r_list_token }}"
+ verbosity: 2
+
+- name: "[{{ used_for | d('') }}] Find Token ID"
+ vars:
+ _query: '[?name == `{{ awx_token_name }}`].id'
+ ansible.builtin.set_fact:
+ _token_id: "{{ _r_list_token.json.results | json_query(_query) | join }}"
+
+- name: "[{{ used_for | d('') }}] Token ID"
+ ansible.builtin.debug:
+ msg: "{{ _token_id }}"
diff --git a/ansible/roles/eda_controller_config/tasks/list/list_credential.yml b/ansible/roles/eda_controller_config/tasks/list/list_credential.yml
new file mode 100644
index 00000000000..fc753931426
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/list/list_credential.yml
@@ -0,0 +1,27 @@
+---
+- name: "[{{ used_for | d('') }}] Fetch Credentials"
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/credentials/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: _r_list_credential
+
+- name: "[{{ used_for | d('') }}] List Credentials"
+ ansible.builtin.debug:
+ msg: "{{ _r_list_credential }}"
+ verbosity: 2
+
+- name: Find Credential ID
+ vars:
+ _query: '[?name == `{{ credential_name }}`].id'
+ ansible.builtin.set_fact:
+ _credential_id: "{{ _r_list_credential.json.results | json_query(_query) | join }}"
+
+- name: "[{{ used_for | d('') }}] Credential ID"
+ ansible.builtin.debug:
+ msg: "{{ _credential_id }}"
diff --git a/ansible/roles/eda_controller_config/tasks/list/list_decision_env.yml b/ansible/roles/eda_controller_config/tasks/list/list_decision_env.yml
new file mode 100644
index 00000000000..248324d7bce
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/list/list_decision_env.yml
@@ -0,0 +1,27 @@
+---
+- name: "[{{ used_for | d('') }}] Fetch Decision Environments"
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/decision-environments/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: _r_list_de
+
+- name: "[{{ used_for | d('') }}] List decision environments"
+ ansible.builtin.debug:
+ msg: "{{ _r_list_de.json.results }}"
+ verbosity: 2
+
+- name: Find decision env id
+ vars:
+ _query: '[?name == `{{ decision_env_name }}`].id'
+ ansible.builtin.set_fact:
+ _decision_env_id: "{{ _r_list_de.json.results | json_query(_query) | join }}"
+
+- name: "[{{ used_for | d('') }}] Decision Env ID"
+ ansible.builtin.debug:
+ msg: "{{ _decision_env_id }}"
diff --git a/ansible/roles/eda_controller_config/tasks/list/list_project.yml b/ansible/roles/eda_controller_config/tasks/list/list_project.yml
new file mode 100644
index 00000000000..3384622232a
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/list/list_project.yml
@@ -0,0 +1,27 @@
+---
+- name: "[{{ used_for | d('') }}] Fetch Projects"
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/projects/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: _r_list_project
+
+- name: "[{{ used_for | d('') }}] List projects"
+ ansible.builtin.debug:
+ msg: "{{ _r_list_project }}"
+ verbosity: 2
+
+- name: Find Project ID
+ vars:
+ _query: '[?name == `{{ project_name }}`].id'
+ ansible.builtin.set_fact:
+ _project_id: "{{ _r_list_project.json.results | json_query(_query) | join }}"
+
+- name: "[{{ used_for | d('') }}] Project ID"
+ ansible.builtin.debug:
+ msg: "{{ _project_id }}"
diff --git a/ansible/roles/eda_controller_config/tasks/list/list_rulebook.yml b/ansible/roles/eda_controller_config/tasks/list/list_rulebook.yml
new file mode 100644
index 00000000000..10759c776ad
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/list/list_rulebook.yml
@@ -0,0 +1,27 @@
+---
+- name: "[{{ used_for | d('') }}] Fetch Rulebooks"
+ ansible.builtin.uri:
+ url: "{{ eda_controller_config_url }}/api/eda/v1/rulebooks/"
+ user: "{{ eda_controller_config_username }}"
+ password: "{{ eda_controller_config_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: _r_list_rulebook
+
+- name: "[{{ used_for | d('') }}] List Rulebooks"
+ ansible.builtin.debug:
+ msg: "{{ _r_list_rulebook }}"
+ verbosity: 2
+
+- name: Find Rulebook ID
+ vars:
+ _query: '[?name == `{{ rulebook_name }}`].id'
+ ansible.builtin.set_fact:
+ _rulebook_id: "{{ _r_list_rulebook.json.results | json_query(_query) | join }}"
+
+- name: "[{{ used_for | d('') }}] Rulebook ID"
+ ansible.builtin.debug:
+ msg: "{{ _rulebook_id }}"
diff --git a/ansible/roles/eda_controller_config/tasks/main.yml b/ansible/roles/eda_controller_config/tasks/main.yml
new file mode 100644
index 00000000000..2348732143c
--- /dev/null
+++ b/ansible/roles/eda_controller_config/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+- name: Clone git demo repo
+ when: eda_controller_config_clone_demo_repo_enable | bool
+ become: true
+ become_user: "{{ student_name }}"
+ ansible.builtin.git:
+ repo: "{{ eda_controller_config_clone_demo_repo_url }}"
+ dest: "/home/{{ student_name }}/demo"
+ version: main
+
+- name: Create AWX tokens
+ when: eda_controller_config_awx_tokens is defined
+ ansible.builtin.include_tasks: create/create_awx_token.yml
+ loop: "{{ eda_controller_config_awx_tokens }}"
+
+- name: Create Credentials
+ when: eda_controller_config_credentials is defined
+ ansible.builtin.include_tasks: create/create_credential.yml
+ loop: "{{ eda_controller_config_credentials }}"
+
+- name: Create Decision Environment
+ when: eda_controller_config_decision_envs is defined
+ ansible.builtin.include_tasks: create/create_decision_env.yml
+ loop: "{{ eda_controller_config_decision_envs }}"
+
+- name: Create Project
+ when: eda_controller_config_projects is defined
+ ansible.builtin.include_tasks: create/create_project.yml
+ loop: "{{ eda_controller_config_projects }}"
+
+- name: Create Rulebook Activation
+ when: eda_controller_config_rulebook_activations is defined
+ ansible.builtin.include_tasks: create/create_rulebook_activation.yml
+ loop: "{{ eda_controller_config_rulebook_activations }}"
diff --git a/ansible/roles/host-lets-encrypt-certs-certbot/README.md b/ansible/roles/host-lets-encrypt-certs-certbot/README.md
index f2c8460c073..8407f4a97b5 100644
--- a/ansible/roles/host-lets-encrypt-certs-certbot/README.md
+++ b/ansible/roles/host-lets-encrypt-certs-certbot/README.md
@@ -54,16 +54,16 @@ Including an example of how to use your role (for instance, with variables passe
include_role:
name: ../../roles/host-lets-encrypt-certs
vars:
- - _certbot_domain: "master.example.opentlc.com"
- - _certbot_production: False
- - _certbot_remote_dir: "/root"
- - _certbot_cache_cert_file: "/tmp/server.cert"
- - _certbot_cache_key_file: "/tmp/server.key"
- - _certbot_cache_ca_file: "/tmp/server_ca.cer"
- - _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
- - _certbot_cache_archive_file: "/tmp/acme.tar.gz"
- - _certbot_renew_automatically: False
- - _certbot_force_issue: False
+ _certbot_domain: "master.example.opentlc.com"
+ _certbot_production: False
+ _certbot_remote_dir: "/root"
+ _certbot_cache_cert_file: "/tmp/server.cert"
+ _certbot_cache_key_file: "/tmp/server.key"
+ _certbot_cache_ca_file: "/tmp/server_ca.cer"
+ _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
+ _certbot_cache_archive_file: "/tmp/acme.tar.gz"
+ _certbot_renew_automatically: False
+ _certbot_force_issue: False
- name: Request Let's Encrypt Wildcard Certificates
hosts: quay
@@ -73,16 +73,16 @@ Including an example of how to use your role (for instance, with variables passe
include_role:
name: ../ansible/roles/host-lets-encrypt-certs
vars:
- - _certbot_wildcard_domain: "*.apps.example.opentlc.com"
- - _certbot_production: False
- - _certbot_remote_dir: "/root"
- - _certbot_cache_cert_file: "/tmp/server.cert"
- - _certbot_cache_key_file: "/tmp/server.key"
- - _certbot_cache_ca_file: "/tmp/server_ca.cer"
- - _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
- - _certbot_cache_archive_file: "/tmp/certbot.tar.gz"
- - _certbot_renew_automatically: False
- - _certbot_force_issue: False
+ _certbot_wildcard_domain: "*.apps.example.opentlc.com"
+ _certbot_production: False
+ _certbot_remote_dir: "/root"
+ _certbot_cache_cert_file: "/tmp/server.cert"
+ _certbot_cache_key_file: "/tmp/server.key"
+ _certbot_cache_ca_file: "/tmp/server_ca.cer"
+ _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
+ _certbot_cache_archive_file: "/tmp/certbot.tar.gz"
+ _certbot_renew_automatically: False
+ _certbot_force_issue: False
- name: Request Both Let's Encrypt Static and Wildcard Certificates
hosts: quay
@@ -92,15 +92,15 @@ Including an example of how to use your role (for instance, with variables passe
include_role:
name: ../ansible/roles/host-lets-encrypt-certs
vars:
- - _certbot_domain: "master.example.opentlc.com"
- - _certbot_wildcard_domain: "*.apps.example.opentlc.com"
- - _certbot_production: False
- - _certbot_remote_dir: "/root"
- - _certbot_cache_cert_file: "/tmp/server.cert"
- - _certbot_cache_key_file: "/tmp/server.key"
- - _certbot_cache_ca_file: "/tmp/server_ca.cer"
- - _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
- - _certbot_cache_archive_file: "/tmp/certbot.tar.gz"
- - _certbot_renew_automatically: False
- - _certbot_force_issue: False
+ _certbot_domain: "master.example.opentlc.com"
+ _certbot_wildcard_domain: "*.apps.example.opentlc.com"
+ _certbot_production: False
+ _certbot_remote_dir: "/root"
+ _certbot_cache_cert_file: "/tmp/server.cert"
+ _certbot_cache_key_file: "/tmp/server.key"
+ _certbot_cache_ca_file: "/tmp/server_ca.cer"
+ _certbot_cache_fullchain_file: "/tmp/fullchain.cer"
+ _certbot_cache_archive_file: "/tmp/certbot.tar.gz"
+ _certbot_renew_automatically: False
+ _certbot_force_issue: False
```
diff --git a/ansible/roles/host-lets-encrypt-certs/README.md b/ansible/roles/host-lets-encrypt-certs/README.md
index 32b09cf154d..11668d3aedc 100644
--- a/ansible/roles/host-lets-encrypt-certs/README.md
+++ b/ansible/roles/host-lets-encrypt-certs/README.md
@@ -54,16 +54,16 @@ Including an example of how to use your role (for instance, with variables passe
include_role:
name: ../../roles/host-lets-encrypt-certs
vars:
- - acme_domain: "master.example.opentlc.com"
- - acme_production: False
- - acme_remote_dir: "/root"
- - acme_cache_cert_file: "/tmp/server.cert"
- - acme_cache_key_file: "/tmp/server.key"
- - acme_cache_ca_file: "/tmp/server_ca.cer"
- - acme_cache_fullchain_file: "/tmp/fullchain.cer"
- - acme_cache_archive_file: "/tmp/acme.tar.gz"
- - acme_renew_automatically: False
- - acme_force_issue: False
+ acme_domain: "master.example.opentlc.com"
+ acme_production: False
+ acme_remote_dir: "/root"
+ acme_cache_cert_file: "/tmp/server.cert"
+ acme_cache_key_file: "/tmp/server.key"
+ acme_cache_ca_file: "/tmp/server_ca.cer"
+ acme_cache_fullchain_file: "/tmp/fullchain.cer"
+ acme_cache_archive_file: "/tmp/acme.tar.gz"
+ acme_renew_automatically: False
+ acme_force_issue: False
- name: Request Let's Encrypt Wildcard Certificates
hosts: quay
@@ -73,18 +73,18 @@ Including an example of how to use your role (for instance, with variables passe
include_role:
name: ../ansible/roles/host-lets-encrypt-certs
vars:
- - acme_wildcard_domain: "*.apps.example.opentlc.com"
- - acme_aws_access_key: ""
- - acme_aws_secret_access_key: ""
- - acme_production: False
- - acme_remote_dir: "/root"
- - acme_cache_cert_file: "/tmp/server.cert"
- - acme_cache_key_file: "/tmp/server.key"
- - acme_cache_ca_file: "/tmp/server_ca.cer"
- - acme_cache_fullchain_file: "/tmp/fullchain.cer"
- - acme_cache_archive_file: "/tmp/acme.tar.gz"
- - acme_renew_automatically: False
- - acme_force_issue: False
+ acme_wildcard_domain: "*.apps.example.opentlc.com"
+ acme_aws_access_key: ""
+ acme_aws_secret_access_key: ""
+ acme_production: False
+ acme_remote_dir: "/root"
+ acme_cache_cert_file: "/tmp/server.cert"
+ acme_cache_key_file: "/tmp/server.key"
+ acme_cache_ca_file: "/tmp/server_ca.cer"
+ acme_cache_fullchain_file: "/tmp/fullchain.cer"
+ acme_cache_archive_file: "/tmp/acme.tar.gz"
+ acme_renew_automatically: False
+ acme_force_issue: False
- name: Request Both Let's Encrypt Static and Wildcard Certificates
hosts: quay
@@ -94,17 +94,17 @@ Including an example of how to use your role (for instance, with variables passe
include_role:
name: ../ansible/roles/host-lets-encrypt-certs
vars:
- - acme_domain: "master.example.opentlc.com"
- - acme_wildcard_domain: "*.apps.example.opentlc.com"
- - acme_aws_access_key: ""
- - acme_aws_secret_access_key: ""
- - acme_production: False
- - acme_remote_dir: "/root"
- - acme_cache_cert_file: "/tmp/server.cert"
- - acme_cache_key_file: "/tmp/server.key"
- - acme_cache_ca_file: "/tmp/server_ca.cer"
- - acme_cache_fullchain_file: "/tmp/fullchain.cer"
- - acme_cache_archive_file: "/tmp/acme.tar.gz"
- - acme_renew_automatically: False
- - acme_force_issue: False
+ acme_domain: "master.example.opentlc.com"
+ acme_wildcard_domain: "*.apps.example.opentlc.com"
+ acme_aws_access_key: ""
+ acme_aws_secret_access_key: ""
+ acme_production: False
+ acme_remote_dir: "/root"
+ acme_cache_cert_file: "/tmp/server.cert"
+ acme_cache_key_file: "/tmp/server.key"
+ acme_cache_ca_file: "/tmp/server_ca.cer"
+ acme_cache_fullchain_file: "/tmp/fullchain.cer"
+ acme_cache_archive_file: "/tmp/acme.tar.gz"
+ acme_renew_automatically: False
+ acme_force_issue: False
```
diff --git a/ansible/roles/host-ocp4-installer/tasks/main.yml b/ansible/roles/host-ocp4-installer/tasks/main.yml
index 28a6f41fb95..e25d7628b7e 100644
--- a/ansible/roles/host-ocp4-installer/tasks/main.yml
+++ b/ansible/roles/host-ocp4-installer/tasks/main.yml
@@ -102,7 +102,7 @@
meta:
guid: "{{ guid }}"
env_type: "{{ env_type }}"
- loop: "{{ r_servers.openstack_servers }}"
+ loop: "{{ r_servers.servers }}"
loop_control:
label: "{{ item.name }}"
@@ -111,7 +111,7 @@
os_server_metadata:
server: "{{ item.name }}"
meta: "{{ hostvars.localhost.cloud_tags_final | default({}) | to_json }}"
- loop: "{{ r_servers.openstack_servers }}"
+ loop: "{{ r_servers.servers }}"
loop_control:
label: "{{ item.name }}"
diff --git a/ansible/roles/host-ocp4-provisioner/files/openstack_requirements.txt b/ansible/roles/host-ocp4-provisioner/files/openstack_requirements.txt
index ed6214b66c7..ba52a92b7c4 100644
--- a/ansible/roles/host-ocp4-provisioner/files/openstack_requirements.txt
+++ b/ansible/roles/host-ocp4-provisioner/files/openstack_requirements.txt
@@ -26,7 +26,7 @@ msgpack==0.6.2
munch==2.3.2
netaddr==0.7.19
netifaces==0.10.9
-openstacksdk==0.36.0
+openstacksdk==1.3.1
os-service-types==1.7.0
osc-lib==1.14.1
oslo.config==6.11.1
@@ -60,4 +60,4 @@ urllib3==1.25.6
warlock==1.3.3
wcwidth==0.1.7
wrapt==1.11.2
-zipp==0.6.0
\ No newline at end of file
+zipp==0.6.0
diff --git a/ansible/roles/install_operator/tasks/install.yml b/ansible/roles/install_operator/tasks/install.yml
index 2065fe5aa38..1358f3b6638 100644
--- a/ansible/roles/install_operator/tasks/install.yml
+++ b/ansible/roles/install_operator/tasks/install.yml
@@ -80,7 +80,7 @@
register: r_install_plans
vars:
_query: >-
- [?starts_with(spec.clusterServiceVersionNames[0], '{{ install_operator_csv_nameprefix }}') && status.phase ]
+ [?contains(spec.clusterServiceVersionNames[] | join(',', @), '{{ install_operator_csv_nameprefix }}') && status.phase ]
retries: 50
delay: 10
until:
@@ -92,7 +92,11 @@
install_operator_install_plan_name: "{{ r_install_plans.resources | to_json | from_json | json_query(query) }}"
vars:
query: >-
- [?starts_with(spec.clusterServiceVersionNames[0], '{{ install_operator_csv_nameprefix }}' )].metadata.name|[0]
+ [?contains(spec.clusterServiceVersionNames[] | join(',', @), '{{ install_operator_csv_nameprefix }}')].metadata.name|[0]
+
+- name: "{{ install_operator_name }} - Print InstallPlan"
+ debug:
+ msg: "InstallPlan: {{ install_operator_install_plan_name }}"
- name: "{{ install_operator_name }} - Get InstallPlan"
kubernetes.core.k8s_info:
diff --git a/ansible/roles/ms_vscode_server/README.md b/ansible/roles/ms_vscode_server/README.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ansible/roles/ms_vscode_server/defaults/main.yml b/ansible/roles/ms_vscode_server/defaults/main.yml
new file mode 100644
index 00000000000..cecb30581c0
--- /dev/null
+++ b/ansible/roles/ms_vscode_server/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+
+# -------------------------------------------------
+# Default Variables
+# -------------------------------------------------
+ms_vscode_server_enable_autostart: true
+ms_vscode_server_extension_urls:
+ - https://github.com/ansible/workshops/raw/devel/files/bierner.markdown-preview-github-styles-0.1.6.vsix
+ - https://github.com/ansible/workshops/raw/devel/files/hnw.vscode-auto-open-markdown-preview-0.0.4.vsix
+ - https://github.com/ansible/workshops/raw/devel/files/redhat.ansible-0.4.5.vsix
diff --git a/ansible/roles/ms_vscode_server/files/settings.json b/ansible/roles/ms_vscode_server/files/settings.json
new file mode 100644
index 00000000000..c81f5682670
--- /dev/null
+++ b/ansible/roles/ms_vscode_server/files/settings.json
@@ -0,0 +1,20 @@
+{
+ "git.ignoreLegacyWarning": true,
+ "terminal.integrated.experimentalRefreshOnResume": true,
+ "window.menuBarVisibility": "visible",
+ "git.enableSmartCommit": true,
+ "workbench.tips.enabled": false,
+ "workbench.startupEditor": "readme",
+ "telemetry.enableTelemetry": false,
+ "search.smartCase": true,
+ "git.confirmSync": false,
+ "workbench.colorTheme": "Visual Studio Dark",
+ "ansible.ansibleLint.enabled": false,
+ "ansible.ansible.useFullyQualifiedCollectionNames": true,
+ "files.associations": {
+ "*.yml": "ansible"
+ },
+ "ansible.lightspeed.enabled": true,
+ "ansible.lightspeed.suggestions.enabled": true,
+ "redhat.telemetry.enabled": true
+}
\ No newline at end of file
diff --git a/ansible/roles/ms_vscode_server/meta/main.yml b/ansible/roles/ms_vscode_server/meta/main.yml
new file mode 100644
index 00000000000..79e6e7e2541
--- /dev/null
+++ b/ansible/roles/ms_vscode_server/meta/main.yml
@@ -0,0 +1,11 @@
+---
+galaxy_info:
+ role_name: ms_vscode_server
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: Set up Microsoft VS Code Server
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - vscode
+dependencies: []
diff --git a/ansible/roles/ms_vscode_server/tasks/main.yml b/ansible/roles/ms_vscode_server/tasks/main.yml
new file mode 100644
index 00000000000..560d70ded57
--- /dev/null
+++ b/ansible/roles/ms_vscode_server/tasks/main.yml
@@ -0,0 +1,90 @@
+---
+
+- name: Set MicroSoft VScode repository
+ ansible.builtin.yum_repository:
+ name: code
+ description: Visual Studio Code
+ file: vscode
+ baseurl: https://packages.microsoft.com/yumrepos/vscode
+ enabled: true
+ gpgkey: https://packages.microsoft.com/keys/microsoft.asc
+ gpgcheck: true
+
+- name: Update rhel host
+ ansible.builtin.package:
+ name: '*'
+ state: latest
+
+- name: Install code package
+ ansible.builtin.package:
+ name:
+ - code
+ - firefox
+ - ansible-core
+ state: present
+
+- name: Install ansible-lint
+ ansible.builtin.pip:
+ name: ansible-lint
+ state: present
+
+- name: Create extensions directory
+ ansible.builtin.file:
+ path: /tmp/extensions
+ state: directory
+ mode: '0755'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+- name: Download vscode extensions
+ ansible.builtin.get_url:
+ url: "{{ item }}"
+ dest: "/tmp/extensions/"
+ validate_certs: false
+ mode: '644'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+ loop: "{{ ms_vscode_server_extension_urls }}"
+
+- name: Install vscode extensions in given order
+ become_user: "{{ student_name }}"
+ ansible.builtin.command: >-
+ /usr/bin/code
+ --install-extension
+ /tmp/extensions/{{ item }}
+ loop: "{{ ms_vscode_server_extension_urls | map('urlsplit', 'path') | map('basename') | list }}"
+
+- name: VScode copy default settings
+ ansible.builtin.copy:
+ src: settings.json
+ dest: "/home/{{ student_name }}/.config/Code/User/settings.json"
+ mode: '644'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+- name: VScode autostart setting block
+ when: ms_vscode_server_enable_autostart | bool
+ block:
+ - name: Create directory ~/.config/autostart
+ ansible.builtin.file:
+ path: "/home/{{ student_name }}/.config/autostart"
+ state: directory
+ mode: '755'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+ - name: Copy code.desktop to autostart
+ ansible.builtin.copy:
+ src: /usr/share/applications/code.desktop
+ dest: "/home/{{ student_name }}/.config/autostart/code.desktop"
+ remote_src: true
+ mode: "644"
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+ - name: Add --password-store=basic option to code.desktop
+ ansible.builtin.lineinfile:
+ path: "/home/{{ student_name }}/.config/autostart/code.desktop"
+ regexp: "^Exec="
+ firstmatch: true
+ line: "Exec=/usr/share/code/code --unity-launch %F --password-store=basic"
diff --git a/ansible/roles/nookbag/.yamllint b/ansible/roles/nookbag/.yamllint
new file mode 100644
index 00000000000..b2a7e1775e9
--- /dev/null
+++ b/ansible/roles/nookbag/.yamllint
@@ -0,0 +1,13 @@
+---
+extends: default
+
+rules:
+ comments:
+ require-starting-space: false
+ min-spaces-from-content: 1
+ comments-indentation: disable
+ indentation:
+ indent-sequences: consistent
+ line-length:
+ max: 120
+ allow-non-breakable-inline-mappings: true
diff --git a/ansible/roles/nookbag/README.adoc b/ansible/roles/nookbag/README.adoc
new file mode 100644
index 00000000000..17fd1ac3a59
--- /dev/null
+++ b/ansible/roles/nookbag/README.adoc
@@ -0,0 +1,49 @@
+== Showroom
+
+Showroom is an Ansible role that installs and configures Showroom, a replacement for bookbag.
+Showroom provides views (1 or more webpages) onto external web-based resources (e.g. websites, webapps, etc.).
+Its primary use case is to provide a one-stop console for demos, workshops, and labs.
+
+=== Core Concepts
+
+* Views - a view is a webpage that is displayed in the browser, it can include:
+** Demo, lab, workshop content - typically created in asciidoc with Antora or similar
+** Tabs (iframed) - internal or external http based services e.g.
+*** Terminal(s) (tty) e.g. Butterfly, xtermjs etc
+*** IDEs such as VSCode/CodeServer, JupyterNotes etc
+*** Consoles e.g. OpenShift, ArgoCD, Automation Controller etc
+
+NOTE: Consoles are typically iframed into a view, but can be opened in a new tab/window.
+Issues *may* arise with iframing some consoles, e.g. OpenShift, ArgoCD, Automation Controller etc and these are actively being investigated.
+
+
+=== Requirements
+
+* Ansible 2.9 or higher
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+=== Role Variables
+
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+=== Dependencies
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+=== Example Playbook
+
+
+ - hosts: servers
+ roles:
+ - showroom
+
+==== License
+
+BSD
+
+===== Author Information
+
+- Tony Kay (tok@redhat.com)
+
diff --git a/ansible/roles/nookbag/README.md b/ansible/roles/nookbag/README.md
new file mode 100644
index 00000000000..225dd44b9fc
--- /dev/null
+++ b/ansible/roles/nookbag/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/ansible/roles/nookbag/defaults/main.yml b/ansible/roles/nookbag/defaults/main.yml
new file mode 100644
index 00000000000..79ec0d6f26d
--- /dev/null
+++ b/ansible/roles/nookbag/defaults/main.yml
@@ -0,0 +1,36 @@
+---
+# TODO: make this repo generic example
+
+# Content repo with *optional* tag
+nookbag_git_repo: https://github.com/aleixhub/hello-world-lab.git
+showroom_nookbag: https://github.com/rhpds/nookbag/archive/refs/tags/nookbag-v0.0.3.zip
+showroom_git_tag: main
+
+showroom_default_playbook: site.yml # Default antora playbook to build from
+showroom_primary_port: 8000
+
+showroom_user: showroom
+showroom_group: showroom
+showroom_home_dir: /opt/showroom # Also base dir for all things showroom
+
+showroom_container_compose_template: main_compose_template.j2
+
+showroom_tab_services:
+ - double_terminal
+ - codeserver
+ - docs
+
+showroom_dnf_packages:
+ - git
+ - podman
+
+showroom_pip_packages:
+ - podman-compose
+
+showroom_npm_packages:
+ - antora
+ - "@antora/site-generator@3.1"
+
+showroom_work_dirs:
+ - "{{ showroom_home_dir }}/content" # The showroom repo itself, asciidoc source e.g. Antora
+ - "{{ showroom_home_dir }}/orchestration" # compose, kube files etc
diff --git a/ansible/roles/nookbag/meta/main.yml b/ansible/roles/nookbag/meta/main.yml
new file mode 100644
index 00000000000..edb762d66c0
--- /dev/null
+++ b/ansible/roles/nookbag/meta/main.yml
@@ -0,0 +1,53 @@
+---
+galaxy_info:
+ author: your name
+ description: your role description
+ company: your company (optional)
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: license (GPL-2.0-or-later, MIT, etc)
+
+ min_ansible_version: 2.1
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ # platforms:
+ # - name: Fedora
+ # versions:
+ # - all
+ # - 25
+ # - name: SomePlatform
+ # versions:
+ # - all
+ # - 1.0
+ # - 7
+ # - 99.99
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/ansible/roles/nookbag/tasks/10-showroom-user-setup.yml b/ansible/roles/nookbag/tasks/10-showroom-user-setup.yml
new file mode 100644
index 00000000000..618efaf7c78
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/10-showroom-user-setup.yml
@@ -0,0 +1,31 @@
+---
+
+#
+# Create the showroom user and working directories
+#
+
+- name: "Create showroom user {{ showroom_user }}"
+ ansible.builtin.user:
+ name: "{{ showroom_user | default('showroom') }}"
+ home: "{{ showroom_home_dir }}"
+
+- name: Setup persistent working directory
+ ansible.builtin.file:
+ path: "{{ __showroom_work_dir }}"
+ state: directory
+ owner: "{{ showroom_user | default('showroom') }}"
+ group: "{{ showroom_group | default('showroom') }}"
+ loop: "{{ showroom_work_dirs }}"
+ loop_control:
+ loop_var: __showroom_work_dir
+
+- name: Add passwordless sudo for {{ showroom_user }}
+  ansible.builtin.lineinfile:
+    path: /etc/sudoers
+    regexp: "^{{ showroom_user }}"
+    line: "{{ showroom_user }} ALL=(ALL) NOPASSWD: ALL"
+    # validate rejects a malformed sudoers file before it is written,
+    # which would otherwise break sudo for the whole host
+    validate: /usr/sbin/visudo -cf %s
+
+# TODO: (post PoC) ssh configuration for showroom_user
diff --git a/ansible/roles/nookbag/tasks/20-showroom-dependencies.yml b/ansible/roles/nookbag/tasks/20-showroom-dependencies.yml
new file mode 100644
index 00000000000..6daf13fe8e2
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/20-showroom-dependencies.yml
@@ -0,0 +1,33 @@
+---
+- name: Ensure Linux package dependencies
+  ansible.builtin.dnf:
+    name: "{{ __showroom_dnf_packages }}"
+    state: present
+  loop: "{{ showroom_dnf_packages }}"
+  loop_control:
+    loop_var: __showroom_dnf_packages
+
+- name: Ensure Linux Python dependencies
+  ansible.builtin.pip:
+    name: "{{ __showroom_pip_packages }}"
+    state: present
+  loop: "{{ showroom_pip_packages }}"
+  loop_control:
+    loop_var: __showroom_pip_packages
+
+ #
+ # TODO: Probably remove the npm code
+ # far better to do this via an antora image than locally installed npm
+ # however useful during debug for showroom developers
+
+- name: Install antora
+ when: showroom_debug | default(false) | bool
+ community.general.npm:
+ name: "{{ __showroom_npm_packages }}"
+ global: true
+ state: present
+ loop: "{{ showroom_npm_packages }}"
+ loop_control:
+ loop_var: __showroom_npm_packages
+ tags:
+ - showroom-npm
diff --git a/ansible/roles/nookbag/tasks/30-showroom-clone-and-inject.yml b/ansible/roles/nookbag/tasks/30-showroom-clone-and-inject.yml
new file mode 100644
index 00000000000..2f0efbe7d36
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/30-showroom-clone-and-inject.yml
@@ -0,0 +1,36 @@
+---
+- name: Clone and Inject Showroom Tasks
+ block:
+
+ - name: Clone showroom primary repo - lab content in adoc
+ ansible.builtin.git:
+ repo: "{{ nookbag_git_repo }}"
+ dest: "{{ showroom_home_dir }}/content"
+ force: true
+ version: "{{ showroom_git_tag | default('main') }}"
+ become_user: "{{ showroom_user }}"
+
+ - name: Setup and inject userdata
+ block:
+
+    - name: Load AgnosticD User Data
+      ansible.builtin.set_fact:
+        f_user_data: >-
+          {{ lookup('file', hostvars.localhost.output_dir ~ '/user-data.yaml', errors='ignore') | from_yaml }}
+
+    - name: Fallback for AgnosticD User Data
+      when: f_user_data | default({}, true) | length == 0
+      ansible.builtin.set_fact:
+        f_user_data: {}
+
+ - name: Create KV file
+ ansible.builtin.template:
+ src: include_vars.adoc.j2
+ dest: "{{ showroom_home_dir }}/content/modules/ROOT/pages/include_vars.adoc"
+ owner: "{{ showroom_user }}"
+ group: "{{ showroom_group }}"
+ mode: '0644'
+ tags:
+ - showroom-var-injection
+ tags:
+ - showroom-clone-and-inject
diff --git a/ansible/roles/nookbag/tasks/40-showroom-render.yml b/ansible/roles/nookbag/tasks/40-showroom-render.yml
new file mode 100644
index 00000000000..f6d94c34f74
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/40-showroom-render.yml
@@ -0,0 +1,35 @@
+---
+
+- name: Render asciidoc via antora container
+ containers.podman.podman_container:
+ name: container
+ image: docker.io/antora/antora
+ command: site.yml
+ volumes:
+ - "{{ showroom_home_dir }}/content:/antora:Z"
+ become_user: "{{ showroom_user }}"
+ register: r_podman_run_antora
+ tags:
+ - showroom-render
+
+- name: Debug Render asciidoc via antora container
+  ansible.builtin.debug:
+    var: r_podman_run_antora  # `var` takes a bare name; "{{ }}" double-templates it
+    verbosity: 2
+
+- name: Insert nookbag
+  ansible.builtin.unarchive:
+    src: "{{ showroom_nookbag }}"
+    dest: "{{ showroom_home_dir }}/content"
+    remote_src: true
+    owner: "{{ showroom_user }}"
+    group: "{{ showroom_group }}"
+    mode: "u=rwx,g=rx,o=rx"
+
+- name: Fix permissions on rendered content assets
+  ansible.builtin.file:
+    path: "{{ showroom_home_dir }}/content/assets"
+    owner: "{{ showroom_user }}"
+    group: "{{ showroom_group }}"
+    mode: '0755'
+    recurse: true
diff --git a/ansible/roles/nookbag/tasks/50-showroom-service.yml b/ansible/roles/nookbag/tasks/50-showroom-service.yml
new file mode 100644
index 00000000000..acf94503159
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/50-showroom-service.yml
@@ -0,0 +1,29 @@
+---
+#
+# Orchestrate showroom containers
+#
+
+- name: Insert showroom orchestration files, compose and systemd
+ ansible.builtin.template:
+ src: "{{ __orchestration.src }}"
+ dest: "{{ __orchestration.dest }}"
+ owner: "{{ __orchestration.owner | default(showroom_user) }}"
+ group: "{{ __orchestration.group | default(showroom_group) }}"
+ mode: "u=rw,g=r,o=r"
+ loop:
+ - src: "{{ showroom_container_compose_template | default('container-compose.yml.j2') }}"
+ dest: "{{ showroom_home_dir }}/orchestration/container-compose.yml"
+ - src: nginx.conf.j2
+ dest: "{{ showroom_home_dir }}/orchestration/nginx.conf"
+ - src: "{{ showroom_systemd_service_template | default('showroom.service.j2') }}"
+ dest: "/etc/systemd/system/showroom.service"
+ owner: root
+ group: root
+ loop_control:
+ loop_var: __orchestration
+
+- name: Enable and Start showroom service
+ ansible.builtin.service:
+ name: showroom.service
+ enabled: true
+ state: started
diff --git a/ansible/roles/nookbag/tasks/60-showroom-verify.yml b/ansible/roles/nookbag/tasks/60-showroom-verify.yml
new file mode 100644
index 00000000000..941501aaed2
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/60-showroom-verify.yml
@@ -0,0 +1,17 @@
+---
+#
+# TODO: Basic verification of the showroom service
+# - does it run
+# - all of it?
+
+- name: Capture showroom_primary_view_url as fact
+ ansible.builtin.set_fact:
+ f_lab_ui_url:
+ "http://{{ groups['bastions'][0].split('.',1)[0] }}.{{ guid }}{{
+ subdomain_base_suffix }}:{{ showroom_primary_port }}"
+
+- name: Output showroom view(s) URLs as userinfo and userdata
+ agnosticd_user_info:
+ data:
+ lab_ui_url: "{{ f_lab_ui_url }}"
+ showroom_primary_view_url: "{{ f_lab_ui_url }}"
diff --git a/ansible/roles/nookbag/tasks/main.yml b/ansible/roles/nookbag/tasks/main.yml
new file mode 100644
index 00000000000..c2394afd451
--- /dev/null
+++ b/ansible/roles/nookbag/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+
+#
+# This is a PoC and includes some/many steps that would be migrated to init containers etc
+#
+
+- name: Setup the showroom user and working directories
+ ansible.builtin.include_tasks:
+ file: 10-showroom-user-setup.yml
+
+- name: Setup OS dependencies, packages, user, directory
+ ansible.builtin.include_tasks:
+ file: 20-showroom-dependencies.yml
+
+- name: Clone primary showroom repo and inject externals (vars, html templates)
+ ansible.builtin.include_tasks:
+ file: 30-showroom-clone-and-inject.yml
+ tags:
+ - showroom-clone-and-inject
+
+- name: Render showroom to html if required
+ ansible.builtin.include_tasks:
+ file: 40-showroom-render.yml
+ tags:
+ - showroom-render
+
+- name: Create, enable, start showroom systemd service
+ ansible.builtin.include_tasks:
+ file: 50-showroom-service.yml
+
+- name: Validate showroom service and output view url(s)
+ ansible.builtin.include_tasks:
+ file: 60-showroom-verify.yml
diff --git a/ansible/roles/nookbag/templates/container-compose.yml.j2 b/ansible/roles/nookbag/templates/container-compose.yml.j2
new file mode 100644
index 00000000000..96a20dd7668
--- /dev/null
+++ b/ansible/roles/nookbag/templates/container-compose.yml.j2
@@ -0,0 +1,63 @@
+---
+version: "3"
+
+services:
+
+ web:
+ image: docker.io/nginx
+ container_name: web
+ hostname: web
+ command: nginx -g "daemon off;"
+ ports:
+ - "8000:80"
+ volumes:
+ - "{{ showroom_home_dir }}/content:/usr/share/nginx/html:Z"
+
+ # - "{{ showroom_home_dir }}/content:/opt/app-root/src" # :Z
+ # - ./nginx/nginx.conf:/etc/nginx/nginx.conf
+
+ terminal-01:
+ image: docker.io/wettyoss/wetty
+ container_name: terminal-01
+ hostname: terminal-01
+ command:
+ - "--ssh-user={{ f_user_data.ssh_username }}"
+ - "--ssh-pass={{ f_user_data.ssh_password }}"
+ - "--ssh-host={{ f_user_data.targethost }}"
+ - --allow-iframe=true
+ ports:
+ - "8001:3000"
+
+ terminal-02:
+ image: docker.io/wettyoss/wetty
+ container_name: terminal-02
+ hostname: terminal-02
+ command:
+ - "--ssh-user={{ f_user_data.ssh_username }}"
+ - "--ssh-pass={{ f_user_data.ssh_password }}"
+ - "--ssh-host={{ f_user_data.targethost }}"
+ - --allow-iframe=true
+ ports:
+ - "8002:3000"
+
+ codeserver:
+ image: docker.io/codercom/code-server
+ container_name: codeserver
+ hostname: codeserver
+ environment:
+ - PASSWORD={{ common_password }}
+ ports:
+ - "8003:8080"
+
+ # old style - env var better e.g. PASSWORD
+ # volumes:
+ # - "./config/code-server:/home/coder/.config/code-server"
+
+ # RHDP codeserver
+ #
+ # image: quay.io/gpte-devops-automation/codeserver #docker.io/codercom/code-server
+ # platform: linux/amd64
+
+ # volumes:
+ # - ".:/home/coder"
+ # -u "$(id -u):$(id -g)" \
diff --git a/ansible/roles/nookbag/templates/include_vars.adoc.j2 b/ansible/roles/nookbag/templates/include_vars.adoc.j2
new file mode 100644
index 00000000000..84a424dd80e
--- /dev/null
+++ b/ansible/roles/nookbag/templates/include_vars.adoc.j2
@@ -0,0 +1,3 @@
+{% for k,v in f_user_data.items() %}
+:{{k}}: {{v}}
+{% endfor %}
diff --git a/ansible/roles/nookbag/templates/main_compose_template.j2 b/ansible/roles/nookbag/templates/main_compose_template.j2
new file mode 100644
index 00000000000..c1d6af42dc2
--- /dev/null
+++ b/ansible/roles/nookbag/templates/main_compose_template.j2
@@ -0,0 +1,27 @@
+---
+# Automatically generated Showroom Compose Orchestration file
+# via AgnosticD showroom role
+# https://github.com/redhat-cop/agnosticd/tree/development/ansible/roles/showroom
+
+version: "3"
+
+services:
+
+ web:
+ image: docker.io/nginx
+ container_name: web
+ hostname: web
+ command: nginx -g "daemon off;"
+ ports:
+ - "8000:80"
+ volumes:
+ - "{{ showroom_home_dir }}/content:/usr/share/nginx/html:Z"
+
+{% for service in showroom_tab_services %}
+{% macro fake_indent_op() %}
+{% include 'service_' + service + '/service_' + service + '.j2' ignore missing %}
+{% endmacro %}
+ {{ fake_indent_op() | indent(2) }}
+
+{% endfor %}
+...
diff --git a/ansible/roles/nookbag/templates/nginx.conf.j2 b/ansible/roles/nookbag/templates/nginx.conf.j2
new file mode 100644
index 00000000000..2b6bc87e70d
--- /dev/null
+++ b/ansible/roles/nookbag/templates/nginx.conf.j2
@@ -0,0 +1,86 @@
+# For more information on configuration, see:
+# * Official English Documentation: http://nginx.org/en/docs/
+# * Official Russian Documentation: http://nginx.org/ru/docs/
+
+
+worker_processes auto;
+error_log /var/log/nginx/error.log notice;
+pid /run/nginx.pid;
+
+# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
+include /usr/share/nginx/modules/*.conf;
+
+events {
+ worker_connections 1024;
+}
+
+http {
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+
+ access_log /var/log/nginx/access.log main;
+
+ sendfile on;
+ tcp_nopush on;
+ keepalive_timeout 65;
+ types_hash_max_size 4096;
+
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ # Load modular configuration files from the /etc/nginx/conf.d directory.
+ # See http://nginx.org/en/docs/ngx_core_module.html#include
+ # for more information.
+ include /opt/app-root/etc/nginx.d/*.conf;
+
+ server {
+ listen 8080 default_server;
+ listen [::]:8080 default_server;
+ server_name _;
+ root /opt/app-root/src;
+
+ # Load configuration files for the default server block.
+ include /opt/app-root/etc/nginx.default.d/*.conf;
+
+ location = /404.html {
+ }
+
+ # location /codeserver {
+ # proxy_pass http://codeserver:8088;
+ # }
+ #
+ # location /tty {
+ # proxy_pass http://localhost:8001;
+ # }
+ #
+ # location /docs {
+ # proxy_pass https://docs.ansible.com;
+ # }
+ }
+
+# Settings for a TLS enabled server.
+#
+# server {
+# listen 443 ssl http2;
+# listen [::]:443 ssl http2;
+# server_name _;
+# root /opt/app-root/src;
+#
+# ssl_certificate "/etc/pki/nginx/server.crt";
+# ssl_certificate_key "/etc/pki/nginx/private/server.key";
+# ssl_session_cache shared:SSL:1m;
+# ssl_session_timeout 10m;
+# ssl_ciphers PROFILE=SYSTEM;
+# ssl_prefer_server_ciphers on;
+#
+# # Load configuration files for the default server block.
+# include /opt/app-root/etc/nginx.default.d/*.conf;
+#
+# location = /404.html {
+# }
+#
+# }
+
+}
+
diff --git a/ansible/roles/nookbag/templates/service_codeserver/service_codeserver.j2 b/ansible/roles/nookbag/templates/service_codeserver/service_codeserver.j2
new file mode 100644
index 00000000000..3c3a72e8e4f
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_codeserver/service_codeserver.j2
@@ -0,0 +1,8 @@
+codeserver:
+ image: docker.io/codercom/code-server
+ container_name: codeserver
+ hostname: codeserver
+ environment:
+ - PASSWORD={{ common_password }}
+ ports:
+ - "8003:8080"
diff --git a/ansible/roles/nookbag/templates/service_codeserver/tab_codeserver.j2 b/ansible/roles/nookbag/templates/service_codeserver/tab_codeserver.j2
new file mode 100644
index 00000000000..5f3d148b2b9
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_codeserver/tab_codeserver.j2
@@ -0,0 +1,3 @@
+
+
+
diff --git a/ansible/roles/nookbag/templates/service_codeserver/tablink_codeserver.j2 b/ansible/roles/nookbag/templates/service_codeserver/tablink_codeserver.j2
new file mode 100644
index 00000000000..8f277c450c7
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_codeserver/tablink_codeserver.j2
@@ -0,0 +1 @@
+
diff --git a/ansible/roles/nookbag/templates/service_docs/tab_docs.j2 b/ansible/roles/nookbag/templates/service_docs/tab_docs.j2
new file mode 100644
index 00000000000..cf22633e573
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_docs/tab_docs.j2
@@ -0,0 +1,3 @@
+
+
+
diff --git a/ansible/roles/nookbag/templates/service_docs/tablink_docs.j2 b/ansible/roles/nookbag/templates/service_docs/tablink_docs.j2
new file mode 100644
index 00000000000..ece56779276
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_docs/tablink_docs.j2
@@ -0,0 +1 @@
+
diff --git a/ansible/roles/nookbag/templates/service_double_terminal/service_double_terminal.j2 b/ansible/roles/nookbag/templates/service_double_terminal/service_double_terminal.j2
new file mode 100644
index 00000000000..bd78bd7d555
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_double_terminal/service_double_terminal.j2
@@ -0,0 +1,23 @@
+terminal-01:
+ image: docker.io/wettyoss/wetty
+ container_name: terminal-01
+ hostname: terminal-01
+ command:
+ - "--ssh-user={{ f_user_data.ssh_username }}"
+ - "--ssh-pass={{ f_user_data.ssh_password }}"
+ - "--ssh-host={{ f_user_data.targethost }}"
+ - --allow-iframe=true
+ ports:
+ - "8001:3000"
+
+terminal-02:
+ image: docker.io/wettyoss/wetty
+ container_name: terminal-02
+ hostname: terminal-02
+ command:
+ - "--ssh-user={{ f_user_data.ssh_username }}"
+ - "--ssh-pass={{ f_user_data.ssh_password }}"
+ - "--ssh-host={{ f_user_data.targethost }}"
+ - --allow-iframe=true
+ ports:
+ - "8002:3000"
diff --git a/ansible/roles/nookbag/templates/service_double_terminal/tab_double_terminal.j2 b/ansible/roles/nookbag/templates/service_double_terminal/tab_double_terminal.j2
new file mode 100644
index 00000000000..90bcf5aa006
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_double_terminal/tab_double_terminal.j2
@@ -0,0 +1 @@
+
diff --git a/ansible/roles/nookbag/templates/service_double_terminal/tablink_double_terminal.j2 b/ansible/roles/nookbag/templates/service_double_terminal/tablink_double_terminal.j2
new file mode 100644
index 00000000000..92778bf19ea
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_double_terminal/tablink_double_terminal.j2
@@ -0,0 +1 @@
+
diff --git a/ansible/roles/nookbag/templates/service_double_terminal/tabs_double_terminal.j2 b/ansible/roles/nookbag/templates/service_double_terminal/tabs_double_terminal.j2
new file mode 100644
index 00000000000..609eb4d3d7f
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_double_terminal/tabs_double_terminal.j2
@@ -0,0 +1,35 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ansible/roles/nookbag/templates/service_single_terminal/service_single_terminal.j2 b/ansible/roles/nookbag/templates/service_single_terminal/service_single_terminal.j2
new file mode 100644
index 00000000000..d58360e771d
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_single_terminal/service_single_terminal.j2
@@ -0,0 +1,11 @@
+terminal-01:
+ image: docker.io/wettyoss/wetty
+ container_name: terminal-01
+ hostname: terminal-01
+ command:
+ - "--ssh-user={{ f_user_data.ssh_username }}"
+ - "--ssh-pass={{ f_user_data.ssh_password }}"
+ - "--ssh-host={{ f_user_data.targethost }}"
+ - --allow-iframe=true
+ ports:
+ - "8001:3000"
diff --git a/ansible/roles/nookbag/templates/service_single_terminal/tab_single_terminal.j2 b/ansible/roles/nookbag/templates/service_single_terminal/tab_single_terminal.j2
new file mode 100644
index 00000000000..64875721ad3
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_single_terminal/tab_single_terminal.j2
@@ -0,0 +1,3 @@
+
+
+
diff --git a/ansible/roles/nookbag/templates/service_single_terminal/tablink_single_terminal.j2 b/ansible/roles/nookbag/templates/service_single_terminal/tablink_single_terminal.j2
new file mode 100644
index 00000000000..92778bf19ea
--- /dev/null
+++ b/ansible/roles/nookbag/templates/service_single_terminal/tablink_single_terminal.j2
@@ -0,0 +1 @@
+
diff --git a/ansible/roles/nookbag/templates/showroom.service.j2 b/ansible/roles/nookbag/templates/showroom.service.j2
new file mode 100644
index 00000000000..53d8887fcd4
--- /dev/null
+++ b/ansible/roles/nookbag/templates/showroom.service.j2
@@ -0,0 +1,18 @@
+[Unit]
+Description=Showroom Service
+Documentation=man:podman-generate-systemd(1)
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+User={{ showroom_user | default('showroom') }}
+Group={{ showroom_group | default('showroom') }}
+Environment=PODMAN_SYSTEMD_UNIT=%n
+Restart=on-failure
+ExecStart=/usr/local/bin/podman-compose -f {{ showroom_home_dir }}/orchestration/container-compose.yml up -d
+ExecStop=/usr/local/bin/podman-compose -f {{ showroom_home_dir }}/orchestration/container-compose.yml down
+KillMode=none
+Type=forking
+
+[Install]
+WantedBy=multi-user.target default.target
diff --git a/ansible/roles/novnc/README.md b/ansible/roles/novnc/README.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ansible/roles/novnc/defaults/main.yml b/ansible/roles/novnc/defaults/main.yml
new file mode 100644
index 00000000000..cc9b3afd0c4
--- /dev/null
+++ b/ansible/roles/novnc/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+
+# -------------------------------------------------
+# Default Variables
+# -------------------------------------------------
+novnc_version: 1.4.0
+
+# To define custom port, Default port is 6080
+# novnc_proxy_port: 443
+
+novnc_enable_letsencrypt_cert: true
+novnc_host_fqdn: "{{ groups['bastions'][0].split('.')[0] }}.{{ subdomain_base }}"
diff --git a/ansible/roles/novnc/meta/main.yml b/ansible/roles/novnc/meta/main.yml
new file mode 100644
index 00000000000..a24969f128e
--- /dev/null
+++ b/ansible/roles/novnc/meta/main.yml
@@ -0,0 +1,11 @@
+---
+galaxy_info:
+ role_name: novnc
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: Setup noVNC
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - novnc
+dependencies: []
diff --git a/ansible/roles/novnc/tasks/main.yml b/ansible/roles/novnc/tasks/main.yml
new file mode 100644
index 00000000000..e9ce4b83d62
--- /dev/null
+++ b/ansible/roles/novnc/tasks/main.yml
@@ -0,0 +1,76 @@
+---
+
+- name: Letsencrypt block
+  when: novnc_enable_letsencrypt_cert | default(false) | bool
+  block:
+    - name: Install pip3
+      ansible.builtin.package:
+        name: python3-pip
+
+    - name: Install certbot
+      ansible.builtin.pip:
+        name: certbot
+        state: present
+
+    - name: Generate letsencrypt certificate
+      ansible.builtin.command: >-
+        /usr/local/bin/certbot certonly
+        --standalone
+        -d {{ novnc_host_fqdn }}
+        -m rhpds-admins@redhat.com
+        --agree-tos
+        -n
+
+- name: Download noVNC
+ ansible.builtin.get_url:
+ url: https://github.com/novnc/noVNC/archive/refs/tags/v{{ novnc_version }}.tar.gz
+ dest: /usr/local/src/v{{ novnc_version }}.tar.gz
+ mode: '644'
+
+- name: Unarchive noVNC
+ ansible.builtin.unarchive:
+ src: /usr/local/src/v{{ novnc_version }}.tar.gz
+ dest: /usr/local/src/
+ remote_src: true
+
+- name: Copy novnc.service file
+ ansible.builtin.template:
+ src: novnc.service
+ dest: /etc/systemd/system/novnc.service
+ mode: '644'
+
+- name: Enable and start service
+ ansible.builtin.service:
+ name: novnc
+ state: started
+ enabled: true
+
+- name: User info block for default noVNC port
+ when: novnc_proxy_port is not defined
+ block:
+ - name: print noVNC user.info
+ agnosticd_user_info:
+ msg: |
+ noVNC Web URL: https://{{ novnc_host_fqdn }}:6080/vnc.html?host={{ novnc_host_fqdn }}&port=6080&autoconnect=true&resize=remote
+ noVNC Password: {{ student_password }}
+
+ - name: Save noVNC user data
+ agnosticd_user_info:
+ data:
+ novnc_web_url: "https://{{ novnc_host_fqdn }}:6080/vnc.html?host={{ novnc_host_fqdn }}&port=6080&autoconnect=true&resize=remote"
+ novnc_user_password: "{{ student_password }}"
+
+- name: User info block for custom noVNC port
+ when: novnc_proxy_port is defined
+ block:
+ - name: print noVNC user.info
+ agnosticd_user_info:
+ msg: |
+ noVNC Web URL: https://{{ novnc_host_fqdn }}/vnc.html?autoconnect=true&resize=remote
+ noVNC Password: {{ student_password }}
+
+ - name: Save noVNC user data
+ agnosticd_user_info:
+ data:
+ novnc_web_url: "https://{{ novnc_host_fqdn }}/vnc.html?autoconnect=true&resize=remote"
+ novnc_user_password: "{{ student_password }}"
diff --git a/ansible/roles/novnc/templates/novnc.service b/ansible/roles/novnc/templates/novnc.service
new file mode 100644
index 00000000000..7ebe9e6e018
--- /dev/null
+++ b/ansible/roles/novnc/templates/novnc.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=noVNC service
+After=syslog.target network.target
+
+[Service]
+{% if novnc_enable_letsencrypt_cert | default(false) | bool %}
+
+ExecStart=/usr/local/src/noVNC-{{ novnc_version }}/utils/novnc_proxy --vnc localhost:5901 --cert /etc/letsencrypt/live/{{ novnc_host_fqdn }}/fullchain.pem --key /etc/letsencrypt/live/{{ novnc_host_fqdn }}/privkey.pem {{ "--listen " + novnc_proxy_port|string if novnc_proxy_port is defined }}
+
+{% else %}
+
+ExecStart=/usr/local/src/noVNC-{{ novnc_version }}/utils/novnc_proxy --vnc localhost:5901 {{ "--listen " + novnc_proxy_port|string if novnc_proxy_port is defined }}
+{% endif %}
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/roles/ocp-workload-3scale-demo/tasks/config.yml b/ansible/roles/ocp-workload-3scale-demo/tasks/config.yml
index 24236a5ac92..a758f5ef26a 100644
--- a/ansible/roles/ocp-workload-3scale-demo/tasks/config.yml
+++ b/ansible/roles/ocp-workload-3scale-demo/tasks/config.yml
@@ -11,7 +11,7 @@
delay: 60
- name: Retrieve SSO admin credentials
- k8s_facts:
+ k8s_info:
kind: secret
name: credential-sso
namespace: '{{sso_project}}'
diff --git a/ansible/roles/ocp-workload-gogs-load-repository/tasks/workload.yml b/ansible/roles/ocp-workload-gogs-load-repository/tasks/workload.yml
index 35843b042f4..68be2638542 100644
--- a/ansible/roles/ocp-workload-gogs-load-repository/tasks/workload.yml
+++ b/ansible/roles/ocp-workload-gogs-load-repository/tasks/workload.yml
@@ -10,7 +10,7 @@
KUBECONFIG: "{{ tmp_kubeconfig }}"
block:
- name: Retrieve gogs route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: "{{ gogs_app_name }}"
diff --git a/ansible/roles/ocp-workload-gogs/tasks/workload.yml b/ansible/roles/ocp-workload-gogs/tasks/workload.yml
index 4d5ae3c5465..dd592ab4c5c 100644
--- a/ansible/roles/ocp-workload-gogs/tasks/workload.yml
+++ b/ansible/roles/ocp-workload-gogs/tasks/workload.yml
@@ -22,7 +22,7 @@
definition: "{{ lookup('template', './templates/route.j2' ) | from_yaml }}"
- name: Retrieve created route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: "{{ gogs_app_name }}"
diff --git a/ansible/roles/ocp4-workload-3scale-s3/tasks/tenant_loop.yml b/ansible/roles/ocp4-workload-3scale-s3/tasks/tenant_loop.yml
index bfe2ada412b..6811494b547 100644
--- a/ansible/roles/ocp4-workload-3scale-s3/tasks/tenant_loop.yml
+++ b/ansible/roles/ocp4-workload-3scale-s3/tasks/tenant_loop.yml
@@ -110,7 +110,7 @@
- ./templates/gateway-subscription.j2
- name: "Wait for APIcast CRD to be available"
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: apicasts.apps.3scale.net
diff --git a/ansible/roles/ocp4-workload-3scale-s3/tasks/workload.yml b/ansible/roles/ocp4-workload-3scale-s3/tasks/workload.yml
index 4c518494a71..e6dbf179c31 100755
--- a/ansible/roles/ocp4-workload-3scale-s3/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-3scale-s3/tasks/workload.yml
@@ -38,7 +38,7 @@
ignore_errors: True
- name: "Wait for 3scale CRD to be available"
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
name: apimanagers.apps.3scale.net
@@ -84,7 +84,7 @@
# wait to APIManager resource creation
- name: Wait for 3scale pods to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: DeploymentConfig
namespace: "{{ api_manager_namespace }}"
diff --git a/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/pre_workload.yml
index 98de05bb9ed..e515aaeb7d1 100644
--- a/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/pre_workload.yml
@@ -3,7 +3,7 @@
# Implement your Pre Workload deployment tasks here
- name: see if postgresql 9.5 imagestreamtag is available
- k8s_facts:
+ k8s_info:
api_version: image.openshift.io/v1
kind: ImageStreamTag
name: "postgresql:9.5"
diff --git a/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/workload.yml b/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/workload.yml
index 0ffa2d3098d..00f106bd1df 100644
--- a/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-ai-spam-demo-apps/tasks/workload.yml
@@ -45,7 +45,7 @@
- "{{ lookup('template', 'pipeline-service.yaml.j2') }}"
- name: check for any pipeline builds
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
namespace: "{{ project_name }}"
@@ -59,7 +59,7 @@
when: pipeline_builds.resources | length | int == 0
- name: check for pipeline buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: pipeline
@@ -75,7 +75,7 @@
# oc scale dc/jupyterhub --replicas=1 -n {{ project_name }}
- name: check for spam emitter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: spam-emitter
@@ -88,7 +88,7 @@
when: spam_emitter_buildconfig.resources | length | int == 0
- name: check for spam emitter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: spam-emitter
@@ -96,7 +96,7 @@
register: spam_emitter_buildconfig
- name: check for legitimate emitter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: legitimate-emitter
@@ -109,7 +109,7 @@
when: legitimate_emitter_buildconfig.resources | length | int == 0
- name: check for legitimate emitter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: legitimate-emitter
@@ -117,7 +117,7 @@
register: legitimate_emitter_buildconfig
- name: check for legitimate flood buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: legitimate-flood
@@ -130,7 +130,7 @@
when: legitimate_flood_emitter_buildconfig.resources | length | int == 0
- name: check for legitimate flood buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: legitimate-flood
@@ -138,7 +138,7 @@
register: legitimate_flood_emitter_buildconfig
- name: check for flood filter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: flood-filter
@@ -151,7 +151,7 @@
when: flood_filter_buildconfig.resources | length | int == 0
- name: check for flood filter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: flood-filter
@@ -159,7 +159,7 @@
register: flood_filter_buildconfig
- name: check for spam filter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: spam-filter
@@ -172,7 +172,7 @@
when: spam_filter_buildconfig.resources | length | int == 0
- name: check for spam filter buildconfig
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: BuildConfig
name: spam-filter
@@ -180,7 +180,7 @@
register: spam_filter_buildconfig
- name: Wait for the spam filter build to complete
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
name: "spam-filter-{{ spam_filter_buildconfig.resources[0].status.lastVersion }}"
@@ -195,7 +195,7 @@
retries: 10
- name: Wait for the pipeline build to complete
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
name: "pipeline-{{ pipeline_buildconfig.resources[0].status.lastVersion }}"
@@ -210,7 +210,7 @@
retries: 24
- name: Wait for the spam emitter build to complete
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
name: "spam-emitter-{{ spam_emitter_buildconfig.resources[0].status.lastVersion }}"
@@ -225,7 +225,7 @@
retries: 10
- name: Wait for the legitimate emitter build to complete
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
name: "legitimate-emitter-{{ legitimate_emitter_buildconfig.resources[0].status.lastVersion }}"
@@ -240,7 +240,7 @@
retries: 10
- name: Wait for the legitimate flood build to complete
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
name: "legitimate-flood-{{ legitimate_flood_emitter_buildconfig.resources[0].status.lastVersion }}"
@@ -255,7 +255,7 @@
retries: 10
- name: Wait for the flood filter build to complete
- k8s_facts:
+ k8s_info:
api_version: build.openshift.io/v1
kind: Build
name: "flood-filter-{{ flood_filter_buildconfig.resources[0].status.lastVersion }}"
@@ -300,7 +300,7 @@
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- name: Wait for the prometheus user monitoring pods to roll out
- k8s_facts:
+ k8s_info:
api_version: apps/v1
kind: StatefulSet
name: prometheus-user-workload
@@ -336,7 +336,7 @@
app: pipeline
- name: grab the console route
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: console
@@ -344,7 +344,7 @@
register: console_route_out
- name: grab the jupyterhub route
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: jupyterhub
diff --git a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_operator_workload.yml b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_operator_workload.yml
index 78de070832a..3a2e5519720 100644
--- a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_operator_workload.yml
+++ b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_operator_workload.yml
@@ -5,7 +5,7 @@
project_name: "opendatahub-{{ user_name }}"
- name: "Wait for Open Data Hub ClusterServiceVersion to finish installing in {{ project_name }}"
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
namespace: "{{ project_name }}"
@@ -24,7 +24,7 @@
delay: 10
- name: "Wait for Open Data Hub operator to finish deploying in {{ project_name }}"
- k8s_facts:
+ k8s_info:
kind: Pod
namespace: "{{ project_name }}"
label_selectors:
diff --git a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_pre_operator_workload.yml b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_pre_operator_workload.yml
index 7345b05fd7d..2a266944f98 100644
--- a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_pre_operator_workload.yml
+++ b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_pre_operator_workload.yml
@@ -30,7 +30,7 @@
api_version: project.openshift.io/v1
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ project_name }}"
@@ -107,7 +107,7 @@
####################################################################################################
- name: "Get the limitranges in {{ project_name }}"
- k8s_facts:
+ k8s_info:
kind: LimitRange
namespace: "{{ project_name }}"
register: limit_ranges
diff --git a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_remove_workload.yml b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_remove_workload.yml
index a70ac06f55e..aca76933f6b 100644
--- a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_remove_workload.yml
+++ b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/per_user_remove_workload.yml
@@ -29,7 +29,7 @@
- "{{ project_name }}"
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/workload.yml b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/workload.yml
index b48b479a4d7..bc05e05c794 100644
--- a/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-ai-spam-demo-odh/tasks/workload.yml
@@ -81,7 +81,7 @@
- "{{ lookup('template', 'opendatahub-operator.v0.5.2.clusterserviceversion.yaml.j2') }}"
- name: Wait for Open Data Hub ClusterServiceVersion to finish installing
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
namespace: "{{ project_name }}"
@@ -120,7 +120,7 @@
definition: "{{ lookup('template', 'opendatahub_v1alpha1_opendatahub_cr.yaml.j2') }}"
- name: Wait for various deploymentconfigs to deploy
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
name: "{{ item }}"
@@ -138,7 +138,7 @@
delay: 30
- name: Wait for various deployments to deploy
- k8s_facts:
+ k8s_info:
api_version: extensions/v1beta1
kind: Deployment
name: "{{ item }}"
@@ -156,7 +156,7 @@
delay: 30
- name: Wait for various statefulsets to deploy
- k8s_facts:
+ k8s_info:
api_version: apps/v1
kind: StatefulSet
name: "{{ item }}"
@@ -173,7 +173,7 @@
delay: 30
- name: Wait for various 3-member statefulsets to deploy
- k8s_facts:
+ k8s_info:
api_version: apps/v1
kind: StatefulSet
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-app-deploy-homework/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-app-deploy-homework/tasks/remove_workload.yml
index f96fad6b322..31e90e70906 100644
--- a/ansible/roles/ocp4-workload-app-deploy-homework/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-app-deploy-homework/tasks/remove_workload.yml
@@ -16,7 +16,7 @@
- ./templates/jenkins_role_binding.j2
- name: "Find all projects for user {{ ocp_username }}"
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
register: r_projects
diff --git a/ansible/roles/ocp4-workload-ausgeben-infra/tasks/workload.yml b/ansible/roles/ocp4-workload-ausgeben-infra/tasks/workload.yml
index 69f2bdc81b1..4a5c925af5e 100644
--- a/ansible/roles/ocp4-workload-ausgeben-infra/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-ausgeben-infra/tasks/workload.yml
@@ -10,7 +10,7 @@
name: lab-data-eng
- name: check for ausgeben deploymentconfig
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
name: ausgeben
@@ -22,7 +22,7 @@
when: deployment_out.resources | length | int < 1
- name: wait for ausgeben to deploy
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
name: ausgeben
@@ -39,7 +39,7 @@
delay: 10
- name: check for the ausgeben route
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: ausgeben
@@ -51,7 +51,7 @@
when: route_out.resources | length | int < 1
- name: get the ausgeben route details
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: ausgeben
diff --git a/ansible/roles/ocp4-workload-authentication/tasks/workload.yml b/ansible/roles/ocp4-workload-authentication/tasks/workload.yml
index 46130011959..4b969ef376f 100644
--- a/ansible/roles/ocp4-workload-authentication/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-authentication/tasks/workload.yml
@@ -97,7 +97,7 @@
- ./templates/oauth-htpasswd.yaml
- name: Retrieve API server configuration (for API endpoint)
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Infrastructure
name: cluster
diff --git a/ansible/roles/ocp4-workload-camelk-crw/tasks/verify_workload.yml b/ansible/roles/ocp4-workload-camelk-crw/tasks/verify_workload.yml
index 08804dd0e04..25d7f3eb108 100644
--- a/ansible/roles/ocp4-workload-camelk-crw/tasks/verify_workload.yml
+++ b/ansible/roles/ocp4-workload-camelk-crw/tasks/verify_workload.yml
@@ -1,6 +1,6 @@
- name: verify user project exists
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Namespace
name: "{{ _namespace }}"
@@ -10,7 +10,7 @@
failed_when: r_user_namespace.resources | list | length != 1
- name: verify codeready pod is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _namespace }}"
@@ -29,7 +29,7 @@
status_code: 200
- name: verify grafana pod is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _namespace }}"
@@ -48,7 +48,7 @@
# expects -1 due to ssl being needed
- name: verify apicurito pod is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _namespace }}"
@@ -67,7 +67,7 @@
status_code: 200
- name: verify keycloak pod is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _namespace }}"
@@ -86,7 +86,7 @@
status_code: 200
- name: verify prometheus pod is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _namespace }}"
@@ -105,7 +105,7 @@
# expects -1 due to ssl being needed
- name: verify Camel K operator pod is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _namespace }}"
@@ -117,7 +117,7 @@
failed_when: r_camelk_operator_pod.resources | list | length != 1
- name: verify Camel K integration platform is running
- k8s_facts:
+ k8s_info:
api_version: v1
kind: IntegrationPlatform
namespace: "{{ _namespace }}"
diff --git a/ansible/roles/ocp4-workload-camelk-crw/tasks/workload_per_project_codereadyworkspaces.yml b/ansible/roles/ocp4-workload-camelk-crw/tasks/workload_per_project_codereadyworkspaces.yml
index 0fd928c0b3c..21b8537ab20 100644
--- a/ansible/roles/ocp4-workload-camelk-crw/tasks/workload_per_project_codereadyworkspaces.yml
+++ b/ansible/roles/ocp4-workload-camelk-crw/tasks/workload_per_project_codereadyworkspaces.yml
@@ -129,7 +129,7 @@
- ./files/stack_imagestream.yaml
- name: wait for stack to be a thing
- k8s_facts:
+ k8s_info:
kind: ImageStream
name: kamel-stack
namespace: openshift
diff --git a/ansible/roles/ocp4-workload-camelk-lab/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-camelk-lab/tasks/pre_workload.yml
index d4c557a93fd..09f7d3f414f 100644
--- a/ansible/roles/ocp4-workload-camelk-lab/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-camelk-lab/tasks/pre_workload.yml
@@ -12,7 +12,7 @@
# verbosity: 2
# - name: check quota is deployed
-# k8s_facts:
+# k8s_info:
# api_version: quota.openshift.io/v1
# kind: ClusterResourceQuota
# name: clusterquota-{{admin_username}}-{{lab_name}}
diff --git a/ansible/roles/ocp4-workload-camelk-lab/tasks/workload.yml b/ansible/roles/ocp4-workload-camelk-lab/tasks/workload.yml
index 350a1513517..14213c90ae0 100644
--- a/ansible/roles/ocp4-workload-camelk-lab/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-camelk-lab/tasks/workload.yml
@@ -8,7 +8,7 @@
oc import-image quay.io/osevg/workshopper -n openshift --confirm
- name: Check if project exists
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: Project
name: "{{ labguide_project_name }}"
@@ -26,7 +26,7 @@
- project_exists.resources | list | length < 1
- name: check if guide is deployed
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
name: "{{ _deployed_guide_name }}"
@@ -86,7 +86,7 @@
# when: stat_result.stat.exists == False
# - name: check if user project exists
-# k8s_facts:
+# k8s_info:
# api_version: apps.openshift.io/v1
# kind: Project
# name: "{{content_sources_project_name}}"
@@ -108,7 +108,7 @@
############################################
# - name: Check if RedHat csc exists
-# k8s_facts:
+# k8s_info:
# api_version: operators.coreos.com/v1
# kind: CatalogSourceConfig
# name: installed-redhat-openshift-operators
@@ -120,7 +120,7 @@
# verbosity: 3
# - name: Check if Community csc exists
-# k8s_facts:
+# k8s_info:
# api_version: operators.coreos.com/v1
# kind: CatalogSourceConfig
# name: installed-community-openshift-operators
@@ -128,7 +128,7 @@
# register: community_csc_exists
# - name: Check if AMQStreans subscription exists
-# k8s_facts:
+# k8s_info:
# api_version: operators.coreos.com/v1alpha1
# kind: Subscription
# name: amq-streams
@@ -136,7 +136,7 @@
# register: amqstreans_sub_exists
# - name: Check if CamelK subscription exists
-# k8s_facts:
+# k8s_info:
# api_version: operators.coreos.com/v1alpha1
# kind: Subscription
# name: camel-k
@@ -171,7 +171,7 @@
# become: "{{ become_override | bool }}"
- name: Check if {{ project_name }} project exists
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: Project
name: "{{ project_name }}"
diff --git a/ansible/roles/ocp4-workload-ccnrd-stable/files/devspaces_cr.yaml b/ansible/roles/ocp4-workload-ccnrd-stable/files/devspaces_cr.yaml
index 5193bfd00a8..56e22175266 100644
--- a/ansible/roles/ocp4-workload-ccnrd-stable/files/devspaces_cr.yaml
+++ b/ansible/roles/ocp4-workload-ccnrd-stable/files/devspaces_cr.yaml
@@ -25,7 +25,7 @@ spec:
imagePuller:
enable: true
spec:
- images: quarkus-stack-3-5=quay.io/openshiftlabs/cloudnative-workspaces-quarkus:3.6;vscode=registry.redhat.io/devspaces/code-rhel8:3.6;project-cloner=registry.redhat.io/devworkspace/devworkspace-project-clone-rhel8:0.19
+ images: quarkus-stack-3-7=quay.io/openshiftlabs/cloudnative-workspaces-quarkus:3.7;vscode=registry.redhat.io/devspaces/code-rhel8:3.7;project-cloner=registry.redhat.io/devworkspace/devworkspace-project-clone-rhel8:0.21
containerRegistry: {}
devEnvironments:
secondsOfRunBeforeIdling: -1
diff --git a/ansible/roles/ocp4-workload-ceph/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-ceph/tasks/pre_workload.yml
index a5c2efdae1a..38e48c445c1 100644
--- a/ansible/roles/ocp4-workload-ceph/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-ceph/tasks/pre_workload.yml
@@ -1,6 +1,6 @@
---
- name: Discovering worker nodes
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Node
label_selectors:
diff --git a/ansible/roles/ocp4-workload-ceph/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-ceph/tasks/remove_workload.yml
index e85e641623e..2c6f80eb489 100644
--- a/ansible/roles/ocp4-workload-ceph/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-ceph/tasks/remove_workload.yml
@@ -2,7 +2,7 @@
# verify there are no ceph PVCs in use
- block:
- name: Checking if Ceph PVCs exist
- k8s_facts:
+ k8s_info:
api_version: v1
kind: PersistentVolumeClaim
register: ceph_pvcs
diff --git a/ansible/roles/ocp4-workload-ceph/tasks/workload.yml b/ansible/roles/ocp4-workload-ceph/tasks/workload.yml
index 352f8503224..dd67807f6f7 100644
--- a/ansible/roles/ocp4-workload-ceph/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-ceph/tasks/workload.yml
@@ -10,7 +10,7 @@
definition: "{{ lookup('template', 'subscription.yml.j2') }}"
- name: "Wait for Ceph CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-chaos-engineering-workshop/tasks/post_workload.yml b/ansible/roles/ocp4-workload-chaos-engineering-workshop/tasks/post_workload.yml
index a5ca671d705..15003ee271a 100644
--- a/ansible/roles/ocp4-workload-chaos-engineering-workshop/tasks/post_workload.yml
+++ b/ansible/roles/ocp4-workload-chaos-engineering-workshop/tasks/post_workload.yml
@@ -67,7 +67,7 @@
- name: Check Workshop Infrastructure
block:
- name: "[workshop-infra] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "workshop-infra"
@@ -87,7 +87,7 @@
- name: Check CodeReadyWorkspaces
block:
- name: "[workspaces] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: workspaces
@@ -109,7 +109,7 @@
- name: Check Istio
block:
- name: "[istio-system] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: istio-system
@@ -131,7 +131,7 @@
- name: Check Argo CD
block:
- name: "[argo cd] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: argocd
diff --git a/ansible/roles/ocp4-workload-cluster-autoscale/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-cluster-autoscale/tasks/remove_workload.yml
index ce6c12e616a..f0491bc1542 100644
--- a/ansible/roles/ocp4-workload-cluster-autoscale/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-cluster-autoscale/tasks/remove_workload.yml
@@ -12,7 +12,7 @@
name: "default"
- name: get machine auto scalers
- k8s_facts:
+ k8s_info:
api_version: autoscaling.openshift.io/v1alpha1
kind: MachineAutoscaler
namespace: openshift-machine-api
diff --git a/ansible/roles/ocp4-workload-cluster-autoscale/tasks/workload.yml b/ansible/roles/ocp4-workload-cluster-autoscale/tasks/workload.yml
index dc1730eceb0..94ce567c52e 100644
--- a/ansible/roles/ocp4-workload-cluster-autoscale/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-cluster-autoscale/tasks/workload.yml
@@ -1,7 +1,7 @@
---
# Implement your Workload deployment tasks here
- name: get current machinesets
- k8s_facts:
+ k8s_info:
api_version: machine.openshift.io/v1beta1
kind: MachineSet
namespace: openshift-machine-api
diff --git a/ansible/roles/ocp4-workload-cost-uploader/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-cost-uploader/tasks/pre_workload.yml
index d569b7f14a0..b07e3e9d5f3 100644
--- a/ansible/roles/ocp4-workload-cost-uploader/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-cost-uploader/tasks/pre_workload.yml
@@ -1,7 +1,7 @@
---
# Implement your Pre Workload deployment tasks here
#- name: Wait for metering crd creation
-# k8s_facts:
+# k8s_info:
# api_version: apiextensions.k8s.io/v1beta1
# kind: CustomResourceDefinition
# name: meterings.metering.openshift.io
diff --git a/ansible/roles/ocp4-workload-debugging-workshop/tasks/post_workload.yml b/ansible/roles/ocp4-workload-debugging-workshop/tasks/post_workload.yml
index f182dfb6713..f9762f24cbd 100644
--- a/ansible/roles/ocp4-workload-debugging-workshop/tasks/post_workload.yml
+++ b/ansible/roles/ocp4-workload-debugging-workshop/tasks/post_workload.yml
@@ -94,7 +94,7 @@
- name: Check Workshop Infrastructure
block:
- name: "[workshop-infra] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "workshop-infra"
@@ -114,7 +114,7 @@
- name: Check CodeReadyWorkspaces
block:
- name: "[workspaces] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: workspaces
@@ -136,7 +136,7 @@
- name: Check Istio
block:
- name: "[istio-system] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: istio-system
@@ -158,7 +158,7 @@
- name: Check Argo CD
block:
- name: "[argo cd] Reading deployments"
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: argocd
diff --git a/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_crw.yaml b/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_crw.yaml
index 1bd99974765..9fbd3a8472e 100644
--- a/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_crw.yaml
+++ b/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_crw.yaml
@@ -27,7 +27,7 @@
resource_definition: "{{ lookup('template', 'crw-subscription.yaml.j2') }}"
- name: Wait for Code Ready operator to install
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
name: checlusters.org.eclipse.che
@@ -52,7 +52,7 @@
- name: Extract key_cloak_admin_password
- k8s_facts:
+ k8s_info:
kind: Secret
name: che-identity-secret
namespace: '{{ che_project }}'
diff --git a/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_sso.yaml b/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_sso.yaml
index c93c3a549dd..e097bc5e483 100644
--- a/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_sso.yaml
+++ b/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_sso.yaml
@@ -46,7 +46,7 @@
minutes: 2
- name: Retrieve SSO admin credentials
- k8s_facts:
+ k8s_info:
kind: secret
name: credential-sso
namespace: '{{sso_project}}'
diff --git a/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_threescale.yaml b/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_threescale.yaml
index b2cea5ceff6..5dbbd2f4761 100644
--- a/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_threescale.yaml
+++ b/ansible/roles/ocp4-workload-dil-agile-integration/tasks/provision_threescale.yaml
@@ -47,7 +47,7 @@
- r_s3_bucket_claim.resources[0].status.phase == "Bound"
- name: Fetch secrets for bucket
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Secret
namespace: "{{ threescale_project }}"
@@ -77,7 +77,7 @@
# wait to APIManager resource creation
- name: Wait for 3scale pods to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: DeploymentConfig
namespace: "{{ threescale_project }}"
@@ -144,7 +144,7 @@
# wait to system-app resource creation
- name: Wait for 3scale pods to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: DeploymentConfig
namespace: "{{ threescale_project }}"
diff --git a/ansible/roles/ocp4-workload-dil-serverless/tasks/user_terminal.yaml b/ansible/roles/ocp4-workload-dil-serverless/tasks/user_terminal.yaml
index 622a7ef6f89..4bc9e53f5ba 100644
--- a/ansible/roles/ocp4-workload-dil-serverless/tasks/user_terminal.yaml
+++ b/ansible/roles/ocp4-workload-dil-serverless/tasks/user_terminal.yaml
@@ -1,6 +1,6 @@
---
- name: Log in OCP as {{ __user }}
- k8s_auth:
+ community.okd.openshift_auth:
host: "{{ api_url }}"
verify_ssl: false
username: '{{ __user }}'
diff --git a/ansible/roles/ocp4-workload-dil-streaming/templates/devspaces-cluster.yaml.j2 b/ansible/roles/ocp4-workload-dil-streaming/templates/devspaces-cluster.yaml.j2
index ff8a6e23a92..28fe984be5d 100644
--- a/ansible/roles/ocp4-workload-dil-streaming/templates/devspaces-cluster.yaml.j2
+++ b/ansible/roles/ocp4-workload-dil-streaming/templates/devspaces-cluster.yaml.j2
@@ -5,6 +5,10 @@ metadata:
namespace: '{{ che_project }}'
spec:
components:
+ pluginRegistry:
+ deployment:
+ containers:
+ - image: registry.redhat.io/devspaces/pluginregistry-rhel8@sha256:a95b61b3f5d44d14c1398b0674ad04fd61c07f75afad08e43c3a4d5513340176
cheServer:
debug: false
logLevel: INFO
diff --git a/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/deploy_certs.yml b/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/deploy_certs.yml
index 6dc8e300d26..170def36303 100644
--- a/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/deploy_certs.yml
+++ b/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/files/deploy_certs.yml
@@ -55,7 +55,7 @@
definition: "{{ lookup('template', './router-certs.j2' ) | from_yaml }}"
- name: Find Ingress Controller Pods
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: openshift-ingress
diff --git a/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/tasks/workload.yml b/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/tasks/workload.yml
index fa6e2e0a6a4..85473c527c4 100644
--- a/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-enable-lets-encrypt-certificates/tasks/workload.yml
@@ -17,7 +17,7 @@
register: r_api_hostname
- name: Determine Wildcard Domain
- k8s_facts:
+ k8s_info:
api_version: operator.openshift.io/v1
kind: IngressController
name: default
@@ -179,7 +179,7 @@
loop: "{{r_config_files.files}}"
- name: Make sure API Calls succeed
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Ingress
name: cluster
diff --git a/ansible/roles/ocp4-workload-homeroomlab-dev-tools/tasks/verify_workload.yml b/ansible/roles/ocp4-workload-homeroomlab-dev-tools/tasks/verify_workload.yml
index 03a25fee74c..4e6335d6d2d 100644
--- a/ansible/roles/ocp4-workload-homeroomlab-dev-tools/tasks/verify_workload.yml
+++ b/ansible/roles/ocp4-workload-homeroomlab-dev-tools/tasks/verify_workload.yml
@@ -1,6 +1,6 @@
---
- name: verify workshop project exists
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Namespace
name: "{{ project_name }}"
@@ -10,7 +10,7 @@
failed_when: r_project_namespace.resources | list | length != 1
- name: verify homeroom route is created
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
namespace: lab-dev-tools-spawner
diff --git a/ansible/roles/ocp4-workload-homeroomlab-odo/tasks/workload.yml b/ansible/roles/ocp4-workload-homeroomlab-odo/tasks/workload.yml
index 418bfc98ee8..014fe83de98 100644
--- a/ansible/roles/ocp4-workload-homeroomlab-odo/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-homeroomlab-odo/tasks/workload.yml
@@ -51,7 +51,7 @@
__homeroom_installed: false
block:
- name: "Get homeroom deployment (fact)"
- k8s_facts:
+ k8s_info:
api_version: "apps.openshift.io/v1"
kind: DeploymentConfig
name: "homeroom"
diff --git a/ansible/roles/ocp4-workload-homeroomlab-starter-guides/tasks/workload.yml b/ansible/roles/ocp4-workload-homeroomlab-starter-guides/tasks/workload.yml
index bcba2fa1d57..dd21396e0d7 100644
--- a/ansible/roles/ocp4-workload-homeroomlab-starter-guides/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-homeroomlab-starter-guides/tasks/workload.yml
@@ -85,7 +85,7 @@
# KUBECONFIG: "{{ tmp_kubeconfig }}"
# block:
# - name: Retrieve nexus route
-# k8s_facts:
+# k8s_info:
# api_version: "route.openshift.io/v1"
# kind: Route
# name: "nexus"
diff --git a/ansible/roles/ocp4-workload-homeroomlab-tekton-pipelines/tasks/workload.yml b/ansible/roles/ocp4-workload-homeroomlab-tekton-pipelines/tasks/workload.yml
index 418bfc98ee8..014fe83de98 100644
--- a/ansible/roles/ocp4-workload-homeroomlab-tekton-pipelines/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-homeroomlab-tekton-pipelines/tasks/workload.yml
@@ -51,7 +51,7 @@
__homeroom_installed: false
block:
- name: "Get homeroom deployment (fact)"
- k8s_facts:
+ k8s_info:
api_version: "apps.openshift.io/v1"
kind: DeploymentConfig
name: "homeroom"
diff --git a/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
index 632c160520b..9ffe6206f76 100644
--- a/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-infra-nodes/tasks/remove_workload.yml
@@ -3,7 +3,7 @@
# Implement your Workload removal tasks here
- name: Find Infra machinesets
- k8s_facts:
+ k8s_info:
api_version: machine.openshift.io/v1beta1
kind: MachineSet
namespace: openshift-machine-api
@@ -12,7 +12,7 @@
register: r_infra_machinesets
- name: Find Elasticsearch machinesets
- k8s_facts:
+ k8s_info:
api_version: machine.openshift.io/v1beta1
kind: MachineSet
namespace: openshift-machine-api
diff --git a/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml b/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
index f41d2fd9d26..7cdb5b58002 100644
--- a/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-infra-nodes/tasks/workload.yml
@@ -29,7 +29,7 @@
total_replicas_max: "{{ _infra_node_elasticsearch_replicas_max }}"
- name: Wait for Infra Nodes to be available
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Node
label_selectors:
@@ -42,7 +42,7 @@
- name: Wait for Elasticsearch Nodes to be available
when: _infra_node_elasticsearch_nodes | default(false) | bool
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Node
label_selectors:
diff --git a/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/remove_workload.yml
index 9502afa74ab..aeb38a57d21 100644
--- a/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/remove_workload.yml
@@ -67,7 +67,7 @@
path: "/tmp/istio-install"
- name: Ensure project istio-system istio-operator is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/workload.yml b/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/workload.yml
index 645bef6a2a7..afe9120d0e5 100644
--- a/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-istio-controlplane-infra/tasks/workload.yml
@@ -62,7 +62,7 @@
installPlanApproval: Manual
- name: wait for the status of the elastic subscription to not be empty
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: service-mesh-elastic
@@ -106,7 +106,7 @@
installPlanApproval: Manual
- name: wait for the status of the jaeger subscription to not be empty
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: service-mesh-jaeger
@@ -150,7 +150,7 @@
installPlanApproval: Manual
- name: wait for the status of the kiali subscription to not be empty
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: service-mesh-kiali
@@ -194,7 +194,7 @@
installPlanApproval: Manual
- name: wait for the status of the servicemesh subscription to not be empty
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: service-mesh-operator
@@ -221,7 +221,7 @@
approved: true
- name: wait for the CSVs to exist
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ item }}"
@@ -237,7 +237,7 @@
- "{{ servicemesh_version }}"
- name: wait for the CSVs to be Succeeded
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/remove_workload.yml
index 77fdf09ea03..c7a1b291169 100644
--- a/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/remove_workload.yml
@@ -28,7 +28,7 @@
metadata:
- debug: var=result
- name: Ensure istio controlplane cr terminates before continuing
- k8s_facts:
+ k8s_info:
api_version: maistra.io/v1
kind: ServiceMeshControlPlane
register: result
@@ -41,7 +41,7 @@
smcp_project_name: "smcp-{{ ocp_username }}"
- name: Ensure project istio-system istio-operator is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ smcp_project_name }}"
diff --git a/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/workload.yml b/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/workload.yml
index e94296ec2af..e41c587a74f 100644
--- a/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-istio-controlplane-student/tasks/workload.yml
@@ -78,7 +78,7 @@
loop: "{{ range(1, num_users | int + 1, 1) | list }}"
- name: wait for CR to indicate everything is running
- k8s_facts:
+ k8s_info:
api_version: maistra.io/v1
kind: ServiceMeshControlPlane
name: basic-install
@@ -91,7 +91,7 @@
loop: "{{ range(1, num_users | int + 1, 1) | list }}"
- name: wait for kiali route to exist
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: kiali
diff --git a/ansible/roles/ocp4-workload-istio-tutorial-student/tasks/deployment_wait.yml b/ansible/roles/ocp4-workload-istio-tutorial-student/tasks/deployment_wait.yml
index 2a2014d6442..36cce7cc0d4 100644
--- a/ansible/roles/ocp4-workload-istio-tutorial-student/tasks/deployment_wait.yml
+++ b/ansible/roles/ocp4-workload-istio-tutorial-student/tasks/deployment_wait.yml
@@ -2,7 +2,7 @@
# vim: set ft=ansible
- name: wait for deployment
- k8s_facts:
+ k8s_info:
api_version: extensions/v1beta1
kind: Deployment
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/remove_workload.yml
index 267937083a7..c2e51dbdd47 100644
--- a/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/remove_workload.yml
@@ -67,7 +67,7 @@
path: "/tmp/istio-install"
- name: Ensure project istio-system istio-operator is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/workload.yml b/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/workload.yml
index 8ecde454cae..2cfdb19ab1a 100644
--- a/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-istio-workshop-homeroom/tasks/workload.yml
@@ -23,7 +23,7 @@
name: lab-ossm
- name: check if homeroom was already deployed
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
name: lab-ossm
@@ -31,14 +31,14 @@
register: lab_ossm_deployment
- name: Get the cluster subdomain
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Ingress
name: cluster
register: cluster_subdomain
- name: check for the oauthclient if we're about to deploy the labguide
- k8s_facts:
+ k8s_info:
api_version: oauth.openshift.io/v1
kind: OAuthClient
name: lab-ossm-console
@@ -66,7 +66,7 @@
when: lab_ossm_deployment.resources | length | int < 1
- name: grab the homeroom route for output
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: lab-ossm-spawner
diff --git a/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
index 8185f6d2d9a..3fcc5dca579 100644
--- a/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-logging/tasks/remove_workload.yml
@@ -13,7 +13,7 @@
# operator nukes all pods once cr is gone
# waiting for just one to remain is a bit of a hack
- name: Wait for logging pods to be terminated
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: openshift-logging
diff --git a/ansible/roles/ocp4-workload-logging/tasks/workload.yml b/ansible/roles/ocp4-workload-logging/tasks/workload.yml
index 82db5ab6ff9..6afab2e982e 100644
--- a/ansible/roles/ocp4-workload-logging/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-logging/tasks/workload.yml
@@ -12,14 +12,14 @@
verbosity: 2
- name: Get cluster version
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: ClusterVersion
name: version
register: r_cluster_version
- name: Check if Elasticsearch Operator is already installed
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "openshift-operators-redhat"
@@ -31,7 +31,7 @@
block:
- name: Get current stable channel for Elasticsearch
- k8s_facts:
+ k8s_info:
api_version: packages.operators.coreos.com/v1
kind: PackageManifest
name: elasticsearch-operator
@@ -71,7 +71,7 @@
- ./templates/eo_subscription.j2
- name: Wait for Elasticsearch operator to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "openshift-operators-redhat"
@@ -85,7 +85,7 @@
- r_eo_deployment.resources[0].status.availableReplicas | int == r_eo_deployment.resources[0].spec.replicas | int
- name: Get current stable channel for Cluster Logging
- k8s_facts:
+ k8s_info:
api_version: packages.operators.coreos.com/v1
kind: PackageManifest
name: cluster-logging
@@ -124,7 +124,7 @@
- ./templates/logging_subscription.j2
- name: Wait for Cluster Logging Operator to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "openshift-logging"
@@ -145,7 +145,7 @@
- ./templates/cluster_logging.j2
- name: Wait until Elasticsearch cluster status is green
- k8s_facts:
+ k8s_info:
api_version: logging.openshift.io/v1
kind: ClusterLogging
name: instance
diff --git a/ansible/roles/ocp4-workload-machinesets/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-machinesets/tasks/remove_workload.yml
index e6f648f0159..b01a21ca9ff 100644
--- a/ansible/roles/ocp4-workload-machinesets/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-machinesets/tasks/remove_workload.yml
@@ -4,7 +4,7 @@
# Implement your Workload removal tasks here
- name: Find Infra machinesets
- k8s_facts:
+ k8s_info:
api_version: machine.openshift.io/v1beta1
kind: MachineSet
namespace: openshift-machine-api
@@ -13,7 +13,7 @@
register: r_infra_machinesets
- name: Find Elasticsearch machinesets
- k8s_facts:
+ k8s_info:
api_version: machine.openshift.io/v1beta1
kind: MachineSet
namespace: openshift-machine-api
diff --git a/ansible/roles/ocp4-workload-machinesets/tasks/workload.yml b/ansible/roles/ocp4-workload-machinesets/tasks/workload.yml
index 882c507d4ba..63b44093685 100644
--- a/ansible/roles/ocp4-workload-machinesets/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-machinesets/tasks/workload.yml
@@ -20,7 +20,7 @@
{{ ocp4_workload_machinesets.disable_default_machinesets }}
- name: Wait for Nodes to be available
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Node
label_selectors:
diff --git a/ansible/roles/ocp4-workload-metering/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-metering/tasks/pre_workload.yml
index d569b7f14a0..b07e3e9d5f3 100644
--- a/ansible/roles/ocp4-workload-metering/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-metering/tasks/pre_workload.yml
@@ -1,7 +1,7 @@
---
# Implement your Pre Workload deployment tasks here
#- name: Wait for metering crd creation
-# k8s_facts:
+# k8s_info:
# api_version: apiextensions.k8s.io/v1beta1
# kind: CustomResourceDefinition
# name: meterings.metering.openshift.io
diff --git a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/knative.yml b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/knative.yml
index 2e4c9761534..48a3e7b9794 100644
--- a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/knative.yml
+++ b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/knative.yml
@@ -7,7 +7,7 @@
namespace: "{{ user_project }}"
- name: "Wait for Knative CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/open_data_hub.yml b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/open_data_hub.yml
index 941a0b60889..8bc6dd20a5f 100644
--- a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/open_data_hub.yml
+++ b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/open_data_hub.yml
@@ -30,7 +30,7 @@
suffix: opentlc-mgr
- name: "Wait for Open Data Hub CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/serverless.yml b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/serverless.yml
index f1f73158ba0..46160467414 100644
--- a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/serverless.yml
+++ b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/serverless.yml
@@ -12,7 +12,7 @@
namespace: knative-serving
- name: "Wait for Knative CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/storage.yml b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/storage.yml
index 3ffbb1b1eec..0050ea8c397 100644
--- a/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/storage.yml
+++ b/ansible/roles/ocp4-workload-ml-workflows-infra-summit2020/tasks/storage.yml
@@ -52,7 +52,7 @@
channel: " {{ ocs_channel }}"
- name: "Wait for Storage CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
@@ -68,7 +68,7 @@
definition: "{{ lookup('template', 'storage/storagecluster.yml.j2') }}"
- name: "Waiting for Noobaa to become ready"
- k8s_facts:
+ k8s_info:
api_version: "noobaa.io/v1alpha1"
kind: NooBaa
namespace: "{{ ocs_namespace }}"
@@ -100,7 +100,7 @@
definition: "{{ lookup('template', 'storage/pv_pool_objectbucketclaim.yml.j2') }}"
- name: "Wait for Bucket to exist"
- k8s_facts:
+ k8s_info:
api_version: "objectbucket.io/v1alpha1"
kind: ObjectBucketClaim
name: "{{ ocs_mcg_pv_pool_bucket_name }}"
@@ -111,7 +111,7 @@
delay: 10
- name: "Wait for Bucket to have status"
- k8s_facts:
+ k8s_info:
api_version: "objectbucket.io/v1alpha1"
kind: ObjectBucketClaim
name: "{{ ocs_mcg_pv_pool_bucket_name }}"
@@ -122,7 +122,7 @@
delay: 10
- name: "Wait for Bucket to become bound"
- k8s_facts:
+ k8s_info:
api_version: "objectbucket.io/v1alpha1"
kind: ObjectBucketClaim
name: "{{ ocs_mcg_pv_pool_bucket_name }}"
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/add-dvc-repo.yaml b/ansible/roles/ocp4-workload-mlops/tasks/add-dvc-repo.yaml
index 9980f4572f0..84bd4504e32 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/add-dvc-repo.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/add-dvc-repo.yaml
@@ -131,7 +131,7 @@
download_file: /output/data.tar.bz2
- name: Wait until job is done
- k8s_facts:
+ k8s_info:
name: data-repo-setup-{{user}}
namespace: labs-setup
kind: Job
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/add-git-repo-jupyterhub.yaml b/ansible/roles/ocp4-workload-mlops/tasks/add-git-repo-jupyterhub.yaml
index 9c9a591d2aa..b4466ecec46 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/add-git-repo-jupyterhub.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/add-git-repo-jupyterhub.yaml
@@ -72,7 +72,7 @@
backoffLimit: 4
- name: Wait until job is done
- k8s_facts:
+ k8s_info:
name: "{{user}}-jupyterhub-pvc-setup"
kind: Job
namespace: labs-infra
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/add_nexus_secrets.yaml b/ansible/roles/ocp4-workload-mlops/tasks/add_nexus_secrets.yaml
index 1a6cf0e41b7..ac7ffc754b2 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/add_nexus_secrets.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/add_nexus_secrets.yaml
@@ -1,6 +1,6 @@
---
- name: Get nexus secret for {{ns}}
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Secret
name: nexus
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-amq-streams.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-amq-streams.yaml
index 77822feb83d..a7177787a44 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-amq-streams.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-amq-streams.yaml
@@ -13,7 +13,7 @@
# Wait for CRD
- name: Wait for AMQ Streams CRD to be ready
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
name: kafkas.kafka.strimzi.io
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-argocd.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-argocd.yaml
index 8e2a218ac9e..cd224e3b832 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-argocd.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-argocd.yaml
@@ -8,7 +8,7 @@
- ./files/argocd_subscription.yaml
- name: Wait for ArgoCD CRD to be ready
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
name: argocds.argoproj.io
@@ -40,7 +40,7 @@
definition: "{{ lookup('template', './templates/argocd_cr.yaml.j2' ) | from_yaml }}"
- name: Wait for argocd to be available
- k8s_facts:
+ k8s_info:
api_version: argoproj.io/v1alpha1
kind: ArgoCD
name: argocd
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-ceph.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-ceph.yaml
index ba1d5f28251..73011eb5ef1 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-ceph.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-ceph.yaml
@@ -29,7 +29,7 @@
definition: "{{ lookup('template', 'ceph_subscription.yaml.j2') }}"
- name: Get OCS version
- k8s_facts:
+ k8s_info:
kind: ClusterServiceVersion
api_version: operators.coreos.com/v1alpha1
namespace: "{{ ceph_storage.ceph_namespace }}"
@@ -43,7 +43,7 @@
ocs_csv_name: "{{ocs_csv.resources[0].metadata.name}}"
- name: "Wait for OCS Operator to be Succeeded"
- k8s_facts:
+ k8s_info:
kind: ClusterServiceVersion
api_version: operators.coreos.com/v1alpha1
name: "{{ocs_csv_name}}"
@@ -54,7 +54,7 @@
delay: 10
- name: "Wait for Ceph CRDs to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
@@ -70,7 +70,7 @@
definition: "{{ lookup('template', 'ceph_storagecluster.yaml.j2') }}"
- name: "Wait for OCS CR instances to be Ready"
- k8s_facts:
+ k8s_info:
api_version: "{{item.api_version}}"
kind: "{{item.kind}}"
name: "{{item.name}}"
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-codeready.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-codeready.yaml
index 29618dc87d2..11c94176228 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-codeready.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-codeready.yaml
@@ -13,7 +13,7 @@
# wait for CRD to be a thing
- name: Wait for CodeReady CRD to be ready
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
name: checlusters.org.eclipse.che
@@ -44,7 +44,7 @@
delay: "15"
- name: Get sso secrets
- k8s_facts:
+ k8s_info:
kind: Secret
namespace: labs-infra
name: che-identity-secret
@@ -77,7 +77,7 @@
until: cmd_res.rc == 0
- name: get keycloak pod
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: labs-infra
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-dm.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-dm.yaml
index 1018cbce784..7194ae5c465 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-dm.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-dm.yaml
@@ -18,7 +18,7 @@
# Wait for CRD
- name: Wait for Decision Manager CRD to be ready
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
name: kieapps.app.kiegroup.org
@@ -28,7 +28,7 @@
until: r_dm_crd.resources | list | length == 1
- name: Get global pull secrets from openshift-config
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Secret
namespace: openshift-config
@@ -66,7 +66,7 @@
state: absent
- name: Get existing rhdm-kieserver-rhel8
- k8s_facts:
+ k8s_info:
api_version: image.openshift.io/v1
kind: ImageStream
name: rhdm-kieserver-rhel8
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-guides.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-guides.yaml
index cae7eb3eabf..cbd53fe89e1 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-guides.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-guides.yaml
@@ -1,6 +1,6 @@
---
- name: search for guide {{ guide }}
- k8s_facts:
+ k8s_info:
kind: Deployment
name: guides-{{ guide }}
namespace: labs-infra
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-mon.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-mon.yaml
index 8b75eacbcc5..d5f01cd2c2d 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-mon.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-mon.yaml
@@ -34,7 +34,7 @@
- prom_subscription.yaml
- name: "Wait for prometheus operator to be Succeeded"
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: prometheusoperator.0.37.0
@@ -45,7 +45,7 @@
delay: 10
- name: Get prometheus operator group info
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1
kind: OperatorGroup
name: labs-prometheus-operator-group
@@ -53,7 +53,7 @@
register: check_namespace
- name: Wait for prometheus operator group to appear
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1
kind: OperatorGroup
name: labs-prometheus-operator-group
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-pipelines.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-pipelines.yaml
index ae9bafadf1a..441d36cf8d2 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-pipelines.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-pipelines.yaml
@@ -1,6 +1,6 @@
---
- name: Get all pipeline runs for {{user}} in {{ns}}
- k8s_facts:
+ k8s_info:
api_version: tekton.dev/v1alpha1
kind: PipelineRun
namespace: "{{ns}}"
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/install-username-distribution.yaml b/ansible/roles/ocp4-workload-mlops/tasks/install-username-distribution.yaml
index d58572691e0..2ee36b7e347 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/install-username-distribution.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/install-username-distribution.yaml
@@ -1,6 +1,6 @@
---
- name: search for username distribution tool
- k8s_facts:
+ k8s_info:
kind: Deployment
name: get-a-username
namespace: labs-infra
@@ -40,7 +40,7 @@
- name: wait for redis to be ready
when: r_gau_dc.resources | list | length == 0
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: labs-infra
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-mlops/tasks/pre_workload.yml
index dfea2c18421..3baee2c2492 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/pre_workload.yml
@@ -11,14 +11,14 @@
loop: "{{ range(1,((num_users | int) + 1)) | list }}"
- name: Get API server URL
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Infrastructure
name: cluster
register: r_api_url
- name: Get Web Console route
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
namespace: openshift-console
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/preload-images.yaml b/ansible/roles/ocp4-workload-mlops/tasks/preload-images.yaml
index 0b2bd729706..22c96954aa9 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/preload-images.yaml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/preload-images.yaml
@@ -32,7 +32,7 @@
label: "{{image.name}}"
- name: Wait till all pods are deployed
- k8s_facts:
+ k8s_info:
api_version: apps/v1
kind: DaemonSet
name: "{{image.name}}"
diff --git a/ansible/roles/ocp4-workload-mlops/tasks/workload.yml b/ansible/roles/ocp4-workload-mlops/tasks/workload.yml
index a92178d38f3..dfa5d1525d8 100644
--- a/ansible/roles/ocp4-workload-mlops/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-mlops/tasks/workload.yml
@@ -114,7 +114,7 @@
# Install CodeReady Workspaces
- name: see if codeready is installed
- k8s_facts:
+ k8s_info:
api_version: org.eclipse.che/v1
kind: CheCluster
name: codeready-workspaces
@@ -131,7 +131,7 @@
# Install AMQ Streams
- name: Check if AMQ Streams is installed
- k8s_facts:
+ k8s_info:
api_version: kafka.strimzi.io/v1beta1
kind: Kafka
name: amq-streams
diff --git a/ansible/roles/ocp4-workload-nexus-operator/tasks/workload.yml b/ansible/roles/ocp4-workload-nexus-operator/tasks/workload.yml
index 7af29d263c7..a2c1eaadd6b 100644
--- a/ansible/roles/ocp4-workload-nexus-operator/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-nexus-operator/tasks/workload.yml
@@ -28,7 +28,7 @@
- ./templates/operator.j2
- name: Wait for Nexus operator Pod to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "{{ ocp4_workload_nexus_operator.project }}"
@@ -51,7 +51,7 @@
definition: "{{ lookup('template', './templates/nexus.j2' ) | from_yaml }}"
- name: Wait for Nexus Pod to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "{{ ocp4_workload_nexus_operator.project }}"
diff --git a/ansible/roles/ocp4-workload-open-data-hub-infra/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-open-data-hub-infra/tasks/remove_workload.yml
index ac05901eb30..a7223c25805 100644
--- a/ansible/roles/ocp4-workload-open-data-hub-infra/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-open-data-hub-infra/tasks/remove_workload.yml
@@ -33,7 +33,7 @@
ignore_errors: yes
- name: Ensure rook-ceph cluster is done removing if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: ceph.rook.io/v1
kind: CephCluster
name: rook-ceph
@@ -89,7 +89,7 @@
name: rook-ceph
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-open-data-hub-student/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-open-data-hub-student/tasks/remove_workload.yml
index c90d4ba5773..1763b8ccc97 100644
--- a/ansible/roles/ocp4-workload-open-data-hub-student/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-open-data-hub-student/tasks/remove_workload.yml
@@ -83,7 +83,7 @@
merge_type: merge
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "open-data-hub-{{ ocp_username }}"
diff --git a/ansible/roles/ocp4-workload-open-data-hub-student/tasks/workload.yml b/ansible/roles/ocp4-workload-open-data-hub-student/tasks/workload.yml
index 011111fc680..a95a9b27df4 100644
--- a/ansible/roles/ocp4-workload-open-data-hub-student/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-open-data-hub-student/tasks/workload.yml
@@ -72,7 +72,7 @@
#
### obtain secrets for each user
#- name: "new-obtain {{ ocp_username }} secrets"
-# k8s_facts:
+# k8s_info:
# name: "rook-ceph-object-user-my-store-{{ ocp_username }}"
# namespace: rook-ceph
# kind: Secret
@@ -216,7 +216,7 @@
# var: result
#
#- name: get route for jupyterhub
-# k8s_facts:
+# k8s_info:
# kind: Route
# name: jupyterhub
# namespace: "open-data-hub-{{ ocp_username }}"
diff --git a/ansible/roles/ocp4-workload-open-data-hub/tasks/per_user_workload.yml b/ansible/roles/ocp4-workload-open-data-hub/tasks/per_user_workload.yml
index ae3b9468890..e067a7a7104 100644
--- a/ansible/roles/ocp4-workload-open-data-hub/tasks/per_user_workload.yml
+++ b/ansible/roles/ocp4-workload-open-data-hub/tasks/per_user_workload.yml
@@ -19,7 +19,7 @@
## obtain secrets for each user
- name: "new-obtain {{ item }} secrets"
- k8s_facts:
+ k8s_info:
name: "rook-ceph-object-user-my-store-{{ item }}"
namespace: rook-ceph
kind: Secret
@@ -106,7 +106,7 @@
var: result
- name: get route for jupyterhub
- k8s_facts:
+ k8s_info:
kind: Route
name: jupyterhub
namespace: "open-data-hub-{{ item }}"
diff --git a/ansible/roles/ocp4-workload-open-data-hub/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-open-data-hub/tasks/remove_workload.yml
index 5d9758726e6..da63effafe0 100644
--- a/ansible/roles/ocp4-workload-open-data-hub/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-open-data-hub/tasks/remove_workload.yml
@@ -122,7 +122,7 @@
ignore_errors: true
- name: Ensure rook-ceph cluster is done removing if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: ceph.rook.io/v1
kind: CephCluster
name: rook-ceph
@@ -178,7 +178,7 @@
name: rook-ceph
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
@@ -192,7 +192,7 @@
- rook-ceph-system
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "open-data-hub-{{ item }}"
diff --git a/ansible/roles/ocp4-workload-pipelines/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-pipelines/tasks/remove_workload.yml
index aa4da5ab4c5..1c2272f0fff 100644
--- a/ansible/roles/ocp4-workload-pipelines/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-pipelines/tasks/remove_workload.yml
@@ -17,7 +17,7 @@
name: cluster
- name: Wait until all OpenShift pipelines pods have been removed
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: openshift-pipelines
@@ -27,7 +27,7 @@
until: r_pipelines_pods.resources | length == 0
- name: Get Installed CSV
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: openshift-pipelines-operator
@@ -79,7 +79,7 @@
- config.operator.tekton.dev
- name: Find InstallPlans
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: InstallPlan
namespace: openshift-operators
diff --git a/ansible/roles/ocp4-workload-pipelines/tasks/workload.yml b/ansible/roles/ocp4-workload-pipelines/tasks/workload.yml
index 6ce451a035d..49319e4bc2a 100644
--- a/ansible/roles/ocp4-workload-pipelines/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-pipelines/tasks/workload.yml
@@ -46,7 +46,7 @@
when: not ocp4_workload_pipelines.automatic_install_plan_approval
block:
- name: Wait until InstallPlan is created
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: InstallPlan
namespace: openshift-operators
@@ -69,7 +69,7 @@
definition: "{{ lookup( 'template', './templates/installplan.j2' ) }}"
- name: Get Installed CSV
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: openshift-pipelines-operator
@@ -82,7 +82,7 @@
- r_subscription.resources[0].status.currentCSV | length > 0
- name: Wait until CSV is Installed
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ r_subscription.resources[0].status.currentCSV }}"
@@ -96,7 +96,7 @@
- r_csv.resources[0].status.phase == "Succeeded"
- name: Wait until Pipelines Pods are ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "openshift-pipelines"
diff --git a/ansible/roles/ocp4-workload-quarkus-workshop/files/devspaces_cr.yaml b/ansible/roles/ocp4-workload-quarkus-workshop/files/devspaces_cr.yaml
index 4f21c845a54..b6f9124858b 100644
--- a/ansible/roles/ocp4-workload-quarkus-workshop/files/devspaces_cr.yaml
+++ b/ansible/roles/ocp4-workload-quarkus-workshop/files/devspaces_cr.yaml
@@ -25,7 +25,7 @@ spec:
imagePuller:
enable: true
spec:
- images: quarkus-stack-3-5=quay.io/openshiftlabs/quarkus-workshop-stack:3.5;vscode=registry.redhat.io/devspaces/code-rhel8:3.5;project-cloner=registry.redhat.io/devworkspace/devworkspace-project-clone-rhel8:0.19
+ images: quarkus-stack-3-7=quay.io/openshiftlabs/quarkus-workshop-stack:3.7;vscode=registry.redhat.io/devspaces/code-rhel8:3.7;project-cloner=registry.redhat.io/devworkspace/devworkspace-project-clone-rhel8:0.21
containerRegistry: {}
devEnvironments:
secondsOfRunBeforeIdling: -1
diff --git a/ansible/roles/ocp4-workload-quay-operator/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-quay-operator/tasks/remove_workload.yml
index 7cdb28b5c13..ab692ddd9c7 100644
--- a/ansible/roles/ocp4-workload-quay-operator/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-quay-operator/tasks/remove_workload.yml
@@ -8,7 +8,7 @@
}}
- name: Get ClusterVersion
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: ClusterVersion
name: version
@@ -28,7 +28,7 @@
- ./templates/quay.j2
- name: Wait for all Quay Pods to be terminated
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ ocp4_workload_quay_operator.project }}"
diff --git a/ansible/roles/ocp4-workload-quay-operator/tasks/workload.yml b/ansible/roles/ocp4-workload-quay-operator/tasks/workload.yml
index a45277b677e..2ba73e3da64 100644
--- a/ansible/roles/ocp4-workload-quay-operator/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-quay-operator/tasks/workload.yml
@@ -16,7 +16,7 @@
msg: "Setting up workload for user ocp_username = {{ ocp_username }}"
- name: Get ClusterVersion
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: ClusterVersion
name: version
@@ -92,7 +92,7 @@
ocp4_workload_quay_operator_ssl_certificate: "{{ _quay_ssl_cert_file['content'] }}"
- name: Determine Cluster Base Domain for Quay Route
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Ingress
name: cluster
@@ -134,7 +134,7 @@
- ./templates/quay_ssl_certificate_secret.j2
- name: Wait for ClusterServiceVersion to appear
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
namespace: "{{ ocp4_workload_quay_operator.project }}"
@@ -145,7 +145,7 @@
delay: 10
- name: Wait for Quay operator to be ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Deployment
namespace: "{{ ocp4_workload_quay_operator.project }}"
@@ -169,7 +169,7 @@
when: ocp4_workload_quay_operator.verify_deployment | bool
block:
- name: Wait for Quay App Pod to appear
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ ocp4_workload_quay_operator.project }}"
@@ -186,7 +186,7 @@
seconds: 10
- name: Wait for Quay App Pod Status to be Ready
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ ocp4_workload_quay_operator.project }}"
@@ -213,7 +213,7 @@
# namespace: "{{ ocp4_workload_quay_operator.project }}"
- name: Get Quay Hostname
- k8s_facts:
+ k8s_info:
api_version: redhatcop.redhat.io/v1alpha1
kind: QuayEcosystem
name: "{{ ocp4_workload_quay_operator.name }}"
diff --git a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/remove_workload.yml
index cbdfd3c52f2..a0e732313bf 100644
--- a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/remove_workload.yml
@@ -26,7 +26,7 @@
ignore_errors: yes
- name: Ensure rook-ceph cluster is done removing if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: ceph.rook.io/v1
kind: CephCluster
name: rook-ceph
@@ -81,7 +81,7 @@
name: rook-ceph
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/workload.yml b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/workload.yml
index d714d4ffebe..e8cbe104ae3 100644
--- a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_infra/tasks/workload.yml
@@ -71,7 +71,7 @@
delay: 60
- name: Get Rook Ceph RGW Service
- k8s_facts:
+ k8s_info:
kind: Service
namespace: rook-ceph
name: rook-ceph-rgw-my-store
diff --git a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_operator_workload.yml b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_operator_workload.yml
index 1574a2e0018..fa05cd2b780 100644
--- a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_operator_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_operator_workload.yml
@@ -6,7 +6,7 @@
project_name: "opendatahub-{{ user_name }}"
- name: "Wait for Open Data Hub ClusterServiceVersion to finish installing in {{ project_name }}"
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
namespace: "{{ project_name }}"
@@ -22,7 +22,7 @@
delay: 10
- name: "Wait for Open Data Hub operator to finish deploying in {{ project_name }}"
- k8s_facts:
+ k8s_info:
kind: Pod
namespace: "{{ project_name }}"
label_selectors:
diff --git a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_pre_operator_workload.yml b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_pre_operator_workload.yml
index d940b96e971..0f69a821947 100644
--- a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_pre_operator_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_pre_operator_workload.yml
@@ -31,7 +31,7 @@
## obtain secrets for each user
- name: Get Ceph Access and Secret Key
- k8s_facts:
+ k8s_info:
name: "rook-ceph-object-user-my-store-{{ user_name }}"
namespace: rook-ceph
kind: Secret
@@ -63,7 +63,7 @@
api_version: project.openshift.io/v1
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ project_name }}"
@@ -140,7 +140,7 @@
####################################################################################################
- name: "Get the limitranges in {{ project_name }}"
- k8s_facts:
+ k8s_info:
kind: LimitRange
namespace: "{{ project_name }}"
register: limit_ranges
diff --git a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_remove_workload.yml b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_remove_workload.yml
index 2bcc8cdaf49..768eafbb4bc 100644
--- a/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_remove_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-analytics_data_ocp_workshop_s2020/tasks/per_user_remove_workload.yml
@@ -30,7 +30,7 @@
- "{{ project_name }}"
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-rhte-keynote-ai-infra/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-rhte-keynote-ai-infra/tasks/remove_workload.yml
index cbdfd3c52f2..a0e732313bf 100644
--- a/ansible/roles/ocp4-workload-rhte-keynote-ai-infra/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-keynote-ai-infra/tasks/remove_workload.yml
@@ -26,7 +26,7 @@
ignore_errors: yes
- name: Ensure rook-ceph cluster is done removing if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: ceph.rook.io/v1
kind: CephCluster
name: rook-ceph
@@ -81,7 +81,7 @@
name: rook-ceph
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_operator_workload.yml b/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_operator_workload.yml
index 7a67dc2b6fc..7f8601c1d65 100644
--- a/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_operator_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_operator_workload.yml
@@ -5,7 +5,7 @@
project_name: "opendatahub-{{ user_name }}"
- name: "Wait for Open Data Hub ClusterServiceVersion to finish installing in {{ project_name }}"
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
namespace: "{{ project_name }}"
@@ -18,7 +18,7 @@
delay: 10
- name: "Wait for Open Data Hub operator to finish deploying in {{ project_name }}"
- k8s_facts:
+ k8s_info:
kind: Pod
namespace: "{{ project_name }}"
label_selectors:
diff --git a/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_pre_operator_workload.yml b/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_pre_operator_workload.yml
index 69b9b0d4a57..ecc72292953 100644
--- a/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_pre_operator_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_pre_operator_workload.yml
@@ -30,7 +30,7 @@
api_version: project.openshift.io/v1
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ project_name }}"
@@ -107,7 +107,7 @@
####################################################################################################
- name: "Get the limitranges in {{ project_name }}"
- k8s_facts:
+ k8s_info:
kind: LimitRange
namespace: "{{ project_name }}"
register: limit_ranges
diff --git a/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_remove_workload.yml b/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_remove_workload.yml
index a70ac06f55e..aca76933f6b 100644
--- a/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_remove_workload.yml
+++ b/ansible/roles/ocp4-workload-rhte-keynote-ai-odh-setup/tasks/per_user_remove_workload.yml
@@ -29,7 +29,7 @@
- "{{ project_name }}"
- name: Ensure project is done terminating if it was being terminated
- k8s_facts:
+ k8s_info:
api_version: project.openshift.io/v1
kind: Project
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-serverless/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-serverless/tasks/remove_workload.yml
index 298b2ea9f02..f9494ceee9e 100644
--- a/ansible/roles/ocp4-workload-serverless/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-serverless/tasks/remove_workload.yml
@@ -18,7 +18,7 @@
namespace: knative-serving
- name: Wait until all KNative Serving pods have been removed
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: knative-serving
@@ -28,7 +28,7 @@
until: r_knative_pods.resources | length == 0
- name: Get Installed CSV
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: serverless-operator
@@ -71,7 +71,7 @@
- /etc/bash_completion.d/kn
- name: Find InstallPlan
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: InstallPlan
namespace: openshift-operators
diff --git a/ansible/roles/ocp4-workload-serverless/tasks/workload.yml b/ansible/roles/ocp4-workload-serverless/tasks/workload.yml
index deda9a94b4f..950bd20386c 100644
--- a/ansible/roles/ocp4-workload-serverless/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-serverless/tasks/workload.yml
@@ -44,7 +44,7 @@
when: not ocp4_workload_serverless.automatic_install_plan_approval
block:
- name: Wait until InstallPlan is created
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: InstallPlan
namespace: openshift-operators
@@ -67,7 +67,7 @@
definition: "{{ lookup( 'template', './templates/installplan.j2' ) }}"
- name: Get Installed CSV
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: Subscription
name: serverless-operator
@@ -80,7 +80,7 @@
- r_subscription.resources[0].status.currentCSV | length > 0
- name: Wait until CSV is Installed
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ r_subscription.resources[0].status.currentCSV }}"
@@ -103,7 +103,7 @@
- name: Wait until KNative Serving installation is complete
when: ocp4_workload_serverless.wait_for_deploy | bool
- k8s_facts:
+ k8s_info:
api_version: operator.knative.dev/v1alpha1
kind: KnativeServing
name: knative-serving
diff --git a/ansible/roles/ocp4-workload-servicemesh/tasks/remove_workload.yml b/ansible/roles/ocp4-workload-servicemesh/tasks/remove_workload.yml
index 34cdc2b4121..0dc2eea5644 100644
--- a/ansible/roles/ocp4-workload-servicemesh/tasks/remove_workload.yml
+++ b/ansible/roles/ocp4-workload-servicemesh/tasks/remove_workload.yml
@@ -10,7 +10,7 @@
}}
- name: Find all Service Mesh Member Rolls
- k8s_facts:
+ k8s_info:
api_version: maistra.io/v1
kind: ServiceMeshMemberRoll
register: r_smmr
@@ -23,7 +23,7 @@
loop: "{{ r_smmr.resources }}"
- name: Find all Service Mesh Control Planes
- k8s_facts:
+ k8s_info:
api_version: maistra.io/v1
kind: ServiceMeshControlPlane
register: r_smcp
@@ -36,14 +36,14 @@
loop: "{{ r_smcp.resources }}"
- name: Wait until all Service Mesh Control Planes have disappeared
- k8s_facts:
+ k8s_info:
api_version: maistra.io/v1
kind: ServiceMeshControlPlane
register: r_smcp
until: r_smcp.resources | length == 0
- name: Get all InstallPlans
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: InstallPlan
namespace: openshift-operators
diff --git a/ansible/roles/ocp4-workload-servicemesh/tasks/workload.yml b/ansible/roles/ocp4-workload-servicemesh/tasks/workload.yml
index 13e2dbfd77c..a93c55a7612 100644
--- a/ansible/roles/ocp4-workload-servicemesh/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-servicemesh/tasks/workload.yml
@@ -23,7 +23,7 @@
definition: "{{ lookup('template', './templates/subscription.j2' ) }}"
- name: Wait until InstallPlan is created
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: InstallPlan
namespace: openshift-operators
@@ -56,7 +56,7 @@
[?starts_with(spec.clusterServiceVersionNames[0], 'servicemeshoperator')].spec.clusterServiceVersionNames
- name: Wait until all CSVs are Succeeded
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ item }}"
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/codeready_cr.yaml b/ansible/roles/ocp4-workload-sso-workshop/files/codeready_cr.yaml
deleted file mode 100644
index d1a036d9122..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/files/codeready_cr.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-apiVersion: org.eclipse.che/v1
-kind: CheCluster
-metadata:
- name: codeready-workspaces
- namespace: codeready
-spec:
- auth:
- identityProviderURL: ''
- identityProviderRealm: ''
- oAuthSecret: ''
- identityProviderPassword: 'admin'
- oAuthClientName: ''
- initialOpenShiftOAuthUser: true
- identityProviderClientId: ''
- identityProviderAdminUserName: 'admin'
- externalIdentityProvider: false
- openShiftoAuth: false
- database:
- chePostgresUser: ''
- externalDb: false
- chePostgresHostName: ''
- chePostgresPassword: ''
- chePostgresDb: ''
- chePostgresPort: ''
- devWorkspace:
- enable: false
- metrics:
- enable: true
- server:
- proxyURL: ''
- cheClusterRoles: ''
- proxyPassword: ''
- nonProxyHosts: ''
- proxyPort: ''
- tlsSupport: true
- selfSignedCert: false
- allowUserDefinedWorkspaceNamespaces: false
- serverTrustStoreConfigMapName: ''
- proxyUser: ''
- cheWorkspaceClusterRole: ''
- workspaceNamespaceDefault: -codeready
- serverExposureStrategy: ''
- gitSelfSignedCert: false
- useInternalClusterSVCNames: true
- cheFlavor: codeready
- serverMemoryRequest: '2Gi'
- serverMemoryLimit: '6Gi'
- customCheProperties:
- CHE_LIMITS_WORKSPACE_IDLE_TIMEOUT: "0"
- storage:
- postgresPVCStorageClassName: ''
- preCreateSubPaths: true
- pvcClaimSize: 1Gi
- pvcStrategy: common
- workspacePVCStorageClassName: ''
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/codeready_operatorgroup.yaml b/ansible/roles/ocp4-workload-sso-workshop/files/codeready_operatorgroup.yaml
deleted file mode 100644
index d7c4d0a233b..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/files/codeready_operatorgroup.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-apiVersion: operators.coreos.com/v1
-kind: OperatorGroup
-metadata:
- generateName: codeready-
- annotations:
- olm.providedAPIs: CheCluster.v1.org.eclipse.che
- name: codeready-operator-group
- namespace: codeready
-spec:
- targetNamespaces:
- - codeready
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/devspaces_cr.yaml b/ansible/roles/ocp4-workload-sso-workshop/files/devspaces_cr.yaml
new file mode 100644
index 00000000000..a2eb26aa745
--- /dev/null
+++ b/ansible/roles/ocp4-workload-sso-workshop/files/devspaces_cr.yaml
@@ -0,0 +1,39 @@
+---
+# yamllint disable rule:line-length
+apiVersion: org.eclipse.che/v2
+kind: CheCluster
+metadata:
+ name: devspaces
+ namespace: openshift-operators
+ annotations:
+ che.eclipse.org/checluster-defaults-cleanup: '{"spec.components.pluginRegistry.openVSXURL":"true"}'
+spec:
+ components:
+ cheServer:
+ debug: false
+ logLevel: INFO
+ database:
+ credentialsSecretName: postgres-credentials
+ externalDb: false
+ postgresDb: dbche
+ postgresHostName: postgres
+ postgresPort: '5432'
+ pvc:
+ claimSize: 1Gi
+ metrics:
+ enable: true
+ pluginRegistry: {openVSXURL: 'https://open-vsx.org'}
+ imagePuller:
+ enable: true
+ spec:
+ images: quarkus-stack-3-5=quay.io/openshiftlabs/quarkus-workshop-stack:3.5;vscode=registry.redhat.io/devspaces/code-rhel8:3.5;project-cloner=registry.redhat.io/devworkspace/devworkspace-project-clone-rhel8:0.19
+ containerRegistry: {}
+ devEnvironments:
+ secondsOfRunBeforeIdling: -1
+ defaultNamespace:
+ template: -devspaces
+ secondsOfInactivityBeforeIdling: -1
+ storage:
+ pvcStrategy: per-user
+ networking: {}
+# yamllint enable rule:line-length
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/codeready_subscription.yaml b/ansible/roles/ocp4-workload-sso-workshop/files/devspaces_subscription.yaml
similarity index 54%
rename from ansible/roles/ocp4-workload-sso-workshop/files/codeready_subscription.yaml
rename to ansible/roles/ocp4-workload-sso-workshop/files/devspaces_subscription.yaml
index d8adfdda35a..b47c8630e93 100644
--- a/ansible/roles/ocp4-workload-sso-workshop/files/codeready_subscription.yaml
+++ b/ansible/roles/ocp4-workload-sso-workshop/files/devspaces_subscription.yaml
@@ -2,11 +2,13 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
- name: codeready-workspaces
- namespace: codeready
+ labels:
+ operators.coreos.com/devspaces.openshift-operators: ''
+ name: devspaces
+ namespace: openshift-operators
spec:
- channel: latest
+ channel: stable
installPlanApproval: Automatic
- name: codeready-workspaces
+ name: devspaces
source: redhat-operators-index
sourceNamespace: openshift-marketplace
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/kubernetes-imagepuller-operator_subscription.yaml b/ansible/roles/ocp4-workload-sso-workshop/files/kubernetes-imagepuller-operator_subscription.yaml
new file mode 100644
index 00000000000..19ab007ced0
--- /dev/null
+++ b/ansible/roles/ocp4-workload-sso-workshop/files/kubernetes-imagepuller-operator_subscription.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ labels:
+ operators.coreos.com/kubernetes-imagepuller-operator.openshift-operators: ''
+ name: kubernetes-imagepuller-operator
+ namespace: openshift-operators
+spec:
+ channel: stable
+ installPlanApproval: Automatic
+ name: kubernetes-imagepuller-operator
+ source: community-operators-index
+ sourceNamespace: openshift-marketplace
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/project-request-custom.yaml b/ansible/roles/ocp4-workload-sso-workshop/files/project-request-custom.yaml
new file mode 100644
index 00000000000..aa913f8f3df
--- /dev/null
+++ b/ansible/roles/ocp4-workload-sso-workshop/files/project-request-custom.yaml
@@ -0,0 +1,129 @@
+---
+kind: Template
+apiVersion: template.openshift.io/v1
+metadata:
+ name: project-request
+ namespace: openshift-config
+objects:
+ - apiVersion: v1
+ kind: LimitRange
+ metadata:
+ name: '${PROJECT_NAME}-core-resource-limits'
+ namespace: '${PROJECT_NAME}'
+ spec:
+ limits:
+ - type: Container
+ max:
+ cpu: 4
+ memory: 12Gi
+ default:
+ cpu: 500m
+ memory: 1.5Gi
+ defaultRequest:
+ cpu: 50m
+ memory: 256Mi
+ - type: Pod
+ max:
+ cpu: 4
+ memory: 12Gi
+ - kind: NetworkPolicy
+ apiVersion: networking.k8s.io/v1
+ metadata:
+ name: allow-from-all-namespaces
+ spec:
+ podSelector: {}
+ ingress:
+ - from:
+ - namespaceSelector: {}
+ - apiVersion: networking.k8s.io/v1
+ kind: NetworkPolicy
+ metadata:
+ name: allow-from-ingress-namespace
+ spec:
+ podSelector: null
+ ingress:
+ - from:
+ - namespaceSelector:
+ matchLabels:
+ network-policy: global
+ - apiVersion: project.openshift.io/v1
+ kind: Project
+ metadata:
+ annotations:
+ openshift.io/description: '${PROJECT_DESCRIPTION}'
+ openshift.io/display-name: '${PROJECT_DISPLAYNAME}'
+ openshift.io/requester: '${PROJECT_REQUESTING_USER}'
+ name: '${PROJECT_NAME}'
+ spec: {}
+ status: {}
+ - apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ annotations:
+ openshift.io/description: >-
+ Allows all pods in this namespace to pull images from this namespace.
+ It is auto-managed by a controller; remove subjects to disable.
+ name: 'system:image-pullers'
+ namespace: '${PROJECT_NAME}'
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'system:image-puller'
+ subjects:
+ - apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: 'system:serviceaccounts:${PROJECT_NAME}'
+ - apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ annotations:
+ openshift.io/description: >-
+ Allows builds in this namespace to push images to this namespace. It
+ is auto-managed by a controller; remove subjects to disable.
+ name: 'system:image-builders'
+ namespace: '${PROJECT_NAME}'
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'system:image-builder'
+ subjects:
+ - kind: ServiceAccount
+ name: builder
+ namespace: '${PROJECT_NAME}'
+ - apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ annotations:
+ openshift.io/description: >-
+ Allows deploymentconfigs in this namespace to rollout pods in this
+ namespace. It is auto-managed by a controller; remove subjects to
+ disable.
+ name: 'system:deployers'
+ namespace: '${PROJECT_NAME}'
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: 'system:deployer'
+ subjects:
+ - kind: ServiceAccount
+ name: deployer
+ namespace: '${PROJECT_NAME}'
+ - apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: admin
+ namespace: '${PROJECT_NAME}'
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: admin
+ subjects:
+ - apiGroup: rbac.authorization.k8s.io
+ kind: User
+ name: '${PROJECT_ADMIN_USER}'
+parameters:
+ - name: PROJECT_NAME
+ - name: PROJECT_DISPLAYNAME
+ - name: PROJECT_DESCRIPTION
+ - name: PROJECT_ADMIN_USER
+ - name: PROJECT_REQUESTING_USER
diff --git a/ansible/roles/ocp4-workload-sso-workshop/files/stack.Dockerfile b/ansible/roles/ocp4-workload-sso-workshop/files/stack.Dockerfile
index feef9f28299..8f7f80fddb0 100644
--- a/ansible/roles/ocp4-workload-sso-workshop/files/stack.Dockerfile
+++ b/ansible/roles/ocp4-workload-sso-workshop/files/stack.Dockerfile
@@ -1,31 +1,49 @@
# To build this stack:
-# docker build -t quay.io/sshaaf/sso-workshop-stack:VVV -f stack.Dockerfile .
-# docker push quay.io/sshaaf/sso-workshop-stack:VVVV
+# docker build -t quay.io/username/quarkus-workshop-stack:VVV -f stack.Dockerfile .
+# docker push quay.io/username/quarkus-workshop-stack:VVVV
+# macOS M1: --platform linux/x86_64
-FROM registry.redhat.io/codeready-workspaces/plugin-java11-rhel8:latest
+FROM registry.redhat.io/devspaces/udi-rhel8:latest
-ENV OC_VERSION=4.10
+ENV MANDREL_VERSION=22.3.1.0-Final
+ENV QUARKUS_VERSION=2.13.7.Final-redhat-00003
+ENV OC_VERSION=4.12
ENV MVN_VERSION=3.8.4
+ENV GRAALVM_HOME="/usr/local/mandrel-java17-${MANDREL_VERSION}"
ENV PATH="/usr/local/maven/apache-maven-${MVN_VERSION}/bin:${PATH}"
+ENV JAVA_HOME=$JAVA_HOME_17
USER root
RUN wget -O /tmp/mvn.tar.gz https://archive.apache.org/dist/maven/maven-3/${MVN_VERSION}/binaries/apache-maven-${MVN_VERSION}-bin.tar.gz && sudo tar -xvzf /tmp/mvn.tar.gz && rm -rf /tmp/mvn.tar.gz && mkdir /usr/local/maven && mv apache-maven-${MVN_VERSION}/ /usr/local/maven/ && alternatives --install /usr/bin/mvn mvn /usr/local/maven/apache-maven-${MVN_VERSION}/bin/mvn 1
-RUN wget -O /tmp/oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OC_VERSION}.4/openshift-client-linux-${OC_VERSION}.4.tar.gz && cd /usr/bin && sudo tar -xvzf /tmp/oc.tar.gz && sudo chmod a+x /usr/bin/oc && rm -f /tmp/oc.tar.gz
+RUN wget -O /tmp/oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OC_VERSION}.3/openshift-client-linux-${OC_VERSION}.3.tar.gz && cd /usr/bin && sudo tar -xvzf /tmp/oc.tar.gz && sudo chmod a+x /usr/bin/oc && rm -f /tmp/oc.tar.gz
RUN sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && sudo microdnf install -y zlib-devel gcc siege gcc-c++ && sudo curl -Lo /usr/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && sudo chmod a+x /usr/bin/jq
-USER jboss
+RUN wget -O /tmp/mandrel.tar.gz https://github.com/graalvm/mandrel/releases/download/mandrel-${MANDREL_VERSION}/mandrel-java17-linux-amd64-${MANDREL_VERSION}.tar.gz && cd /usr/local && sudo tar -xvzf /tmp/mandrel.tar.gz && rm -rf /tmp/mandrel.tar.gz
-RUN mkdir /home/jboss/.m2
+RUN ln -f -s /usr/lib/jvm/java-17-openjdk/* ${HOME}/.java/current
-COPY settings.xml /home/jboss/.m2
+USER user
+
+RUN mkdir -p /home/user/.m2
+
+COPY settings.xml /home/user/.m2
+
+RUN cd /tmp && mkdir project && cd project && mvn com.redhat.quarkus.platform:quarkus-maven-plugin:${QUARKUS_VERSION}:create -DprojectGroupId=org.acme -DprojectArtifactId=footest -DplatformGroupId=com.redhat.quarkus.platform -DplatformVersion=${QUARKUS_VERSION} -Dextensions="quarkus-resteasy-reactive,quarkus-resteasy-reactive-jackson,quarkus-agroal,quarkus-hibernate-orm,quarkus-hibernate-orm-panache,quarkus-hibernate-reactive-panache,quarkus-jdbc-h2,quarkus-jdbc-postgresql,quarkus-kubernetes,quarkus-scheduler,quarkus-smallrye-fault-tolerance,quarkus-smallrye-health,quarkus-smallrye-opentracing" && mvn -f footest clean compile package -DskipTests && cd / && rm -rf /tmp/project
+
+RUN cd /tmp && mkdir project && cd project && mvn com.redhat.quarkus.platform:quarkus-maven-plugin:${QUARKUS_VERSION}:create -DprojectGroupId=org.acme -DprojectArtifactId=footest -DplatformGroupId=com.redhat.quarkus.platform -DplatformVersion=${QUARKUS_VERSION} -Dextensions="quarkus-smallrye-reactive-messaging,quarkus-smallrye-reactive-messaging-kafka,quarkus-vertx,quarkus-kafka-client,quarkus-micrometer-registry-prometheus,quarkus-smallrye-openapi,quarkus-qute,quarkus-resteasy-reactive-qute,quarkus-opentelemetry,quarkus-opentelemetry-exporter-jaeger" && mvn -f footest clean compile package -Pnative -DskipTests && cd / && rm -rf /tmp/project
+
+RUN cd /tmp && git clone https://github.com/RedHat-Middleware-Workshops/quarkus-workshop-m3-labs && cd quarkus-workshop-m3-labs && git checkout ocp-${OC_VERSION} && for proj in *-petclinic* ; do mvn -fn -f ./$proj dependency:resolve-plugins dependency:resolve dependency:go-offline clean compile -DskipTests ; done && cd /tmp && rm -rf /tmp/quarkus-workshop-m3-labs
+
+RUN siege && sed -i 's/^connection = close/connection = keep-alive/' $HOME/.siege/siege.conf && sed -i 's/^benchmark = false/benchmark = true/' $HOME/.siege/siege.conf
RUN echo '-w "\n"' > $HOME/.curlrc
USER root
-RUN chown -R jboss /home/jboss/.m2
-RUN chmod -R a+w /home/jboss/.m2
+RUN chown -R user /home/user/.m2
+RUN chmod -R a+w /home/user/.m2
+RUN chmod -R a+rwx /home/user/.siege
-USER jboss
\ No newline at end of file
+USER user
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/add_che_user.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/add_che_user.yaml
deleted file mode 100644
index 880d701030b..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/add_che_user.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-# yamllint disable rule:line-length
-- name: Get codeready SSO admin token
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/realms/master/protocol/openid-connect/token
- validate_certs: false
- method: POST
- body:
- username: "{{ codeready_sso_admin_username }}"
- password: "{{ codeready_sso_admin_password }}"
- grant_type: "password"
- client_id: "admin-cli"
- body_format: form-urlencoded
- status_code: 200,201,204
- register: codeready_sso_admin_token
- # yamllint enable rule:line-length
-
-# yamllint disable rule:line-length
-- name: Add user {{ user }} to Che
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/admin/realms/codeready/users
- validate_certs: false
- method: POST
- headers:
- Content-Type: application/json
- Authorization: "Bearer {{ codeready_sso_admin_token.json.access_token }}"
- body:
- username: "{{ user }}"
- enabled: true
- emailVerified: true
- firstName: "{{ user }}"
- lastName: Developer
- email: "{{ user }}@no-reply.com"
- credentials:
- - type: password
- value: "{{ workshop_che_user_password }}"
- temporary: false
- body_format: json
- status_code: 201,409
- # yamllint enable rule:line-length
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/confirm_che_workspace.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/confirm_che_workspace.yaml
deleted file mode 100644
index 568846ee9a0..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/confirm_che_workspace.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# yamllint disable rule:line-length
-- name: "Get Che {{ user }} token"
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/realms/codeready/protocol/openid-connect/token
- validate_certs: false
- method: POST
- body:
- username: "{{ user }}"
- password: "{{ workshop_che_user_password }}"
- grant_type: "password"
- client_id: "admin-cli"
- body_format: form-urlencoded
- status_code: 200
- register: user_token
- # yamllint enable rule:line-length
-
-- name: Confirm running status of workspace for {{ user }}
- uri:
- url: "https://codeready-codeready.{{ route_subdomain }}/api/workspace"
- validate_certs: false
- method: GET
- headers:
- Accept: application/json
- Authorization: "Bearer {{ user_token.json.access_token }}"
- status_code: 200
- register: workspace_def
-
-# yamllint disable rule:line-length
-- name: "Output warning for {{ user }}"
- agnosticd_user_info:
- msg: "WARNING: Workspace for {{ user }} failed to initialize - you may need to log in as that user and start it manually!"
- when: >-
- workspace_def.json[0].status == "STOPPED" or
- workspace_def.json[0].status == "STOPPING"
- # yamllint enable rule:line-length
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/create_che_workspace.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/create_che_workspace.yaml
deleted file mode 100644
index 0709e7b6a26..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/create_che_workspace.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# yamllint disable rule:line-length
-- name: "Get Che {{ user }} token"
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/realms/codeready/protocol/openid-connect/token
- validate_certs: false
- method: POST
- body:
- username: "{{ user }}"
- password: "{{ workshop_che_user_password }}"
- grant_type: "password"
- client_id: "admin-cli"
- body_format: form-urlencoded
- status_code: 200
- register: user_token
- # yamllint enable rule:line-length
-
-- name: Wait for CRW APIs to be ready
- uri:
- url: "https://codeready-codeready.{{ route_subdomain }}/api/workspace/"
- validate_certs: false
- method: GET
- headers:
- Content-Type: application/json
- Authorization: "Bearer {{ user_token.json.access_token }}"
- register: r_crw_dashboard
- until: r_crw_dashboard.status == 200
- retries: 200
- delay: 15
-
-# yamllint disable rule:line-length
-- name: Create workspace for {{ user }} from devfile
- uri:
- url: "https://codeready-codeready.{{ route_subdomain }}/api/workspace/devfile?start-after-create=true&namespace={{ user }}"
- validate_certs: false
- method: POST
- headers:
- Content-Type: application/json
- Authorization: "Bearer {{ user_token.json.access_token }}"
- body: "{{ lookup('template', './templates/devfile.json.j2') }}"
- body_format: json
- status_code: 201,409
- register: workspace_def
- # yamllint enable rule:line-length
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-codeready.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-codeready.yaml
deleted file mode 100644
index c780489aa5e..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-codeready.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
----
-# create codeready namespace
-- name: create codeready namespace
- k8s:
- state: present
- kind: Project
- api_version: project.openshift.io/v1
- definition:
- metadata:
- name: "codeready"
- annotations:
- openshift.io/description: ""
- openshift.io/display-name: "CodeReady Project"
-
-# deploy codeready operator
-- name: Create operator subscription for CodeReady
- k8s:
- state: present
- merge_type:
- - strategic-merge
- - merge
- definition: "{{ lookup('file', item ) | from_yaml }}"
- loop:
- - ./files/codeready_operatorgroup.yaml
- - ./files/codeready_subscription.yaml
-
-# wait for CRD to be a thing
-- name: Wait for CodeReady CRD to be ready
- k8s_info:
- api_version: apiextensions.k8s.io/v1
- kind: CustomResourceDefinition
- name: checlusters.org.eclipse.che
- register: r_codeready_crd
- retries: 200
- delay: 10
- until: r_codeready_crd.resources | list | length == 1
-
-# deploy codeready CR
-- name: Create CR for CodeReady
- k8s:
- state: present
- merge_type:
- - strategic-merge
- - merge
- definition: "{{ lookup('file', item ) | from_yaml }}"
- loop:
- - ./files/codeready_cr.yaml
-
-# wait for che to be up
-- name: wait for CRW to be running
- uri:
- url: https://codeready-codeready.{{ route_subdomain }}/dashboard/
- validate_certs: false
- register: result
- until: result.status == 200
- retries: "120"
- delay: "15"
-
-- name: Get codeready keycloak deployment
- k8s_info:
- kind: Deployment
- namespace: codeready
- name: keycloak
- register: r_keycloak_deployment
-
-- name: show cr
- debug:
- msg: "existing keycloak deployment: {{ r_keycloak_deployment }}"
-# yamllint disable rule:line-length
-- name: set codeready username fact
- set_fact:
- codeready_sso_admin_username: "{{ r_keycloak_deployment.resources[0].spec.template.spec.containers[0].env | selectattr('name','equalto','SSO_ADMIN_USERNAME') |map (attribute='value') | list | first }}"
- # yamllint enable rule:line-length
-
-# yamllint disable rule:line-length
-- name: set codeready password fact
- set_fact:
- codeready_sso_admin_password: "{{ r_keycloak_deployment.resources[0].spec.template.spec.containers[0].env | selectattr('name','equalto','SSO_ADMIN_PASSWORD') |map (attribute='value') | list | first }}"
- # yamllint enable rule:line-length
-
-- name: show codeready keycloak admin username
- debug:
- msg: "codeready keycloak admin username: {{ codeready_sso_admin_username }}"
-
-- name: show codeready keycloak admin password
- debug:
- msg: "codeready keycloak admin password: {{ codeready_sso_admin_password }}"
-
-- name: create codeready users
- include_tasks: add_che_user.yaml
- vars:
- user: "{{ item }}"
- with_list: "{{ users }}"
-
-# yamllint disable rule:line-length
-- name: Get codeready SSO admin token
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/realms/master/protocol/openid-connect/token
- validate_certs: false
- method: POST
- body:
- username: "{{ codeready_sso_admin_username }}"
- password: "{{ codeready_sso_admin_password }}"
- grant_type: "password"
- client_id: "admin-cli"
- body_format: form-urlencoded
- status_code: 200,201,204
- register: codeready_sso_admin_token
- # yamllint enable rule:line-length
-
-# yamllint disable rule:line-length
-- name: Increase codeready access token lifespans
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/admin/realms/codeready
- validate_certs: false
- method: PUT
- headers:
- Content-Type: application/json
- Authorization: "Bearer {{ codeready_sso_admin_token.json.access_token }}"
- body:
- accessTokenLifespan: 28800
- accessTokenLifespanForImplicitFlow: 28800
- actionTokenGeneratedByUserLifespan: 28800
- ssoSessionIdleTimeout: 28800
- ssoSessionMaxLifespan: 28800
- body_format: json
- status_code: 204
- # yamllint enable rule:line-length
-
-- name: Import stack imagestream
- k8s:
- state: present
- merge_type:
- - strategic-merge
- - merge
- definition: "{{ lookup('file', item ) | from_yaml }}"
- loop:
- - ./files/stack_imagestream.yaml
-
-- name: wait for stack to be a thing
- k8s_info:
- kind: ImageStream
- name: quarkus-stack
- namespace: openshift
- register: r_stack_is
- retries: 200
- delay: 10
- until: r_stack_is.resources | list | length == 1
-
-- name: import stack image
- shell: |
- oc import-image --all quarkus-stack -n openshift
-
-- name: Pre-create and warm user workspaces
- include_tasks: create_che_workspace.yaml
- vars:
- user: "{{ item }}"
- with_list: "{{ users }}"
-
-- name: wait a minute and let the image download and be registered
- when: num_users | int > 0
- pause:
- minutes: 2
-
-- name: Attempt to warm workspaces which failed to start
- include_tasks: verify_che_workspace.yaml
- vars:
- user: "{{ item }}"
- with_list: "{{ users }}"
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-devspaces.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-devspaces.yaml
new file mode 100644
index 00000000000..7931e912a15
--- /dev/null
+++ b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-devspaces.yaml
@@ -0,0 +1,61 @@
+---
+- name: Create operator subscription for Dev Spaces
+ k8s:
+ state: present
+ merge_type:
+ - strategic-merge
+ - merge
+ definition: "{{ lookup('file', item ) | from_yaml }}"
+ loop:
+ - ./files/devspaces_subscription.yaml
+
+- name: Wait for Dev Spaces CRD to be ready
+ k8s_info:
+ api_version: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ name: checlusters.org.eclipse.che
+ register: r_devspaces_crd
+ retries: 200
+ delay: 10
+ until: r_devspaces_crd.resources | list | length == 1
+
+- name: Verify if Dev Spaces Service is accessible
+ k8s_info:
+ api_version: v1
+ kind: Service
+ name: devspaces-operator-service
+ namespace: openshift-operators
+ register: r_devspaces_svc
+ retries: 200
+ delay: 10
+ until: r_devspaces_svc.resources | list | length == 1
+
+- name: Create CR for Dev Spaces
+ kubernetes.core.k8s:
+ merge_type:
+ - merge
+ definition: "{{ lookup('file', 'devspaces_cr.yaml' ) }}"
+ register: r_create_crd
+ until: r_create_crd is successful
+ retries: 30
+ delay: 10
+
+# yamllint disable rule:line-length
+- name: Wait for Dev Spaces Pod to be ready
+ kubernetes.core.k8s_info:
+ api_version: v1
+ kind: Pod
+ label_selectors:
+ - component=devspaces-dashboard
+ namespace: openshift-operators
+ register: r_devspaces_dashboard_pod
+ failed_when:
+ r_devspaces_dashboard_pod.resources[0].status.phase | default('') != 'Running'
+ until: r_devspaces_dashboard_pod is successful
+ delay: 10
+ retries: 200
+# yamllint enable rule:line-length
+
+- name: Pause for 2 minutes for image download
+ ansible.builtin.pause:
+ minutes: 2
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-guides.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-guides.yaml
index e89757c373f..5168d34182b 100644
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-guides.yaml
+++ b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-guides.yaml
@@ -8,11 +8,11 @@
-e OPENSHIFT_USER_PASSWORD='{{ workshop_openshift_user_password }}'
-e MASTER_URL={{ master_url }}
-e CONSOLE_URL={{ console_url }}
- -e CHE_URL=https://codeready-codeready.{{ route_subdomain }}
+ -e CHE_URL=https://devspaces.{{ route_subdomain }}
-e KEYCLOAK_URL=https://keycloak-codeready.{{ route_subdomain }}
-e ROUTE_SUBDOMAIN={{ route_subdomain }}
- -e CONTENT_URL_PREFIX='https://raw.githubusercontent.com/RedHat-Middleware-Workshops/keycloak-workshop-guides/ocp-4.10/docs'
- -e WORKSHOPS_URLS='https://raw.githubusercontent.com/RedHat-Middleware-Workshops/keycloak-workshop-guides/ocp-4.10/docs/{{ workshop_labs_url }}'
+ -e CONTENT_URL_PREFIX='https://raw.githubusercontent.com/RedHat-Middleware-Workshops/keycloak-workshop-guides/ocp-4.12/docs'
+ -e WORKSHOPS_URLS='https://raw.githubusercontent.com/RedHat-Middleware-Workshops/keycloak-workshop-guides/ocp-4.12/docs/{{ workshop_labs_url }}'
-e LOG_TO_STDOUT=true
# yamllint enable rule:line-length
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-username-distribution.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-username-distribution.yaml
index 32f602723f6..feac9da9fe2 100644
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/install-username-distribution.yaml
+++ b/ansible/roles/ocp4-workload-sso-workshop/tasks/install-username-distribution.yaml
@@ -49,7 +49,7 @@
-e LAB_USER_PAD_ZERO=false
-e LAB_ADMIN_PASS={{ workshop_openshift_user_password }}
-e LAB_MODULE_URLS={{ ('http://web-guides.' + route_subdomain + '/workshop/sso-workshop/lab/preface?userid=%USERNAME%;Getting Started with Single Sign-on Hands-on Lab') | quote }}
- -e LAB_EXTRA_URLS={{ ( console_url + ';OpenShift Console,https://codeready-codeready.' + route_subdomain + ';CodeReady Workspaces Console' ) | quote }}
+ -e LAB_EXTRA_URLS={{ ( console_url + ';OpenShift Console,https://devspaces.' + route_subdomain + ';OpenShift Dev Spaces Console' ) | quote }}
# yamllint enable rule:line-length
- name: expose username distribution tool
when: r_gau_dc.resources | list | length == 0
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/verify_che_workspace.yaml b/ansible/roles/ocp4-workload-sso-workshop/tasks/verify_che_workspace.yaml
deleted file mode 100644
index 0ffd4aefe69..00000000000
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/verify_che_workspace.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-# yamllint disable rule:line-length
-- name: "Get Che {{ user }} token"
- uri:
- url: https://keycloak-codeready.{{ route_subdomain }}/auth/realms/codeready/protocol/openid-connect/token
- method: POST
- body:
- username: "{{ user }}"
- password: "{{ workshop_che_user_password }}"
- grant_type: "password"
- client_id: "admin-cli"
- body_format: form-urlencoded
- status_code: 200
- register: user_token
- # yamllint enable rule:line-length
-
-- name: Get workspace for {{ user }}
- uri:
- url: "https://codeready-codeready.{{ route_subdomain }}/api/workspace"
- validate_certs: false
- method: GET
- headers:
- Accept: application/json
- Authorization: "Bearer {{ user_token.json.access_token }}"
- status_code: 200
- register: workspace_def
-
-# yamllint disable rule:line-length
-- name: Verify and start workspace for {{ user }} again if stopped
- when: workspace_def.json[0].status == "STOPPED"
- uri:
- url: "https://codeready-codeready.{{ route_subdomain }}/api/workspace/{{ workspace_def.json[0].id }}/runtime"
- validate_certs: false
- method: POST
- headers:
- Accept: application/json
- Authorization: "Bearer {{ user_token.json.access_token }}"
- status_code: 200
- # yamllint enable rule:line-length
diff --git a/ansible/roles/ocp4-workload-sso-workshop/tasks/workload.yml b/ansible/roles/ocp4-workload-sso-workshop/tasks/workload.yml
index db60d01e739..37d7e848ce3 100644
--- a/ansible/roles/ocp4-workload-sso-workshop/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-sso-workshop/tasks/workload.yml
@@ -18,6 +18,17 @@
- redhat-operators-index.yaml
- community-operators-index.yaml
+
+- name: Import custom project request to increase the limits
+ k8s:
+ state: present
+ merge_type:
+ - strategic-merge
+ - merge
+ definition: "{{ lookup('file', item ) | from_yaml }}"
+ loop:
+ - ./files/project-request-custom.yaml
+
- name: create projects userXX-{{ workshop_openshift_project_postfix }}
include_tasks: create_project.yaml
vars:
@@ -51,32 +62,29 @@
- name: install username distribution
include_tasks: install-username-distribution.yaml
-- name: Create ServiceAccount infinispan-monitoring
- k8s:
- state: present
- definition:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: infinispan-monitoring
- namespace: default
+# yamllint disable rule:line-length
+- name: Create operator subscription for kubernetes-imagepuller-operator
+ kubernetes.core.k8s:
+ merge_type:
+ - merge
+ definition: "{{ lookup('file', 'kubernetes-imagepuller-operator_subscription.yaml' ) }}"
+ # yamllint enable rule:line-length
-# Install CRW via operator
-- name: see if codeready is installed
+- name: see if dev spaces is installed
k8s_info:
- api_version: org.eclipse.che/v1
+ api_version: org.eclipse.che/v2
kind: CheCluster
- name: codeready-workspaces
- namespace: codeready
- register: r_codeready_cr
+ name: devspaces
+ namespace: openshift-operators
+ register: r_devspaces_cr
-- name: show codeready cr
+- name: show devspaces cr
debug:
- msg: "existing codeready project: {{ r_codeready_cr }}"
+ msg: "existing devspaces project: {{ r_devspaces_cr }}"
-- name: install codeready
- when: r_codeready_cr.resources | list | length == 0
- include_tasks: install-codeready.yaml
+- name: install devspaces
+ when: r_devspaces_cr.resources | list | length == 0
+ include_tasks: install-devspaces.yaml
# Leave this as the last task in the playbook.
- name: workload tasks complete
diff --git a/ansible/roles/ocp4-workload-sso-workshop/templates/devfile.json.j2 b/ansible/roles/ocp4-workload-sso-workshop/templates/devfile.json.j2
index 56372efb2d7..ea615db9264 100644
--- a/ansible/roles/ocp4-workload-sso-workshop/templates/devfile.json.j2
+++ b/ansible/roles/ocp4-workload-sso-workshop/templates/devfile.json.j2
@@ -1,92 +1,82 @@
{
- "apiVersion": "1.0.0",
+ "schemaVersion": "2.2.0",
"metadata": {
- "name": "{{ user }}-workspace"
+ "name": "user-workspace"
},
"components": [
{
- "id": "redhat/quarkus-java11/latest",
- "type": "chePlugin"
- },
- {
- "mountSources": true,
- "memoryLimit": "4Gi",
- "type": "dockerimage",
- "alias": "quarkus-tools",
- "image": "image-registry.openshift-image-registry.svc:5000/openshift/quarkus-stack:2.15",
- "env": [
- {
- "value": "/home/jboss/.m2",
- "name": "MAVEN_CONFIG"
- },
- {
- "value": "-Xmx4G -Xss128M -XX:MetaspaceSize=1G -XX:MaxMetaspaceSize=2G -XX:+CMSClassUnloadingEnabled",
- "name": "MAVEN_OPTS"
- }
- ],
- "endpoints": [
- {
- "name": "index-webpage",
- "port": 8080,
- "attributes": {
- "discoverable": "true",
- "public": "true",
- "protocol": "http"
- }
- },
- {
- "name": "quarkus-devui",
- "port": 8080,
- "attributes": {
- "discoverable": "true",
- "public": "true",
+ "container": {
+ "cpuLimit": "1000m",
+ "cpuRequest": "500m",
+ "endpoints": [
+ {
+ "exposure": "public",
+ "name": "index-webpage",
+ "protocol": "http",
+ "targetPort": 8080
+ },
+ {
+ "exposure": "public",
+ "name": "quarkus-devui",
+ "path": "/q/dev",
"protocol": "http",
- "path": "/q/dev"
+ "targetPort": 8080
+ },
+ {
+ "exposure": "none",
+ "name": "quarkus-debug",
+ "protocol": "tcp",
+ "targetPort": 5005
}
- },
- {
- "name": "debug-{{ user }}",
- "port": 5005,
- "attributes": {
- "discoverable": "false",
- "public": "false",
- "protocol": "jdwp"
+ ],
+ "env": [
+ {
+ "value": "/home/jboss/.m2",
+ "name": "MAVEN_CONFIG"
+ },
+ {
+ "value": "-Xmx4G -Xss128M -XX:MetaspaceSize=1G -XX:MaxMetaspaceSize=2G -XX:+CMSClassUnloadingEnabled",
+ "name": "MAVEN_OPTS"
}
- }
- ]
+ ],
+ "image": "image-registry.openshift-image-registry.svc:5000/openshift/quarkus-stack:3.5",
+ "memoryLimit": "6Gi",
+ "memoryRequest": "4Gi",
+ "command": [
+ "tail", "-f", "/dev/null"
+ ]
+ },
+ "name": "quarkus-tools"
}
],
"commands": [
{
- "name": "Login to OpenShift",
- "actions": [
- {
- "type": "exec",
- "component": "quarkus-tools",
- "command": "oc login https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT --insecure-skip-tls-verify=true --username={{ user }} --password={{ workshop_che_user_password }}",
- "workdir": "${CHE_PROJECTS_ROOT}"
- }
- ]
+ "id": "run-tests",
+ "exec": {
+ "component": "quarkus-tools",
+ "commandLine": "mvn verify -f ${PROJECT_SOURCE}/quarkus-workshop-m1m2-labs"
+ }
},
{
- "name": "Remote - Start Live Coding",
- "actions": [
- {
- "type": "exec",
+ "id": "start-live-coding",
+ "exec": {
"component": "quarkus-tools",
- "command": "mvn clean compile quarkus:dev -f ${CHE_PROJECTS_ROOT}/keycloak-workshop-labs/sso-quarkus-client-example",
- "workdir": "${CHE_PROJECTS_ROOT}"
- }
- ]
+ "commandLine": "mvn clean quarkus:dev -Dquarkus.http.host=0.0.0.0 -f ${PROJECT_SOURCE}/quarkus-workshop-m1m2-labs"
+ }
+ },
+ {
+ "id": "package-app-for-openshift",
+ "exec": {
+ "component": "quarkus-tools",
+ "commandLine": "mvn package -DskipTests -f ${PROJECT_SOURCE}/quarkus-workshop-m1m2-labs"
+ }
},
{
- "name": "Start Debugger on 5005",
- "actions": [
- {
- "type": "vscode-launch",
- "referenceContent": "{\n \"version\": \"0.2.0\",\n \"configurations\": [\n {\n \"type\": \"java\",\n \"request\": \"attach\",\n \"name\": \"Attach to App\",\n \"hostName\": \"localhost\",\n \"port\": 5005\n }\n ]\n}\n"
- }
- ]
+ "id": "build-native-app",
+ "exec": {
+ "component": "quarkus-tools",
+ "commandLine": "mvn package -Pnative -DskipTests -f ${PROJECT_SOURCE}/quarkus-workshop-m1m2-labs"
+ }
}
]
}
\ No newline at end of file
diff --git a/ansible/roles/ocp4-workload-template-service-broker/tasks/test.yaml b/ansible/roles/ocp4-workload-template-service-broker/tasks/test.yaml
index baa1be80843..7323c0c96e3 100644
--- a/ansible/roles/ocp4-workload-template-service-broker/tasks/test.yaml
+++ b/ansible/roles/ocp4-workload-template-service-broker/tasks/test.yaml
@@ -4,7 +4,7 @@
become: false
tasks:
- name: Wait for CSV to be successful
- k8s_facts:
+ k8s_info:
api_version:
kind:
namespace: openshift-template-service-broker
diff --git a/ansible/roles/ocp4-workload-template-service-broker/tasks/workload.yml b/ansible/roles/ocp4-workload-template-service-broker/tasks/workload.yml
index 9f1ce4fda11..7634346eafa 100644
--- a/ansible/roles/ocp4-workload-template-service-broker/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-template-service-broker/tasks/workload.yml
@@ -61,7 +61,7 @@
- name: Wait for Template Service Broker DeploymentConfig to appear
ignore_errors: true
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
namespace: "{{ _tsb_broker_project }}"
@@ -73,7 +73,7 @@
- name: Wait for Template Service Broker to be running
ignore_errors: true
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
namespace: "{{ _tsb_broker_project }}"
@@ -90,7 +90,7 @@
# fixes the issue.
- name: Workaround - Get Operator Pod
ignore_errors: true
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: "{{ _tsb_broker_project }}"
diff --git a/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/clean-environment.yml b/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/clean-environment.yml
index 4e7b1fd9b5a..e1b8afbc30d 100644
--- a/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/clean-environment.yml
+++ b/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/clean-environment.yml
@@ -40,7 +40,7 @@
name: "{{ student_project_name }}"
- name: ensure namespace is gone if it is terminating
- k8s_facts:
+ k8s_info:
kind: namespace
name: "{{ student_project_name }}"
register: result
diff --git a/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/pre_workload.yml b/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/pre_workload.yml
index 7535b51af7b..bc8446d74bd 100644
--- a/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/pre_workload.yml
+++ b/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/pre_workload.yml
@@ -13,7 +13,7 @@
api_url: "{{ api_url_r.stdout | trim }}"
- name: extract master_url
- k8s_facts:
+ k8s_info:
# required. Use to specify an object model.
# Use in conjunction with I(api_version), I(name), and I(namespace) to identify a specific object.
kind: Route
diff --git a/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/workload.yml b/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/workload.yml
index a9e3eae9c3c..e0935f9a5f3 100644
--- a/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-workshop-dashboard-cluster-admin-student/tasks/workload.yml
@@ -72,7 +72,7 @@
- name: add ocp_username to role admin for permissions
block:
- name: get current app name clusterrolebinding admin in order to merge new user
- k8s_facts:
+ k8s_info:
name: "{{ app_name }}-cluster-admin"
api_version: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
@@ -205,7 +205,7 @@
when: not silent | bool
- name: Grab openshift-console console quay.io image to be embedded in homeroom
- k8s_facts:
+ k8s_info:
kind: Deployment
api_version: apps/v1
namespace: openshift-console
@@ -394,7 +394,7 @@
- name: add ocp_username to role app_name enabling route access
block:
- name: get current rolebinding app_name in order to merge new user
- k8s_facts:
+ k8s_info:
name: "{{ app_name }}"
api_version: rbac.authorization.k8s.io/v1
kind: RoleBinding
diff --git a/ansible/roles/ocp4-workload-workshopper/tasks/workload.yml b/ansible/roles/ocp4-workload-workshopper/tasks/workload.yml
index f0535796175..c7864da2dba 100644
--- a/ansible/roles/ocp4-workload-workshopper/tasks/workload.yml
+++ b/ansible/roles/ocp4-workload-workshopper/tasks/workload.yml
@@ -13,7 +13,7 @@
name: labguide
- name: check if guide is deployed
- k8s_facts:
+ k8s_info:
api_version: apps.openshift.io/v1
kind: DeploymentConfig
name: "{{ _deployed_guide_name }}"
@@ -21,7 +21,7 @@
register: guide_exists
- name: extract the cluster_domain
- k8s_facts:
+ k8s_info:
api_version: operator.openshift.io/v1
kind: IngressController
name: default
diff --git a/ansible/roles/ocp_workload_shared_cluster_access/tasks/remove_workload.yml b/ansible/roles/ocp_workload_shared_cluster_access/tasks/remove_workload.yml
index d9efc7d8b48..dce907a07ad 100644
--- a/ansible/roles/ocp_workload_shared_cluster_access/tasks/remove_workload.yml
+++ b/ansible/roles/ocp_workload_shared_cluster_access/tasks/remove_workload.yml
@@ -4,7 +4,7 @@
msg: pre_workload tasks complete
- name: Get Namespaces
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Namespace
label_selectors:
diff --git a/ansible/roles/open-env-azure-add-user-to-subscription/tasks/main.yml b/ansible/roles/open-env-azure-add-user-to-subscription/tasks/main.yml
index 42b6b5a9e7b..a889df8c547 100644
--- a/ansible/roles/open-env-azure-add-user-to-subscription/tasks/main.yml
+++ b/ansible/roles/open-env-azure-add-user-to-subscription/tasks/main.yml
@@ -102,139 +102,32 @@
-p {{ management_subscription.subscriptions.fqid }}/resourceGroups/{{ azure_dns_resource_group }}/providers/Microsoft.Network/dnszones/{{ azure_root_dns_zone }}
--subscription {{ subscription_id }}
- #- name: Get resource group info
- # azure.azcollection.azure_rm_resourcegroup_info:
- # auth_source: cli
- # name: "openenv-{{ guid }}"
- # tenant: "{{ azure_tenant }}"
- # subscription_id: "{{ subscription_id }}"
- # register: azrg
-
- #- debug:
- # msg: "{{ azrg }}"
-
- - name: See if application already exists
- azure.azcollection.azure_rm_adapplication_info:
- auth_source: env
- identifier_uri: "api://openenv-{{ guid }}"
- tenant: "{{ azure_tenant }}"
- subscription_id: "{{ subscription_id }}"
- ignore_errors: true
- register: azappcheck
-
- name: Create the Application and SP
- when: azappcheck.applications|length==0
command: >-
az ad sp create-for-rbac
--name "api://openenv-{{ guid }}"
--role Owner
--scopes "{{ subscription_fqid }}"
register: azappcreate
- #--scopes "{{ azrg.resourcegroups[0].id }}"
-
- - name: Get password
- when: azappcreate.changed
- set_fact: azpass="{{ azappcreate.stdout | from_json | json_query('password') }}"
-
- - name: Wait 60 seconds for Azure to create application
- when: azappcreate.changed
- ansible.builtin.wait_for:
- timeout: 60
+ retries: 10
+ delay: 10
+ until: azappcreate is succeeded
- name: Get application info
azure.azcollection.azure_rm_adapplication_info:
- auth_source: env
- identifier_uri: "api://openenv-{{ guid }}"
+ auth_source: cli
tenant: "{{ azure_tenant }}"
+ app_id: "{{ azappcreate.stdout | from_json | json_query('appId') }}"
subscription_id: "{{ subscription_id }}"
register: azapp
+ retries: 30
+ delay: 10
+ until:
+ - azapp.applications | length > 0
- #- name: Add API Application.ReadWrite.All permissions to SP
- # when: azappcheck.applications|length==0
- # command: >-
- # az ad app permission add
- # --id "{{ azapp.applications[0].app_id }}"
- # --api 00000003-0000-0000-c000-000000000000
- # --api-permissions bdfbf15f-ee85-4955-8675-146e8e5296b5=Scope
- # register: azpermsupdate
-
- #- name: Wait 60 seconds for Azure to apply permission
- # when: azpermsupdate.changed
- # ansible.builtin.wait_for:
- # timeout: 60
-
- #- name: Authorize admin consent
- # when: azpermsupdate.changed
- # command: >-
- # az ad app permission admin-consent
- # --id "{{ azapp.applications[0].app_id }}"
-
- - name: Get SP info
- command: >-
- az ad sp show --id "api://openenv-{{ guid }}"
- register: azappinfo
-
- #- name: Build payload for role assignment
- # set_fact:
- # payload:
- # {
- # '@odata.type': '#microsoft.graph.unifiedRoleAssignment',
- # 'principalId': "{{ azappinfo.stdout | from_json | json_query('objectId') }}",
- # 'roleDefinitionId': '8e2e22ca-bde6-4977-bc67-7f189cc47557',
- # 'directoryScopeId': '/'
- # }
-
- #- name: Assign Application Administrator role to SP
- # command: >-
- # az rest -m post
- # --headers Content-type=application/json
- # -u https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignments
- # -b "{{ payload }}"
-
- #- name: See if ARO SP already exists
- # azure.azcollection.azure_rm_adapplication_info:
- # auth_source: env
- # identifier_uri: "api://openenv-aro-{{ guid }}"
- # tenant: "{{ azure_tenant }}"
- # subscription_id: "{{ subscription_id }}"
- # ignore_errors: true
- # register: azaroappcheck
-
- #- name: Create the Service Principal for ARO
- # when: azaroappcheck.applications|length==0
- # command: >-
- # az ad sp create-for-rbac
- # --name "api://openenv-aro-{{ guid }}"
- # --role Contributor
- # --scopes "{{ subscription_fqid }}"
- # register: azaroappcreate
- # #--scopes "{{ azrg.resourcegroups[0].id }}"
-
- #- name: Save ARO SP password
- # when: azaroappcreate.changed
- # set_fact: az_aro_pass="{{ azaroappcreate.stdout | from_json | json_query('password') }}"
-
- #- name: Get ARO SP info
- # command: >-
- # az ad sp show --id "api://openenv-aro-{{ guid }}"
- # register: azaroappinfo
-
- #- name: Build payload for role assignment
- # set_fact:
- # payload:
- # {
- # '@odata.type': '#microsoft.graph.unifiedRoleAssignment',
- # 'principalId': "{{ azaroappinfo.stdout | from_json | json_query('objectId') }}",
- # 'roleDefinitionId': '8e2e22ca-bde6-4977-bc67-7f189cc47557',
- # 'directoryScopeId': '/'
- # }
-
- #- name: Assign Application Administrator role to ARO SP
- # command: >-
- # az rest -m post
- # --headers Content-type=application/json
- # -u https://graph.microsoft.com/v1.0/roleManagement/directory/roleAssignments
- # -b "{{ payload }}"
+ - name: Get password
+ when: azappcreate.changed
+ set_fact: azpass="{{ azappcreate.stdout | from_json | json_query('password') }}"
- name: Log out of Azure CLI
command: >
diff --git a/ansible/roles/open-env-azure-delete-open-env/tasks/main.yml b/ansible/roles/open-env-azure-delete-open-env/tasks/main.yml
index a30b9bb0aca..4dcee86302b 100644
--- a/ansible/roles/open-env-azure-delete-open-env/tasks/main.yml
+++ b/ansible/roles/open-env-azure-delete-open-env/tasks/main.yml
@@ -40,8 +40,8 @@
tenant: "{{ azure_tenant }}"
register: all_apps
-- ansible.builtin.set_fact: oe_app_reg="openenv-{{ guid }}"
-- ansible.builtin.set_fact: oe_aro_app_reg="openenv-aro-{{ guid }}"
+- ansible.builtin.set_fact: oe_app_reg="api://openenv-{{ guid }}"
+- ansible.builtin.set_fact: oe_aro_app_reg="api://openenv-aro-{{ guid }}"
- name: Delete open environment app registrations
ansible.builtin.command: >-
az rest --method DELETE --url https://graph.microsoft.com/v1.0/applications/{{ item.object_id }}
diff --git a/ansible/roles/open-env-azure-install-aro/tasks/main.yml b/ansible/roles/open-env-azure-install-aro/tasks/main.yml
index 6947184a45e..c7ecaebf38e 100644
--- a/ansible/roles/open-env-azure-install-aro/tasks/main.yml
+++ b/ansible/roles/open-env-azure-install-aro/tasks/main.yml
@@ -182,3 +182,8 @@
retries: 6
delay: 60
until: r_update_oauth is success
+
+ - name: Put preconfigure_aad status in user_data
+ agnosticd_user_info:
+ data:
+ preconfigure_aad: "{{ preconfigure_aad }}"
diff --git a/ansible/roles/open-env-azure-remove-user-from-subscription/tasks/main.yml b/ansible/roles/open-env-azure-remove-user-from-subscription/tasks/main.yml
index 804a799e824..37ac3092273 100644
--- a/ansible/roles/open-env-azure-remove-user-from-subscription/tasks/main.yml
+++ b/ansible/roles/open-env-azure-remove-user-from-subscription/tasks/main.yml
@@ -35,41 +35,6 @@
set_fact:
pool_subscription_id: "{{ assignedsubscription.subscriptions[0].subscription_id }}"
- - name: Get application info
- environment:
- AZURE_SUBSCRIPTION_ID: "{{ pool_subscription_id }}"
- azure.azcollection.azure_rm_adapplication_info:
- auth_source: env
- identifier_uri: "api://openenv-{{ guid }}"
- tenant: "{{ azure_tenant }}"
- register: azapp
-
- - name: Delete application
- environment:
- AZURE_SUBSCRIPTION_ID: "{{ pool_subscription_id }}"
- when: azapp.applications|length>0
- azure.azcollection.azure_rm_adapplication:
- auth_source: env
- tenant: "{{ azure_tenant }}"
- app_id: "{{ azapp.applications[0].app_id }}"
- state: absent
- ignore_errors: true
-
- #- name: Get ARO application info
- # azure.azcollection.azure_rm_adapplication_info:
- # auth_source: env
- # identifier_uri: "api://openenv-aro-{{ guid }}"
- # tenant: "{{ azure_tenant }}"
- # register: azaroapp
-
- #- name: Delete ARO application
- # when: azaroapp.applications|length>0
- # azure.azcollection.azure_rm_adapplication:
- # auth_source: env
- # tenant: "{{ azure_tenant }}"
- # app_id: "{{ azaroapp.applications[0].app_id }}"
- # state: absent
-
- name: Clean up DNS zone
command: >
az network dns zone delete
@@ -88,6 +53,25 @@
zone_name: "{{ azure_root_dns_zone }}"
state: absent
+ - name: Get list of locks in the subscription
+ environment:
+ AZURE_SUBSCRIPTION_ID: "{{ pool_subscription_id }}"
+ azure.azcollection.azure_rm_lock_info:
+ auth_source: env
+ managed_resource_id: "/subscriptions/{{ pool_subscription_id }}"
+ register: r_subscription_locks
+
+ - name: Delete all locks in the subscription
+ when: r_subscription_locks.locks|length>0
+ environment:
+ AZURE_SUBSCRIPTION_ID: "{{ pool_subscription_id }}"
+ azure.azcollection.azure_rm_lock:
+ auth_source: env
+ name: "{{ item.name }}"
+ managed_resource_id: "/subscriptions/{{ pool_subscription_id }}"
+ state: absent
+ loop: "{{ r_subscription_locks.locks }}"
+
- name: Get all resource groups in the subscription
environment:
AZURE_SUBSCRIPTION_ID: "{{ pool_subscription_id }}"
@@ -126,10 +110,6 @@
id: "{{ azure_subscription_id }}"
register: management_subscription
- - name: Log out of Azure CLI
- command: >
- az logout
-
- name: Get the user's object from Active Directory
azure.azcollection.azure_rm_aduser_info:
auth_source: env
@@ -156,7 +136,25 @@
state: absent
loop: "{{ role_assignments.roleassignments }}"
+ - name: Get all azure applications
+ azure.azcollection.azure_rm_adapplication_info:
+ auth_source: cli
+ tenant: "{{ azure_tenant }}"
+ register: all_apps
+
+ - ansible.builtin.set_fact: oe_app_reg="api://openenv-{{ guid }}"
+ - ansible.builtin.set_fact: oe_aro_app_reg="api://openenv-aro-{{ guid }}"
+ - name: Delete open environment app registrations
+ ansible.builtin.command: >-
+ az rest --method DELETE --url https://graph.microsoft.com/v1.0/applications/{{ item.object_id }}
+ with_items: "{{ all_apps.applications }}"
+ when: item.app_display_name == oe_app_reg or item.app_display_name == oe_aro_app_reg
+
- name: Remove pool allocation from the database
ansible.builtin.uri:
url: "{{ az_function_release }}{{ project_tag }}/{{ az_pool_id }}?code={{ azure_pool_api_secret }}"
ignore_errors: yes
+
+ - name: Log out of Azure CLI
+ command: >
+ az logout
diff --git a/ansible/roles/podman_desktop/README.md b/ansible/roles/podman_desktop/README.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/ansible/roles/podman_desktop/defaults/main.yml b/ansible/roles/podman_desktop/defaults/main.yml
new file mode 100644
index 00000000000..520bb62d401
--- /dev/null
+++ b/ansible/roles/podman_desktop/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# -------------------------------------------------
+# Default Variables
+# -------------------------------------------------
+
+# Define the path other than default
+# podman_desktop_content_path: "/home/{{ student_name }}/podman/"
+
+# Git repository url
+# podman_desktop_content_repository: https://github.com/linuxnerds/rhel9_podman_desktop.git
+podman_desktop_content_repository_version: main
diff --git a/ansible/roles/podman_desktop/meta/main.yml b/ansible/roles/podman_desktop/meta/main.yml
new file mode 100644
index 00000000000..ce50e24ba22
--- /dev/null
+++ b/ansible/roles/podman_desktop/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ role_name: podman_desktop
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: Setup podman-desktop
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - podmandesktop
+ - podman
+ - desktop
+dependencies: []
diff --git a/ansible/roles/podman_desktop/tasks/main.yml b/ansible/roles/podman_desktop/tasks/main.yml
new file mode 100644
index 00000000000..0b6790bcef9
--- /dev/null
+++ b/ansible/roles/podman_desktop/tasks/main.yml
@@ -0,0 +1,42 @@
+---
+
+- name: Install podman
+ ansible.builtin.dnf:
+ name: podman
+ state: present
+
+- name: Add the flathub flatpak repository
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://flathub.org/repo/flathub.flatpakrepo
+
+- name: Install Podman desktop from flathub
+ community.general.flatpak:
+ name: io.podman_desktop.PodmanDesktop
+ state: present
+ remote: flathub
+
+- name: Clone content repository block
+ when: podman_desktop_content_repository is defined
+ block:
+ - name: Set repository content path
+ set_fact:
+ podman_desktop_content_path: "/home/{{ student_name }}/podman/"
+
+ - name: Create podman directory
+ ansible.builtin.file:
+ path: "{{ podman_desktop_content_path }}"
+ recurse: true
+ state: directory
+ mode: '0755'
+ owner: "{{ student_name }}"
+ group: "{{ student_name }}"
+
+ - name: Clone content
+ become: true
+ become_user: "{{ student_name }}"
+ ansible.builtin.git:
+ repo: "{{ podman_desktop_content_repository }}"
+ dest: "{{ podman_desktop_content_path }}"
+ version: "{{ podman_desktop_content_repository_version }}"
diff --git a/ansible/roles/showroom/defaults/main.yml b/ansible/roles/showroom/defaults/main.yml
index c14d08c45bf..ed74bb7d739 100644
--- a/ansible/roles/showroom/defaults/main.yml
+++ b/ansible/roles/showroom/defaults/main.yml
@@ -1,11 +1,14 @@
---
# TODO: make this repo generic example
+showroom_deploy: true
+
# Content repo with *optional* tag
showroom_git_repo: https://github.com/tonykay/showroom-poc-2023-06.git
showroom_git_tag: main
showroom_default_playbook: site.yml # Default antora playbook to build from
+showroom_primary_port: 8000
showroom_user: showroom
showroom_group: showroom
diff --git a/ansible/roles/showroom/tasks/60-showroom-verify.yml b/ansible/roles/showroom/tasks/60-showroom-verify.yml
index 8699acb98d9..9f78256e6a3 100644
--- a/ansible/roles/showroom/tasks/60-showroom-verify.yml
+++ b/ansible/roles/showroom/tasks/60-showroom-verify.yml
@@ -4,8 +4,14 @@
# - does it run
# - all of it?
+- name: Capture lab_ui_url as fact
+ ansible.builtin.set_fact:
+ f_lab_ui_url:
+ "http://{{ groups['bastions'][0].split('.',1)[0] }}.{{ guid }}{{
+ subdomain_base_suffix }}:{{ showroom_primary_port }}"
+
- name: Output showroom view(s) URLs as userinfo and userdata
agnosticd_user_info:
- msg: "showroom_primary_view_url: http://{{ groups['bastions'][0] | regex_replace('\\..*$') }}.{{ guid }}{{ subdomain_base_suffix }}:8000"
data:
- showroom_primary_view_url: "http://{{ groups['bastions'][0] | regex_replace('\\..*$') }}.{{ guid }}{{ subdomain_base_suffix }}:8000"
+ lab_ui_url: "{{ f_lab_ui_url }}"
+ showroom_primary_view_url: "{{ f_lab_ui_url }}"
diff --git a/ansible/roles/showroom/tasks/main.yml b/ansible/roles/showroom/tasks/main.yml
index c2394afd451..4f30e2600e0 100644
--- a/ansible/roles/showroom/tasks/main.yml
+++ b/ansible/roles/showroom/tasks/main.yml
@@ -1,33 +1,33 @@
---
-#
-# This is a PoC and includes some/many steps that would be migrated to init containers etc
-#
+- name: Deploy the showroom user interface
+ when: showroom_deploy | default(true) | bool
+ block:
-- name: Setup the showroom user and working directories
- ansible.builtin.include_tasks:
- file: 10-showroom-user-setup.yml
+ - name: Setup the showroom user and working directories
+ ansible.builtin.include_tasks:
+ file: 10-showroom-user-setup.yml
-- name: Setup OS dependencies, packages, user, directory
- ansible.builtin.include_tasks:
- file: 20-showroom-dependencies.yml
+ - name: Setup OS dependencies, packages, user, directory
+ ansible.builtin.include_tasks:
+ file: 20-showroom-dependencies.yml
-- name: Clone primary showroom repo and inject externals (vars, html templates)
- ansible.builtin.include_tasks:
- file: 30-showroom-clone-and-inject.yml
- tags:
- - showroom-clone-and-inject
+ - name: Clone primary showroom repo and inject externals (vars, html templates)
+ ansible.builtin.include_tasks:
+ file: 30-showroom-clone-and-inject.yml
+ tags:
+ - showroom-clone-and-inject
-- name: Render showroom to html if required
- ansible.builtin.include_tasks:
- file: 40-showroom-render.yml
- tags:
- - showroom-render
+ - name: Render showroom to html if required
+ ansible.builtin.include_tasks:
+ file: 40-showroom-render.yml
+ tags:
+ - showroom-render
-- name: Create, enable, start showroom systemd service
- ansible.builtin.include_tasks:
- file: 50-showroom-service.yml
+ - name: Create, enable, start showroom systemd service
+ ansible.builtin.include_tasks:
+ file: 50-showroom-service.yml
-- name: Validate showroom service and output view url(s)
- ansible.builtin.include_tasks:
- file: 60-showroom-verify.yml
+ - name: Validate showroom service and output view url(s)
+ ansible.builtin.include_tasks:
+ file: 60-showroom-verify.yml
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/defaults/main.yml
index 46b00275170..3f42363988d 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/defaults/main.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/defaults/main.yml
@@ -5,9 +5,8 @@ silent: false
lab_version: "lab-4.13"
repo_user: "RHsyseng"
-kcli_baremetal_plan_revision: 0cdab26571acf61feeaabf216c1d3066f780cb87
# yamllint disable rule:line-length
-kcli_rpm: "https://github.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/raw/{{ lab_version }}/lab-materials/kcli-rpm/kcli-99.0.0.git.202305180753.3473537-0.el8.x86_64.rpm"
+kcli_rpm: "https://github.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/raw/{{ lab_version }}/lab-materials/kcli-rpm/kcli-99.0.0.git.202307262238.9d217af-0.el8.x86_64.rpm"
# yamllint enable rule:line-length
ocp4_major_release: "4.13"
lab_network_cidr: "192.168.125.0/24"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/pre_workload.yml
index 8de2d6805c6..4aace4d2e04 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/pre_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/pre_workload.yml
@@ -56,13 +56,6 @@
ansible.builtin.shell:
cmd: restorecon /var/lib/libvirt
-#- name: Ensure kcli copr repo is enabled
-# community.general.copr:
-# state: enabled
-# host: copr.fedorainfracloud.org
-# chroot: epel-8-x86_64
-# name: karmab/kcli
-
# group all dnf installs in the same task to save time
- name: Ensure lab dependencies are installed
ansible.builtin.dnf:
@@ -76,8 +69,13 @@
- podman
- httpd-tools
- haproxy
+ - python3-pyOpenSSL
state: present
+- name: Ensure ksushy requirements are installed
+ ansible.builtin.pip:
+ name: cherrypy
+
- name: Ensure kcli rpm is installed
ansible.builtin.dnf:
name: "{{ kcli_rpm }}"
@@ -100,7 +98,7 @@
- name: Ensure lab network is present
ansible.builtin.shell:
- cmd: "kcli create network -c {{ lab_network_cidr }} --nodhcp --domain {{ lab_network_domain }} 5gdeploymentlab"
+ cmd: "kcli create network -c {{ lab_network_cidr }} -P dhcp=false -P dns=false --domain {{ lab_network_domain }} 5gdeploymentlab"
- name: Ensure oc/kubectl tooling is present
ansible.builtin.shell:
@@ -220,17 +218,9 @@
async: 900
register: download_rhcos
-- name: Ensure sushy-tools script exists
- ansible.builtin.get_url:
- # yamllint disable rule:line-length
- url: "https://gist.githubusercontent.com/mvazquezc/0acb9e716c329abb9a184f1bcceed591/raw/21de9c32bcaf53ef40f379231ab1a4c1fdfefcf7/deploy-sushy-tools.sh"
- # yamllint enable rule:line-length
- dest: "/tmp/deploy-sushy-tools.sh"
- mode: "0755"
-
-- name: Ensure sushy-tools are installed
+- name: Ensure ksushy is installed
ansible.builtin.shell:
- cmd: /tmp/deploy-sushy-tools.sh
+ cmd: kcli create sushy-service --ssl --port 9000
async: 120
poll: 0
register: sushy_async
@@ -323,7 +313,6 @@
dest: "/etc/systemd/system/podman-gitea.service"
mode: "0644"
-
- name: Ensure git server service is enabled and running
ansible.builtin.systemd:
state: restarted
@@ -369,30 +358,21 @@
failed_when: result.rc != 0 and "not created because VM" not in result.stderr
# yamllint disable rule:line-length
with_items:
- - {name: "hub-master0", cpus: "{{ lab_hub_vm_cpus }}", disk: "{{ lab_hub_vm_disk }}", memory: "{{ lab_hub_vm_memory }}", mac: "aa:aa:aa:aa:01:01", uuid: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0101"}
- - {name: "hub-master1", cpus: "{{ lab_hub_vm_cpus }}", disk: "{{ lab_hub_vm_disk }}", memory: "{{ lab_hub_vm_memory }}", mac: "aa:aa:aa:aa:01:02", uuid: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0102"}
- - {name: "hub-master2", cpus: "{{ lab_hub_vm_cpus }}", disk: "{{ lab_hub_vm_disk }}", memory: "{{ lab_hub_vm_memory }}", mac: "aa:aa:aa:aa:01:03", uuid: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0103"}
- {name: "sno1", cpus: "{{ lab_sno_vm_cpus }}", disk: "{{ lab_sno_vm_disk }}", memory: "{{ lab_sno_vm_memory }}", mac: "aa:aa:aa:aa:02:01", uuid: "uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0201"}
- {name: "sno2", cpus: "{{ lab_sno_vm_cpus }}", disk: "{{ lab_sno_vm_disk }}", memory: "{{ lab_sno_vm_memory }}", mac: "aa:aa:aa:aa:03:01", uuid: "uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0301"}
# yamllint enable rule:line-length
-- name: Ensure kcli-baremetal-plan-repo is cloned
- ansible.builtin.git:
- repo: 'https://github.com/karmab/kcli-openshift4-baremetal.git'
- dest: /root/kcli-openshift4-baremetal/
- version: "{{ kcli_baremetal_plan_revision }}"
-
- name: Ensure pull secret is copied to the bastion host
ansible.builtin.copy:
content: "{{ ocp4_pull_secret }}"
- dest: "/root/kcli-openshift4-baremetal/openshift_pull.json"
+ dest: "/root/openshift_pull.json"
mode: '0644'
- name: Ensure plan file exists
ansible.builtin.get_url:
# yamllint disable rule:line-length
url: "https://raw.githubusercontent.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/{{ lab_version }}/lab-materials/lab-env-data/hub-cluster/hub.yml"
- dest: "/root/kcli-openshift4-baremetal/hub.yml"
+ dest: "/root/hub.yml"
mode: "0644"
# yamllint enable rule:line-length
@@ -403,7 +383,7 @@
- name: Set password to hub admin user
ansible.builtin.replace:
- path: "/root/kcli-openshift4-baremetal/hub.yml"
+ path: "/root/hub.yml"
regexp: '{{ item.regexp }}'
replace: "'{{ item.password }}'"
with_items:
@@ -428,7 +408,7 @@
community.crypto.openssh_keypair:
path: /root/.ssh/id_rsa
-- name: Async check sushy-tools are installed
+- name: Async check sushy tools are installed
ansible.builtin.async_status:
jid: "{{ sushy_async.ansible_job_id }}"
register: job_result
@@ -440,11 +420,11 @@
ansible.builtin.systemd:
state: restarted
enabled: true
- name: sushy-tools
+ name: ksushy
- name: Ensure sushy is listening for redfish connections
ansible.builtin.uri:
- url: https://infra.5g-deployment.lab:9000/redfish/v1/Systems/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0101
+ url: https://infra.5g-deployment.lab:9000/redfish/v1/Systems/local/sno1
method: GET
status_code: 200
validate_certs: false
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/remove_workload.yml
index 7a374006471..c9dc70e8ff3 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/remove_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/remove_workload.yml
@@ -11,7 +11,7 @@
register: result
failed_when: result.rc != 0 and "not found" not in result.stderr
-- name: Ensure sushy-tools, registry, gitea and dnsmasq directories are deleted
+- name: Ensure registry, gitea and dnsmasq directories are deleted
ansible.builtin.file:
path: "{{ item }}"
state: absent
@@ -19,7 +19,6 @@
- /opt/registry
- /opt/gitea
- /opt/dnsmasq/
- - /opt/sushy-tools
- name: Ensure sushy-tools, registry, gitea and dnsmasq services are stopped
ansible.builtin.systemd:
@@ -30,7 +29,7 @@
- podman-registry
- podman-gitea
- dnsmasq-virt
- - sushy-tools
+ - ksushy
- name: Ensure service files for sushy-tools, registry, gitea and dnsmasq services are deleted
ansible.builtin.file:
@@ -40,7 +39,7 @@
- /etc/systemd/system/podman-registry.service
- /etc/systemd/system/podman-gitea.service
- /etc/systemd/system/dnsmasq-virt.service
- - /etc/systemd/system/sushy-tools.service
+ - /usr/lib/systemd/system/ksushy.service
- name: Ensure HAProxy service is stopped
ansible.builtin.systemd:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/workload.yml
index 03d3aa87dea..c3785827ae8 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_5gran_deployments_lab/tasks/workload.yml
@@ -3,11 +3,11 @@
- name: Ensure hub cluster is deployed via kcli
ansible.builtin.shell:
- cmd: kcli create plan --pf hub.yml
+ cmd: kcli create cluster openshift --pf hub.yml
args:
- chdir: /root/kcli-openshift4-baremetal/
- register: result
- failed_when: result.rc != 0 or ("skipped on local" not in result.stdout and "deployed on local" not in result.stdout)
+ chdir: /root/
+ async: 3600
+ poll: 0
- name: Ensure kubernetes manifests are downloaded
ansible.builtin.get_url:
@@ -16,7 +16,6 @@
mode: "{{ item.mode }}"
# yamllint disable rule:line-length
with_items:
- - {url: "https://raw.githubusercontent.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/{{ lab_version }}/lab-materials/lab-env-data/hub-cluster/lvmcluster.yaml", destination: "/tmp/lvmcluster.yaml", mode: "0644"}
- {url: "https://raw.githubusercontent.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/{{ lab_version }}/lab-materials/lab-env-data/hub-cluster/argocd-patch.json", destination: "/tmp/argocd-openshift-gitops-patch.json", mode: "0644"}
- {url: "https://raw.githubusercontent.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/{{ lab_version }}/lab-materials/lab-env-data/hub-cluster/hub-operators-argoapps.yaml", destination: "/tmp/hub-operators-argoapps.yaml", mode: "0644"}
- {url: "https://raw.githubusercontent.com/{{ repo_user }}/5g-ran-deployments-on-ocp-lab/{{ lab_version }}/lab-materials/lab-env-data/hub-cluster/sno1-argoapp.yaml", destination: "/tmp/sno1-argoapp.yaml", mode: "0644"}
@@ -32,22 +31,18 @@
delay: 60
- name: Ensure we have the kubeconfig file for the hub cluster copied in the bastion
- ansible.builtin.shell:
- cmd: "{{ item }}"
- with_items:
- - 'kcli ssh hub-installer -- "sudo cp /root/ocp/auth/kubeconfig /tmp/kubeconfig && sudo chmod 644 /tmp/kubeconfig"'
- - 'kcli scp hub-installer:/tmp/kubeconfig /root/hub-kubeconfig'
+ ansible.builtin.copy:
+ src: /root/.kcli/clusters/hub/auth/kubeconfig
+ dest: /root/hub-kubeconfig
+ remote_src: true
-# Apply manifests and then wait to be deployed
-- name: Apply LVMCluster manifest to the cluster
+- name: Remove kubeadmin user
kubernetes.core.k8s:
- kubeconfig: /root/hub-kubeconfig
- state: present
- src: /tmp/lvmcluster.yaml
- register: result
- until: result.failed != true
- retries: 5
- delay: 60
+ state: absent
+ api_version: v1
+ kind: Secret
+ namespace: kube-system
+ name: kubeadmin
- name: Ensure ArgoCD instance is patched for ZTP support
kubernetes.core.k8s:
@@ -98,13 +93,12 @@
retries: 5
delay: 60
-
- name: Wait until LVMCluster is ready
kubernetes.core.k8s_info:
kubeconfig: /root/hub-kubeconfig
api_version: lvm.topolvm.io/v1alpha1
kind: LVMCluster
- name: odf-lvmcluster
+ name: lvmcluster
namespace: openshift-storage
register: lvmcluster
retries: 60
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ama_demo/tasks/setup-gitops.yml b/ansible/roles_ocp_workloads/ocp4_workload_ama_demo/tasks/setup-gitops.yml
index bed117be6f2..6d81e553627 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ama_demo/tasks/setup-gitops.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ama_demo/tasks/setup-gitops.yml
@@ -13,7 +13,6 @@
- name: Install JDK 11
command:
cmd: dnf -y install java-11-openjdk-devel
- warn: false
- name: Create /usr/local/maven directory
file:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ansible_automation_platform/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_ansible_automation_platform/tasks/workload.yml
index a04ddcb741a..50a4e943a95 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ansible_automation_platform/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ansible_automation_platform/tasks/workload.yml
@@ -130,7 +130,7 @@
- name: Fetch Automation Controller manifest file
ansible.builtin.get_url:
- url: "{{ ocp4_workload_ansible_automation_platform_manifest.url }}"
+ url: https://d3s3zqyaz8cp2d.cloudfront.net/aap/manifest.zip
dest: /tmp/aap-manifest.zip
username: "{{ ocp4_workload_ansible_automation_platform_manifest.username | default(omit) }}"
password: "{{ ocp4_workload_ansible_automation_platform_manifest.password | default(omit) }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ansible_ee/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_ansible_ee/tasks/workload.yml
index fd423b030e9..603ef798340 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ansible_ee/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ansible_ee/tasks/workload.yml
@@ -12,7 +12,7 @@
name: "{{ ocp4_workload_ansible_ee_namespace }}"
- name: Retrieve created hub route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: hub
@@ -23,7 +23,7 @@
delay: 30
- name: Retrieve hub secret
- k8s_facts:
+ k8s_info:
api_version: "v1"
kind: Secret
name: hub-admin-password
@@ -81,7 +81,7 @@
with_items: "{{ ocp4_workload_ansible_ee_image_build }}"
- name: Retrieve created event listener route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: ansible-ee-el
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/README.md b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/README.md
new file mode 100644
index 00000000000..3751fd27418
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/README.md
@@ -0,0 +1,125 @@
+TODO Application with Quarkus HELM Chart repo
+=========
+
+This role will deploy the TODO Application with Quarkus Helm chart repo. This is an example application based on a Todo list, where tasks can be created, read, updated, or deleted in the database.
+
+See the [TODO Application with Quarkus Helm chart repo](https://github.com/tosin2013/todo-demo-app-helmrepo/blob/main/openshift-pipelines/README.md) for use of this demo.
+
+[Deploy using Github Actions](https://github.com/tosin2013/todo-demo-app-helmrepo/blob/main/openshift-pipelines/github-actions.md)
+
+Requirements
+------------
+
+* OpenShift 4.12 cluster installed
+* Ansible 2.9 or higher
+```
+sudo pip3 install openshift pyyaml kubernetes jmespath
+ansible-galaxy collection install kubernetes.core community.general
+```
+
+
+Role Variables
+--------------
+
+Role Variables are found in defaults/main.yml
+
+```
+become_override: false
+ocp_username: system:admin
+silent: false
+
+ocp4_workload_gitea_user: user1
+ocp4_workload_gitea_operator_create_admin: true
+ocp4_workload_gitea_operator_create_users: true
+ocp4_workload_gitea_operator_migrate_repositories: true
+ocp4_workload_gitea_operator_gitea_image_tag: 1.19.3
+ocp4_workload_gitea_operator_repositories_list:
+- repo: "https://github.com/tosin2013/todo-demo-app-helmrepo.git"
+ name: "todo-demo-app-helmrepo"
+ private: false
+
+## OpenShift Pipelines
+
+ocp4_workload_pipelines_defaults:
+ tkn_version: 0.31.1
+ channel: latest
+ automatic_install_plan_approval: true
+ starting_csv: ""
+
+```
+
+Dependencies
+------------
+* ocp4_workload_gitea_operator
+* ocp4_workload_pipelines
+
+Example Playbook
+----------------
+
+Deploy a Workload with the `ocp-workload` playbook
+
+```
+TARGET_HOST="bastion.wk.red.osp.opentlc.com"
+OCP_USERNAME="lab-user"
+WORKLOAD="ocp4_workload_argocd_quay_todo_app"
+GUID=wk
+```
+**Generate extra vars**
+```
+cat >extra_vars.yaml<=1.0.0 <1.9.0'
+ operators.openshift.io/valid-subscription: '["OpenShift Container Platform", "OpenShift Platform Plus"]'
+ console.openshift.io/plugins: '["gitops-plugin"]'
+ operators.operatorframework.io/builder: operator-sdk-v1.10.0+git
+ operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
+ operatorframework.io/properties: >-
+ {"properties":[{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"AnalysisRun","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"AnalysisTemplate","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"AppProject","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"Application","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"ApplicationSet","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"ArgoCD","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"ClusterAnalysisTemplate","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"Experiment","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"Rollout","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"argoproj.io","kind":"RolloutManager","version":"v1alpha1"}},{"type":"olm.gvk","value":{"group":"pipelines.openshift.io","kind":"GitopsService","version":"v1alpha1"}},{"type":"olm.package","value":{"packageName":"openshift-gitops-operator","version":"1.9.0"}}]}
+ repository: 'https://github.com/redhat-developer/gitops-operator'
+ support: Red Hat
+ operators.openshift.io/infrastructure-features: '["disconnected"]'
+ alm-examples: |-
+ [
+ {
+ "apiVersion": "argoproj.io/v1alpha1",
+ "kind": "AppProject",
+ "metadata": {
+ "name": "example"
+ },
+ "spec": null
+ },
+ {
+ "apiVersion": "argoproj.io/v1alpha1",
+ "kind": "Application",
+ "metadata": {
+ "name": "example"
+ },
+ "spec": null
+ },
+ {
+ "apiVersion": "argoproj.io/v1alpha1",
+ "kind": "ApplicationSet",
+ "metadata": {
+ "name": "example"
+ },
+ "spec": null
+ },
+ {
+ "apiVersion": "argoproj.io/v1alpha1",
+ "kind": "ArgoCD",
+ "metadata": {
+ "name": "argocd"
+ },
+ "spec": {
+ "controller": {
+ "resources": {
+ "limits": {
+ "cpu": "2000m",
+ "memory": "2048Mi"
+ },
+ "requests": {
+ "cpu": "250m",
+ "memory": "1024Mi"
+ }
+ }
+ },
+ "ha": {
+ "enabled": false,
+ "resources": {
+ "limits": {
+ "cpu": "500m",
+ "memory": "256Mi"
+ },
+ "requests": {
+ "cpu": "250m",
+ "memory": "128Mi"
+ }
+ }
+ },
+ "rbac": {
+ "defaultPolicy": "",
+ "policy": "g, system:cluster-admins, role:admin\n",
+ "scopes": "[groups]"
+ },
+ "redis": {
+ "resources": {
+ "limits": {
+ "cpu": "500m",
+ "memory": "256Mi"
+ },
+ "requests": {
+ "cpu": "250m",
+ "memory": "128Mi"
+ }
+ }
+ },
+ "repo": {
+ "resources": {
+ "limits": {
+ "cpu": "1000m",
+ "memory": "1024Mi"
+ },
+ "requests": {
+ "cpu": "250m",
+ "memory": "256Mi"
+ }
+ }
+ },
+ "resourceExclusions": "- apiGroups:\n - tekton.dev\n clusters:\n - '*'\n kinds:\n - TaskRun\n - PipelineRun \n",
+ "server": {
+ "resources": {
+ "limits": {
+ "cpu": "500m",
+ "memory": "256Mi"
+ },
+ "requests": {
+ "cpu": "125m",
+ "memory": "128Mi"
+ }
+ },
+ "route": {
+ "enabled": true
+ }
+ },
+ "sso": {
+ "dex": {
+ "openShiftOAuth": true,
+ "resources": {
+ "limits": {
+ "cpu": "500m",
+ "memory": "256Mi"
+ },
+ "requests": {
+ "cpu": "250m",
+ "memory": "128Mi"
+ }
+ }
+ },
+ "provider": "dex"
+ }
+ }
+ },
+ {
+ "apiVersion": "argoproj.io/v1alpha1",
+ "kind": "RolloutManager",
+ "metadata": {
+ "name": "argo-rollout"
+ },
+ "spec": null
+ },
+ {
+ "apiVersion": "pipelines.openshift.io/v1alpha1",
+ "kind": "GitopsService",
+ "metadata": {
+ "name": "gitopsservice-sample"
+ },
+ "spec": null
+ }
+ ]
+ capabilities: Deep Insights
+ olm.operatorNamespace: openshift-operators
+ containerImage: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8-operator@sha256:f4336d54225d883c96bac965317444a1a785574f3ba85a0b53c56db534cc86cf
+ operators.operatorframework.io/internal-objects: '["gitopsservices.pipelines.openshift.io"]'
+ description: >-
+ Enables teams to adopt GitOps principles for managing cluster
+ configurations and application delivery across hybrid multi-cluster
+ Kubernetes environments.
+ olm.operatorGroup: global-operators
+ name: openshift-gitops-operator.v1.9.0
+ namespace: openshift-gitops
+ labels:
+ olm.copiedFrom: openshift-operators
+ operatorframework.io/arch.amd64: supported
+ operatorframework.io/arch.arm64: supported
+ operatorframework.io/arch.ppc64le: supported
+ operatorframework.io/arch.s390x: supported
+ operatorframework.io/os.linux: supported
+spec:
+ customresourcedefinitions:
+ owned:
+ - kind: AnalysisRun
+ name: analysisruns.argoproj.io
+ version: v1alpha1
+ - kind: AnalysisTemplate
+ name: analysistemplates.argoproj.io
+ version: v1alpha1
+ - description: >-
+ An Application is a group of Kubernetes resources as defined by a
+ manifest.
+ displayName: Application
+ kind: Application
+ name: applications.argoproj.io
+ version: v1alpha1
+ - description: >-
+ ApplicationSet is the representation of an ApplicationSet controller
+ deployment.
+ kind: ApplicationSet
+ name: applicationsets.argoproj.io
+ version: v1alpha1
+ - description: An AppProject is a logical grouping of Argo CD Applications.
+ displayName: AppProject
+ kind: AppProject
+ name: appprojects.argoproj.io
+ version: v1alpha1
+ - description: Argo CD is the representation of an Argo CD deployment.
+ displayName: Argo CD
+ kind: ArgoCD
+ name: argocds.argoproj.io
+ resources:
+ - kind: ArgoCD
+ name: ''
+ version: v1alpha1
+ - kind: ConfigMap
+ name: ''
+ version: v1
+ - kind: CronJob
+ name: ''
+ version: v1
+ - kind: Deployment
+ name: ''
+ version: v1
+ - kind: Ingress
+ name: ''
+ version: v1
+ - kind: Job
+ name: ''
+ version: v1
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: v1
+ - kind: Pod
+ name: ''
+ version: v1
+ - kind: Prometheus
+ name: ''
+ version: v1
+ - kind: ReplicaSet
+ name: ''
+ version: v1
+ - kind: Route
+ name: ''
+ version: v1
+ - kind: Secret
+ name: ''
+ version: v1
+ - kind: Service
+ name: ''
+ version: v1
+ - kind: ServiceMonitor
+ name: ''
+ version: v1
+ - kind: StatefulSet
+ name: ''
+ version: v1
+ version: v1alpha1
+ - kind: ClusterAnalysisTemplate
+ name: clusteranalysistemplates.argoproj.io
+ version: v1alpha1
+ - kind: Experiment
+ name: experiments.argoproj.io
+ version: v1alpha1
+ - description: GitopsService is the Schema for the gitopsservices API
+ displayName: Gitops Service
+ kind: GitopsService
+ name: gitopsservices.pipelines.openshift.io
+ version: v1alpha1
+ - kind: RolloutManager
+ name: rolloutmanagers.argoproj.io
+ version: v1alpha1
+ - kind: Rollout
+ name: rollouts.argoproj.io
+ version: v1alpha1
+ relatedImages:
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8-operator@sha256:f4336d54225d883c96bac965317444a1a785574f3ba85a0b53c56db534cc86cf
+ name: >-
+ gitops-rhel8-operator-f4336d54225d883c96bac965317444a1a785574f3ba85a0b53c56db534cc86cf-annotation
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8-operator@sha256:f4336d54225d883c96bac965317444a1a785574f3ba85a0b53c56db534cc86cf
+ name: manager
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/dex-rhel8@sha256:44b5729b11c749e2e286fccc3021f1e9ba524c69fb9809b5d2121c4e5b05b40e
+ name: argocd_dex_image
+ - image: >-
+ registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:d5829e880db4b82a50a4962d61ea148522a93644174931b256d7ad866eadcf40
+ name: argocd_keycloak_image
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8@sha256:3c03f3f8da227567fab48587ca546b51734d2ef4a8aa7b94ba449060a369001b
+ name: backend_image
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:4e80c7810c4c99e89e35b33951ed8e1f4324899b5d47a8cd50cbb034f3e0c925
+ name: argocd_image
+ - image: >-
+ registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3
+ name: argocd_redis_image
+ - image: >-
+ registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf7ce748b703e195220b7bd7b42fa2caa4cdfd96840445e096036a0d85f1ff2
+ name: argocd_redis_ha_proxy_image
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/console-plugin-rhel8@sha256:2925a527335159ca73115a831b56b713273372f8de18d08b745b8ce018491c71
+ name: gitops_console_plugin_image
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/kam-delivery-rhel8@sha256:b6397098b9d0e1f9206b51e50013c90165b7ebb9ea69d305e77ecbef0da29b13
+ name: kam_image
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/argo-rollouts-rhel8@sha256:d70aecb74cb46ce45e9ec02e9938da7c14316e7d142e78ee25b2d6b0ac1e506c
+ name: argo_rollouts_image
+ - image: >-
+ registry.redhat.io/openshift-gitops-1/must-gather-rhel8@sha256:4a5b9b97466b53e7775d887a0d920507cebbf892e7bc6a5334c784c55f9e3dd8
+ name: must_gather_image
+ cleanup:
+ enabled: false
+ apiservicedefinitions: {}
+ keywords:
+ - devtools
+ - gitops
+ - pipelines
+ displayName: Red Hat OpenShift GitOps
+ provider:
+ name: Red Hat Inc
+ maturity: GA
+ installModes:
+ - supported: false
+ type: OwnNamespace
+ - supported: false
+ type: SingleNamespace
+ - supported: false
+ type: MultiNamespace
+ - supported: true
+ type: AllNamespaces
+ version: 1.9.0
+ icon:
+ - base64data: >-
+ iVBORw0KGgoAAAANSUhEUgAAAXwAAAF8CAYAAADM5wDKAAAACXBIWXMAAG66AABuugHW3rEXAAAgAElEQVR4nO2dwW7d1rWGN504bpOmcjq5o+LE6LBAJc86qqQnkIz7AJIGnV3A8hNIegLJQOeSH6CQhE7uTBLQuaXOChSQhU7unTR2E6d1nJgXP03a5yqSRa69N7lJfh9AKHAOyU0e6efiv9daO8vz3AEAwPC5xXcMADAOEHwAgJGA4AMAjAQEHwBgJCD4AAAjAcEHABgJCD4AwEhA8AEARgKCDwAwEj7mi45DlmVfOue0zTnn7k79FPNDu14AAyflLs+dc6dTP5/lef6MGxoeWisEIMuyhVLQq2229xcF0D1n1QPAOXec5/kx34kfCH5DsixTlL4wtSHuAO2hh8BxteV5/px7Xx8EvwZZlilqXy43BB4gHfQAONCW5/kp38uHQfCvoRT51VLkJ0kOEgCmuSjFfw/xvxoEf4pyonW13BB5gP4i8d8rxZ8J4BIE/63QVyJP9gzA8DgphX9v7N/taAW/nHxdJ5oHGA1V1L8z1sne0Ql+adtUQj+TwJAAoF1eTAn/qOye0Qh+KfSbzrmVBIYDAGnwRLowFuEfvOBPWTcbCQwHANJkawxWz6AFP8uy9TKqx7oBgJt4UUb7O0O9U4MU/LLVwV6qk7EzMzNubm7uyv+3sLDQ+ngAYnF8fHU3hNPTU/fixYtU77smd1eH2MphUIJf2jcS+qWuxzI7O+u+/PLLQti13b17991PAHjL8+fPC/Gvfmp79uyZOzs7S+EOHZbCPxibZzCC36V9M5lMisi8EneidAB/9HZQPQT03xcXF13c1UHZPL0X/DKqP2i7aGppaakQ9uXl5SKSB4C4KPI/ODgoxP/w8LDtu63ireW+R/u9Fvwsy5ZLCyd6VC/fXeJebQDQLRL/amtpPuBFafEc9PWr76Xgl1G97JuHsc+lSF4Cv7q6GvtUAGBkb2+vEP6WIv/Hpc3Tu2i/d4JfFlAdxGxTrGh+fX29EHnsGoD+INtH4r+zsxM76j8rLZ5eFWz1SvBjWziafN3c3CSaBxgAEn79PUec7O2dxdMbwS+zcLZjHHt+fr6I6PHmAYaHrB5F/CcnJ7Gu7VFfsnh6IfhZlu3F6IGjiF5RAGmUAMNH2T16e48U8T/J8zx5a+BWAmO4Fk3OZll2HFrs5dHv7u4Wfh9iDzAO9Leuv3n97UsDArMirSoTSpIl2Qi/vHHHoSdnNzY2CvuGileA8aLKXtk8W1tboe+BJnMXUs3gSVLwY2TiyKfXF3xdDxsAGB+q4lUAGNjfTzaDJzlLp1w8/DSU2OvVbXt7u/DvEHsAmEaaIG2QRgS0eaRdp6WWJUVSEX55g45DpV2qgZlm6MmlB4CbkL+vTL2AjdtelPbOaSo3P5kIv7Rxgom9vHq9riH2AFAHaYU0Q9oRCGnZcaltSZBEhB9yglavZYrqyb4BACuyeRTtB6rWTWYit3PBDyn2snD0RZGBAwC+KJNHgWMgiycJ0U/B0gmSjbOyslK8jiH2ABACaYk0RdoSgNlS6zqlU8EvK2i9+9jLc1PFLABAaKQtgXz9+VLzOqMzSydUbxxVzdHsDABiI+FfW1sLcZbOeu90EuGXXS+9xL5qj4DYA0AbSGsCtWXYLjWwdVqP8MsUpVOf9EvdcAqpAKAL5OtrMtczg0c7z7VdjdtqhD+1/ixiDwC9pKrO9Yz0tfNB283W2rZ0Nn0ychB7AEiBQKI/W2pia7Qm+KVn5bUGLc3PACAVpEXSJE8etunnt+Lhl68tz3ysHCZoASBFAmTvyM//so2irLYifC/fXjmwiD0ApIi0yTNPf6atoqzoEb5vvr2q3CiqAoDUkfA/efLEZ5TR8/OjCr6vlaPeOEqBAgDoA/L1PXrvRLd2Yls6e1axrzJyAAD6gmfmzkypmdGIJvhZlqk/8ZJ1f7U4phEaAPQJaZa0y4OlUjujEDPCNz+pNA
FCP3sA6CPSLs9J3GhRfhQP32eiFt8eAIaAp58fZQI3uOD7TNTK+2JZQgAYAlojV6Jv7LkTZQI3hqWzbp2o3dzcROwBYBBIy6RpRmZKLQ1K0Ai/7IR5btl3fn6erBwAGBzy9E9OTqyXdS9kR83QEb75cRagJwUAQHJ4alvQ5mrBBL+M7k2LP2pGm6ZoADBEpG0eWTsrpbYGIZilk2XZjqUbpiZqNblBzj0ADJXnz58Xnr5xAvdxnudB/PwgEX6ZmWPqbqbXHcQeAIaMNM7D2lkNtVBKkAg/yzL5TI3fWSaTSRHdAwCMAUX5FxcXlivdyvPc288P5eGbonu6YALAmPDQvCD94b0FP8syDWTSdD+lYdI+AQDGhDRP2mdgUmqtFyEifNMg1teD1xQAACSPh/Z5C76Xh28ttMK7B4Ax4+HlexVi+Ub4pieOR7kxAEDv8dBAryjfN8J/1tS/V969clIBAMaMUjUNefkXeZ6bC7HMEX6WZXOWyVq8ewAAsxZOSu014WPpmF4ttNAvAMDY8dBC844+gr/cdIelpSXaHwMAlBO30kQDjbW3wiT4Vjtnedk8TgCAwWHURLOtY5q0tbRSYLIWAODHGCdvTa0WrJZO48cS0T0AwI8xaqNpp8aCX3Ztm226H4IPAPBjjNo4a+mg2djSybJMo9tveqLQi6UDAAyFLMssV/Igz/ODJjtYLJ3GHc+MM9EAAKPAqJGNtbgVwacrJgDA9Rg1svFOFkunsTdzfn5O/j0AwDWomeS9e/ca3548zxt5QY0i/CzLGj9R1BkTsQcAuB5ppLSyKU01uamlg50DABABo1Y2KsBqKviNQ/W5OXOfHwCA0WDUyqiC33hECD4AwM20IfiNJm0tE7bk3wMA1MOSj99k4rZ2hF8uZ9iI2dnGBbkAAKPFoplNtLmJpdNY8MnOAQCoj1Ezowg+/j0AQERi+/hNBL9xox4EHwCgPkbNrK3NUSN89XkGAIComkmEDwDQN1KK8JuPgggfAKA2sTWzdh5+0xx8ljQEuBk1zdJWB2VwkPk2fCxLHtbNxf841t3DzgF4z/HxsTs9PS3EXT+1GdYxLVAwpb8vbXoA6Cc9q4aDvs+Tk5Mo1xNN8AHGjAS+2kL/8epBoWNePu78/Hwh/NUGcBkEHyAQBwcH7zZr9O5D9RDY2toq3gK0Vmq1AbjYk7YAQ0cWzfr6euG7PnjwwD158qQTsb+MxqCxaEwam8ZYd64AhguCD2BAVo1sE61S9Pjx4yRE/jo0No1RY9WYNXYYJ9EEn5RMGCJ7e3vFROni4mK0ibWYaMwauyYGdS0wLqIJPlk6MCSqiH5tbc1dXFz0/srOzs6KayHiT4+Y2omlA/AB5Htr0rOvEf1NVBG/rhGPPw1iuiMIPsA17OzsFNHW4eHh4G+RrlHXqmuG4YLgA1xCka7E79GjR0lPxoZG16pr1rUT7Q8TBB9gCk1kSvDkcY8VXTuTusMEwQdwruj7tLq6Wkxkjimqvw7dA90L3RN6Yg0HKm1h9EjQlK3SdlQ/mUzeNUS7qSla1WRNW5tZQireUt8fZfKQat1/EHwYNRIziX3sqF6tDqoeNyGanVXN2Kp+PTHHrwehHkg6D+nW/QbBh9ESW+wVwSvdUbZIaKGsHh5qmeDKa5Hnrj4+Md4AdI+qnH1Ev7/g4cMokTDGEvuVlRV3dHRU2C9VamdsqpRKnVPn1hhCU4m+7h30EwQfRocES03FQoq9LJuNjQ13fn5eRNpdtifWuTUGjUVj0thCoXume4fo9xMEH0aFrA9ZLCGRqCqy3tzcTGpFKo1FY9LYNMaQ6B7qXkK/QPBhNIT27JeWloooWqKacgaLxqYxaqwacwgqewfR7xcIPoyCKvUyhNjLItnf3y9sjT6tMauxaswaewibpxJ98vT7A4IPgyek2CtCrhqq9ZWqUVqIaB/R7xcIPgwepS6GKKra3t4uIuQhFCDpGnQtuiZfdG+r9FBIGwQfBo2yVVQt6oPsj6
dPnw5S1HRNujZfi0f3mN476YPgw2Cp1pv1YXZ2dvDFRro2XaOu1QfWzU0fBB8Gi7xqH99+DGJfEUL0da9Dp7xCWBB8GCSqOvXx7SuxH1PDMF2rr+hrBS0WUUkXBB8GR1UEZWWMYl8RQvSrYi9IDwQfBoe8ZKuVo8lLTT6OuRWwrl33wDqRq3tP1k6aIPgwKBSd+qxBSzfIt1SevhV9Bz77QxxojwyDwsfKUU56V2L/w+mpy68pXsru3nUfdTAu3QvdE61za0HfBaKfFgg+DAbZEJo0tKCq07ZsCAn798fH7vXxcfHzh5qTyx/NzrqPFxbc7YWF4mfWgu2ke2J9a9J3oe+EzJ10wNKBwWCN7ivfPjYS95daI/aLL9w3Dx64V48f1xZ7oc9qH+2rY+hY37cQQfv4+WTspAWCD4NAUah1pafYk7QS5a8XFtzXi4vuO8+q32l0LB1Tx44p/NUkrgWlxmLrpAOCD4PAGt3LyonVCO3Ns2fvhP57o9VUBx27Ev43kdIhdY+szdZ85lUgLAg+9B7lfFu9+1iWw3d7e+6fc3NRhf4yOpfO+V0ke8p6r/TdkJefBgg+9B6rEGkVqBj97OWtv1xbc3mkxdE/hM6pc7+MMFGqe2VdOQsvPw0QfOg9Fn9Zk5Chs3KUfVNE2AF9eisag8ZyXaqnFd0zywQunTTTAMGHXqOe7paqWglXyIlaCas89CZZN7HRWDSmkKKve2Z5UOo7YuHz7kHwoddYRSR0bnhqYl9RiX5IrPcOwe8eBB96jUVEVlZWgnr38stTFPsKjS2kp697p3vYFAS/exB86C3K77bYOSGje2XEpODZ34TGGDJ7x3IP9V2Rk98tCD70Fot4TCaTYtHtECjn/dsedYXUWEPl6ese6l42BcHvFnrpQG+xiEfIIivZJKFSL29NJkV/nFuXrCYJtKpo3xiriKcpUjZXV93ngURX9/Lx48eN9kHwuwXBh95iKbYKZedIhEMUVX2ysuJ+sr5+YzdMddP8986Ot32kMWvsHwd4y9G9bCr41gI5CAOWDvQSS6So/PFQ7Y//5dku4PbSkps5P3ef7e3Van2sz+iz2ue2scVBhe/YK3QvLTn5RPndgeBDLzk9PW087FDevW90/+n2tvvZwcGP7Js6aB/tq2OYx19G+SGw3FPLdwdhQPChl1h6s4QS/Fce2S6f7e66OwEmenUMHcuKzzVMY7mn9NXpDgQfeoklSgxh56hq1eqjKyr/JGBKqI5ljfRfHxwEqcC13FMi/O5A8KGXdGXpWK0Q+e4hIvvL6JgWT18ZOyFsHSydfoHgQy9pWnBlyRm/itdGkfw0YrdI67Gt13KZpvfWUiwHYUDwoXdYPOBQrRQsUbFSLy0TtHXRsT8xtDoINXFrubf4+N2A4EPv6FLwLT1zftJCNa7lHKH6/yD4/QHBh1EQQvB/MHjPqqCtk2fvi85xy2BbWa7pMjEWkYE4IPgANbFktYSoaI15rtALpEDaIPgAEYnp3V+mzXNBP0HwASJyK+CqWgC+IPgwCkJV2TblzQgsk5BLRUJcEHwA8OI58wC9AcEHiEioBUfq0Oa5oJ/QD39kKP+5bg600u1IuXtPZrAuQhU3xTqX5ZqgvyD4A0U9x9WzROKun9qsJe1VH3ltegDoZ1eeeJdY8um1UpVy3WPn4uscllWxQoyLIqr+gOAPBAl8tYVeVUgPCh3z8nHn5+cL8ddSd2N5AHw0O9u4QlUrVX0WcAHx687RFF1LCLqsfIZmIPg95uDg4N3WRUOq6iGgZe70FiDhr7ahouKmpoKvdso/3dyMlicv797SsjlUURiC3x+YtO0Z+uNaX18vUuEePHjgnjx5kkT3QY1BY9GYNDatdzrENri3jSL5bcR+OtZjW6/lMhcNrSTLsogQBgS/J8iqkW1y7969IqJOucVsJf73798vxqw3kKFgjYpfHx66VxFaJOuYOraFEBG+ZX3aUOsKQ3MQ/MTZ29srXn8XFxd7ueK/xq
yoX9ewF9nHbgNltVhaEYtvHz1y3wW8BzqWjmlB19BVhg6C3x0IfqJUEf3a2lrjV+YU0TXoWiT8lqgwJe54LFP4cm0tSKSvY+hYVnyuwRf8++5A8BNDHr0mPfsa0d+EhF/XpodZX9P5ZIV8PD9v3l9R+TfLy6ZCKe2jfa2RvdDYQ03YWtoqEOF3B4KfEDs7O8Ufw6HRk+0TepjpWjc3N3s5/p96jlu++4t799zL1dVaPen1GX1W+1g9+wrfsU+j77DJEof67BhrOFKBtMwEqKL6s0ArEPUFTe5ubW0Vk7ry9/sU+VVR/veeb2FKp9SmxUt0TKVuqsNm1XRNEb0qaC1FVVeR/cd/BO/Rr0BF8zR16OsDfigg+B0joVOa5ZgXdtaDTlGfxGC9heUAQ6Fiqn/Ozbk8wHcnQbfk0jcl/9//Ld4UQhaCKVhZWVkpMrM+hD6z2uHcAWDpdIY6DOqXXxOZrOL/Ntp/9OhRrwRB0finEVItY6MHy8vA91mBy/7+/pX2jv5td3d3EFlafYcIvwMk9opo27Zw9IdXNUS7KVOiarKmrc0sIUWJujd9Ef5PVlfd6+PjVqLzkFTjDR3pa6t6OGlCVxuTtOmA4LeM/hgkaLGjelUz6jzaQjQ7q5qxVf16Yo5f1k6fIn2JpiZVm7Zc6JoYou/KiVxEPk0Q/BaJLfaK4BVhSSxD/8FVD4/KY9e16BVdE66h3wB0vKoOoS98fnzsvjb02emaWKIPaYKH3xISxlhir8mwo6Oj4jW6Su2Mjc6hc+mcOveKsfr0Ovq2ipKqViX6oTpQtkkMTx/SBMFvAYm90tZCir0sm42NDXd+fl5E2l1Gwzq3xvDVV18VYxprcyyJ/s9PT82tF7oE0R8HCH5kZH2E9qMlqoqs5XWnVKauCTqNSWPzFf4+L4wte+Sz3V2X9ezBh+gPHwQ/IqE9+6WlpSKil6imLIiV8Ov6NeamDKEaU9k7ivZ9WjA0RUVVviD6wwbBj0SVehlC7BUpK8dZ1lCfGk9prBqzPP4m5fdDqcZUnr58/c+PjqIKv46tc9z9n/8JYif5iL6ylVQZfNVWp4UExIUsnQiEFHtFyPLH+2xx6F5U1tZNfYKGWI2pVgaflwL4am8vSM6+7KLby8tF18vpVglVto3vOepk7+TPnxfX9Lq8troZSprY1pi1AIt+spB6i+R5XmvTR5tsGxsb+VhZWVlpdK+u27a3twd3B/f39/PJZPKja9a/7e7u1jrG0dFR43upfVLhzVdf5d/t7+ff/Od/5v9wrtGmfbSvjvEhvllZaXzsqzYd5zKvj46CHb86x+uEvp+ukXY2/f2uq+NE+IFRNH5TT5GbkIWjPPQhFq9U1Zi6vir1Um8vY+qgqIhW0bl+fvfHPzba985//Vet5mcxIn1F8f/a3PRuGHfVObTJmlInz9DN3eA9CH5AqvVmfZidne1d50gLtMiNT0jRf/3f/100XouJHiRfLy4Wwq+xx1r0fcwwaRsQRa4+vr3EfqiRPXSDhDPERG5ssZ9Gwq8upCGXg4S3IPiBUNWpTzO0Suz7PDkLaRJK9NtELae1hCMpomFB8ANQFUFZQewhNn0UfVfaScWaAz1rtZEqCH4AfBYw0QRt39MuoR/0VfSV7qnGdIi+Pwi+J4rMfdagxbOHNum76IMfZOl44mPlbG9vI/aJogdxxdAW8QiVvdM2Ev3QyzOODSJ8D2TFnBhzklVB26f1W8eCvlO1hFhcXHy33b9/v/i3IS3R12dPn+wdOwi+B9bovvLtIS2qNYavWtBF/6b/p9TbvvXqv46+iv636+vuzbNnCYykf2DpGNErv3WlJyZp00NN3upUSGu+por2Jf59R6Ifsqjq1mRSVMpeLpqSQKtS902A1dGKlM3V1aI/ETQDwTdije5l5QxBKIZGE3tNGVla0Ebfpeov+tTB9DIS4RBirzeFn6yvu49umOtQx8x/7+x4zx+oOEtjpw1DM7B0DCjv3urdSyAgLaxva4
r2NZmrh39fbZ5/ebaivr205GbOz4s3hZvEXugz+qz2uW1YK2Ea37GPEQTfgFW0tQpUn6NB+DGK9re2torvtW/CX/Ss92iE9un2tvvZwYGp54320b46hpUqyocG95171RzLhKsmasnKGS6V8H/xxRe9WcDllUfigJZwvBPg91nH0LGs+FzDGEHwG6LJPUtVrcSeido0Cf29SPhTX8RFVatWH11R+ScBr0/Hskb6rw8OqMBtAILfEAm+haGt4jQk5MM3WYKxDsr4STnSt1oh8t1DRPaX0TEtnr4ydrB16oPgN8Qi+Fq2D+8+bWKIc8oT9K+NIvlpxGuyHtt6LWMEwW+Asjksdg7RffroO1oJXISk35XjRMXIEhUr9TLmoiQ6tqUQjAi/Pgh+Ayx/vLIKWN2pH2gyXv2NNMEeilSzduouOD7NT1pIOrCcw3ItYwXBb4BF8Cmy6heaXNf3rDUKQpDiRL2Kn5qiCto6efa+6By3DPMplmsaIwh+AyzFVtg5/UOTuKenp0XdhG+0n6LgW7Ja2qxotZyLTJ16IPg1sUT3EgvaH/cXTeRK+Ofn503XIDtvKN9/mwuKs3h5PBD8mpwaXhnx7vuPsqv0sD86Omps8wypI+otakgGAYJfk2eGdqwI/nDQd6mH/u7ubq2cfX1uSN//GyyTQYDg18QS4WPnDA/Nyejhv7+/X3TLvOzxy/7R2wBzN5AitEeuCZYOTKPsqyoDSw8AbXrAD7V9RpsLjrC4STwQ/Jo0LbgKXaoP6SKfv0+V1JnhodRmcZPlXJZrGiNYOjWw+Pe0UoBUseTTa6WqNnLddQ7Lqlht1AgMAQS/Bgg+DI2PDIVl/26hN5DlHJZrGSsIfiQQfEgZS3GT2inH9Nd1bEvLZpY5rA+CDzBCbhtF8tuI/XSsx7ZeyxhB8AFGiDUqfn146F5FsHZ0TB3bAhF+fRB8gBGirBZLK2Lx7aNH7ruAVcQ6lo5pQddAhk59EPxIsJxhPFLtMd837ngUh71cWwsS6esYOpYVn2sYIwh+JFLtgw5QISvkY2NjOFdG+t8sL5smcrWP9rVG9kJjx85pBoIPMGJ+6rm0o3z3F/fuuZerq7Xy9PUZfVb7WD37Ct+xjxEqbQFGTBXlf29Y62EapVNq0+IlOqZaHKvDZtV0TRG9KmgtRVVXQXRvA8GPhKVYC+rBvQ3LZ3t77p9zcy43rNd8GQm6JZe+CdnMTDFmaA6WTg0sRVSIUjyofA6LovFPW6iiDYXGyiIpNhD8GiD4aYHgh+eT1VVzmmar41xZKcYKNhD8mjRd2/QikFcJ/vfWd13asSCbJOW+NBobVo4fCH5NLIuZkC8eHss9ZSGa+nx+fJyk6GtMn/P35A2CXxOLaFgWTYHw9xQ7pz6qWk1N9Cuxp6LWHwS/JhbRIMIPDxF+fCSsPz89TcLT1xg0FsQ+DAh+TbB00gDBbw/55Z/t7hZpkG1TpF7u7uLZBwbBr4llfVoti4itEw7dy6ZLTTrWFvZCGTGKsH1aMDRF5yreMMjGCQ6C34B5wy/9HhFKMCz3cpbVkLxRzrs89M+PjqIKv46tc+hc5NnHAcFvgCVSPDg46HDEw8JyL1OM7vPnz93rgwP36g9/aLyv9tG+eQfN+dTKoBL+UP6+rBsdqxJ62iXEBcFvgEU8lDOOl++P7BxLbUNKgq9eMmoc9vyLL9w3Dx647/74x8bH0D7aV8fQsb7v4HdLoixv/e5XX7mf7e+7Ow8fNsrq0We1j/adefasOBZC3w700mmAxENFPE19ZFkR+Mh+7BhK//VdLS8vdz52ifK/Nje9G5RdpmpYJitEnSPbFk1lztxeXi62CnXDvO7tQ5//iAn0TkHwGyIBedKwOZQ+v7m5ST64Ea0t0PSeuwSie3WILKLwwEJ/GR3/68XFQvgVLXfpfyPoaYOl0xBrxMjkrR1LdO88vqsQfFd2oIwt9tPoXDpnyOUHYVgg+A2RiFh6s0i0WAWrObpnVjtntaO0Pk
X1WrYvRLvhpuicOvdLUhrhChB8AxYhke9vjVTHjO6ZJfe+i+he3nURYUfuB18HjaHocU+QAVMg+AbW19dN+21tbdE2uQHW6N55fEdWJKxfLyy4H87OWj3vh9BYNCZEHyoQfAOafLUUYbkWhUgPFqWDKnddP/toJ+leWaJ7FVu13U4hNbGvqEQfwCH4djaNCygfHh5GLcZSvrqyU+7du+cWFxfdgwcPip9ffPFFIaB9EX49pCyZOa6D6L5YwDtBsa/Q2PD0wSH4diSq1rJ9zQHEEF6JpMZ1ck1myOPHj4v/n7roa3zWCdfJZNLqZK0yYlLw7G+iyNkne2f0IPgeWCNJ2RShRUkWjiYqb7JAzs7Okhd93VfrimHWNy8LyrP/tuW3CR801jfMIY0aCq88kGgrv/66iPpDyNrRhGQo+6FJNksl+nojuJtYn3HdT6uV03Z0L5skVOrlrcmkqJS9XDQlgVal7psAS2YWKZurq6wcNWbyPK+16aNNto2NjXwMHB0dNbovl7enT58GuUuTyaTxuWdnZ/OvvvoqmW9J92JmZsZ8L/f391sb6+ujo/wfznlv36ys5N/X+B3QZ/TZEOfU2CFdpJ1Nf/fr6jiWjieKlJeWlswH0f4heuZbLJCU7B2NQWOxZOW4snV1m7n3//K0jm4vLbmZ8/O3C4fXyCjSZ/RZ7XPb4/fNBRg79BcEPwCyUyzVt27Kz+9KdFMQfV+xdx7tFyzIYvFpmfDp9rb72cGBqeeN9tG+OoZ5/CcnnXTZhO5B8AOgvHyfycIQoiv/usvzW6nE/swjrXFjY6PVvPtXHtkuWrbvToB5Gx1Dx7Licw3QXxD8QGjy1VqM5QKIrq+d0YXohxB7pca2mZmjqlVrGqai8pDL9ulY1ki/q0VUoFsQ/IAow8Rq7bgp0bV4+nrg+JzbtSz6SiP1FXvXQRdSqxUi3z1EZH8ZHdPi6StjB1tnfCD4AZG14+slW0Vf51aaZR9EX9cmC8ZX7Le3t1tvoUYHA4kAAA6wSURBVPDaKJKfRpxjsB7bei3QXxD8wGgCdsVzvU9NXt6/f7/xw0Pil7roKyLXtflM0AplRrXdQsEZI3yt2RpzURId27LGLBH++EDwIyChtrZdmObRo0eFN99EeFMWfT0M19bWvI+je9vVgjKWnjk/aeHBZDlHyv1/IA4IfgRUvRpCdF1ZkSu7pknDtRRFX+O3VtBOo2uS2HdRIfyDYW5FFbRtLPunc9wyZGpZrgn6C4IfiZCiL/tDXS8V7dftp5+a6IewX3Qtuqa2ffsKS1ZLmwuLW85Fps64QPAjEkp0KxTtq+2x0hDrCHAqoq9JWmsztGlklXUl9lbaXFC8y8XLoR8g+JGRQIX2m7VyVlXsdVPEn4Loh7CEdnd3O1uj1odbiTWng3GD4LeArJj9/f1gkb4rbR4JvyJ+CeHxBzIu+pC98yH6KvbiDZYJJASC3xIS/ZD2zjSaDNWqVor65ZVflcPfpehbJ1g1Vj0o+yr2AKmB4LdIaE//MvLJtaqV8twlsnrIyPeuov+uRF/nbdrrp5qgbbMDZgzaXHCExU3gJhD8lpH4yXcPkaf/IWT5aJJXufyK/rMse/cG8Lvf/c7duXPH6/gS/d/85je1Rb9JEZnuTVWNmxKZ4U2lzeImy7ks1wT9BcHvgCpl07cityl6A9DqXH/605/cq1evvI/397//3f3qV7+qJfqK1Otcrz6je/Nlghknlnx6rVTVRq67zmFZFauNGgFIBwS/IyT6yt7RhGQsi6cN/vGPf7jf/va3tc5UXe9V9o7+Tf+vq6KqunxkeDP7dwu9+i3nsFwL9BsEv2M0ISn7wqe1ctf89a9//WCW0DS6XllaT58+dUdHR+82/VsfJmctxU1qpxzTX9exLS2b2ywKgzRA8BOg6nSp7o99jfb/8Ic/NPq8/HlN/FZbX7htHOu3EfvpWI9tvRboLwh+QlQplT5r5HbF3/72t2F9GddgjYpfHx66VxGsHR
1Tx7ZAhD8+EPzEqBqlyebos80zVJTVYmlFLL599Mh9F7DqWsfSMS3oGsjQGR8IfqLI5pDNo4nM2CmcIfj1r3/dzxtt4I7HXMPLtbUgkb6O8dKj1bTPNUB/QfATp5rUTT3i//3vf5/AKNpBVsjHHt+FovJvlpdNE7naR/taI3uhsWPnjBMEvydUEf/5+bl7+PBhUpO7iu77NPEagp96Lpwu3/3FvXvu5epqrTx9fUaf1T5Wz77Cd+zQXz7mu+sX1bq52uT1V5vvkoFWfvGLX7g///nPo/oO3FSU//3JiddxlE6pTYuX6JhqcawOm1XTNUX0qqC1FFVdOW6i+1GD4PcYVa9WvWYU/VfbiacI1eWXv/yl+8tf/pJ0oVRMPtvbc/+cm3N5gIetBN2SS9+EbGamGDOMFwR/IFzOZ5fwy/tXQZN+agv5FqCJZJ1jrGLvygVHPvWcPG0TjZVFUsYNgj9QripoUtuCUIuIj13sKz5ZXXWvj4+jR+e+KA3zEzJzRg+TtiMBsY+HbJKU+9JobFg54BD8cYDYx+fz4+MkRV9j+rzFFs2QNgj+wEHs20FVq6mJfiX2VNRCBYI/YBD7dpGw/vz01Nx6ISQag8aC2MM0CP5AQey7Q375Z7u7RRpk2xSpl7u7ePZwJQj+AEHsu0cZMYqwfVowNEXnKt4wyMaBa0DwBwZinw7KeZeH/vnRUVTh17F1Dp2LPHv4EAj+gFCLBcQ+PdTKoBL+UP6+rBsdqxJ62iVAHSi8GgihlghE7ONR9N9ZWCgqXtUfRwVb+vnD2VmtcyrrRvvfLo/DhCw0BcEfCGqm5ts6AbFvBwn17eXlYqtQN8y8bJh2GX3+o7m54d4QaA0EfyDIzvEBse8WBB3aAA9/IFx4tM9F7AHGAYI/chB7gPEQTfBPa6ziA+GYTCaNj4XYA6THccTeR9EE//k1E1AQh+WpCcA6IPYA4wNLZyCsr6/XXucWsQcYJwj+QNBatxLxm0QfsQcYLwj+gJibmyvEfP6aMv6HDx8i9gAjhjz8gVGJvipvtVXo3xF6gHGT5Xle6wZkWVbvgyWyFpi4BQBohgKzplXzeZ5ndT7XxNI5aTIA3zJ/AIAxYtDO2toc1cMnwgcAqE9szWwi+I1HQvEVAEB9jJpZW5ubCH7jkRDhAwDUx6iZtbWZCB8AIBF6HeEj+AAA9TFqZpQI/1mNz/z/HZ413gUAYLQYNbP2TrXz8J0hF9+9zQ9tugsAwCjJslrp9Jc1tvZOTdMy6y2+OUXMVp8AAEPBqJWNNLmp4OPjAwBEILZ/7xB8AIA0GITgY+kAANyMUSsbaXKjSVtnnLg9Pz8v+rUDAMCPUXbOvXv3Gt+ZJhO2zthLp/HE7cHBgeE0AADjwKiRjbXYIviN3zuwdQAArseokY13slg6Wi17v+mJyMcHALgaS/69c+5BnueNXg1aifAdtg4AwJV4aGNjLW4s+HmeP8fHBwAIg9W/L7W4EdYFUBqPEMEHAPgxRm007dSa4GvZrr29PePpAACGhzTRuBxse4Kf57mS/S+a7keUDwDwHqMmXpQa3BifNW0bj/Tw8JCWyQAAZbGVNNGAOXL2EXyTP4OtAwDgpYXmHRvn4f+/nbNM4fqkyT4zMzOsdQsAo+fu3bsW/152jrlPjU+E7yxPGiZvAWDseEzWeomnb4SvJ8150/0mkwlePgCMFjWTvLhonPci7uV5bhZPrwi/PPFJ0/10oWTsAMAYkfYZxf7ER+xdAEvHWV8xdnZ2ApwaAKBfeGiftxfuZem8O4hh8lYcHR25hYUF7/MDAPQBdcVcXFy0jNRrsrYiRITvrE+e1dXVQKcHAEgfD80LkukSSvD1jtI8v+jigowdABgF0jqjd/+i1Fhvglg67q2towE9bLqf8vKVsaOcVACAIaLaI2XmGFMxH+d5vh7itoSK8J31CaQbwAQuAAwZaZxR7F2o6N6FjPDd2yhf/syKZd+nT5+6ubm5YGMBAEiB09NTd//+fe
tInuR5HmyyM7TgmwqxxPz8PGvfAsDgUCbiyUnjcqUKr0Kry4S0dKpCrC3LvrohWDsAMCSkaR5ivxVS7F3oCN+9jfI1+6pBzjTdVxO4ev3R5AYAQJ9RMopsaqN3r52+tCxj+CGCRvju/Zq3m5Z9dWOWl5dDDwkAoHWkZR4TtZuhxd7FEHz3VvR3LCtiibOzM7e5aXpeAAAkgTRMWmbkotTQ4AS3dCqyLFPPhCPr/rRdAIA+4tE+oWIxz/MoGSzRBN+9FX21xFyy7EtBFgD0Dc8CK3GY53k0XzuKpTPFqqXlgiv9fCJ8AOgT0iwPsX9RamY0ogq+zwSuK/18GqwBQB+QVnn49i7WRO00sSP8agLXnIj65MkTJnEBIGmkUdIqD05iTdROE9XDr/DJza/Y3d0l2geA5FAXzLW1NZ9hRcm5v4roEb57b+14qbVuKK2UASAlAoi9WG1D7F1bEf67kxlbKFcoc0cpTzRZA4CuUVcAz0laF7L1cR3aFnxZO8ovnbUeA9EHgK4JJPaa4V1oK7p3bQu+e99R89THz0f0AaArAom9dp4L3RztJlrx8KcpL9DLz69y9PH0AaBNpDkBxN6Vvn2rYu+6iPDfnTjL5Ftt+x6H7B0AaINAE7TiURspmFfReoRfUV6wV+KqK7N3yNMHgJhIYwKJ/ZOuxN51GeG/G0CWaRJ33vc4KysrWDwAEBw5CJ5FVRUqruq0X0wKgu+duVMxOztbTObScA0AfFEjNPn1nu0SKlrPyLmKziydivIGLJQ3xAt9MepUx9q4AOCDNERaMiSxdykIvnsv+svWzprTaPZcvajx9QHAgrRDGhIgE8eVmpaE2LsULJ1psiybK+0dc47+NLJ4Dg4OWCMXAG5E629oWcJAUb2bEvvTVO5+EhF+RXljFkJE+q60eFScpZXjAQCuQxohrRiy2LvUIvyKshr3IMREbsX8/Py7LxUAwJVVs+vr6+7kxNzB/Sr01FjuorDqJpIUfBc4e2eajY2N4gsmkwdgvCgDRwHg1tZW6HuQzATtVSQr+O696B+EyNOfRr149GVToQswPlSvo6Av0KTsNCdlZJ+k2LvUBb8iyzJVVK2EPu5kMnnXGwMAho1SLRXkXVxcxLhOVdAmH0EmNWl7HeWNfBT6uPrilX4lwVc2DwAMD/1t629cf+uRxP5RH8Te9SXCr8iyTLn6e6HSNi+jiF85uFg9AP1Hb+/6e44k8q7MxFHXy95Ei70SfBcpg+cy8vjl8Un4yeEH6A/KpZfQa44ugkc/TbKZOB+id4Lv3k/mbvosl1iXpaWlohiDqB8gXSTysm4ODw/bGONj6U/Kk7PX0UvBr4ht8UyjqF/CX20A0C0S+GqLHM1X9M7CuUyvBd9FTN28CUX+mgiS+GP7AMRHdo3EXdk2LUXy0ySfclmH3gt+RbmC1mYb0f5lNNkr8VcVrzbSPAH8kbCrElab/jvi5OuHeFHaN4PozzIYwXfvo31ZPEtdj0WN2xT5Vw8BVfZWPwHgLap4laBXP7Upkg/Y08aHw9LC6XVUP82gBL8iy7KFUvgnaYzox6i3z1XwdgBD4rq1KSTsLfnuFi5KoR/cwhqDFPyKLm0eAOgdg7JvrmLQgu/e2zwS/o0EhgMAaaIuajtDsm+uYvCCX1EWbG3G6MkDAL3lSRnV96qAyspoBL+iFH5F/KtYPQCj5EU5x7czFqGvGJ3gV0xZPaspT+4CQDAupoR+0NbNdYxW8KfJsmy1FP5Wi7cAoBVUNLWX5/ne2G83gj9FafesEvUD9J4qmt8bm23zIRD8a8iybK4U/mXEH6AXXJRtVvZSWzw8FRD8GpTiv1xu0doyA0BjzkqRP0DkbwbBb0g52bswtfEAAGgPCfxxtY118tUKgh+AspXD3NTGQwDAH4n7abUNsdVB2yD4kSgngL8sHwJ3p346soEACk7Kn89LUa9+PmOiNQ4IPgDASLjFFw0AMA4QfACAkYDgAwCMBAQfAGAkIPgAACMBwQ
cAGAkIPgDASEDwAQBGAoIPADAGnHP/B0PvDiJGlrI0AAAAAElFTkSuQmCC
+ mediatype: image/png
+ links:
+ - name: Release Notes
+ url: >-
+ https://docs.openshift.com/container-platform/4.10/cicd/gitops/gitops-release-notes.html
+ - name: Day 1 Operations
+ url: 'https://github.com/redhat-developer/kam/tree/master/docs/journey/day1'
+ - name: Day 2 Operations
+ url: 'https://github.com/redhat-developer/kam/tree/master/docs/journey/day2'
+ install:
+ spec:
+ clusterPermissions:
+ - rules:
+ - apiGroups:
+ - ''
+ resources:
+ - configmaps
+ - endpoints
+ - events
+ - namespaces
+ - pods
+ - secrets
+ - serviceaccounts
+ - services
+ - services/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - configmaps
+ - endpoints
+ - events
+ - persistentvolumeclaims
+ - pods
+ - secrets
+ - serviceaccounts
+ - services
+ - services/finalizers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - deployments
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - namespaces
+ - resourcequotas
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - ''
+ resources:
+ - pods/eviction
+ verbs:
+ - create
+ - apiGroups:
+ - ''
+ resources:
+ - pods/log
+ verbs:
+ - get
+ - apiGroups:
+ - ''
+ resources:
+ - podtemplates
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - appmesh.k8s.aws
+ resources:
+ - virtualnodes
+ - virtualrouters
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - appmesh.k8s.aws
+ resources:
+ - virtualservices
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ - deployments
+ - replicasets
+ - statefulsets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ - podtemplates
+ - replicasets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - apps
+ resourceNames:
+ - gitops-operator
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - apps.openshift.io
+ resources:
+ - '*'
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - analysisruns
+ - analysisruns/finalizers
+ - experiments
+ - experiments/finalizers
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - analysistemplates
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - applications
+ - appprojects
+ - argocds
+ - argocds/finalizers
+ - argocds/status
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - clusteranalysistemplates
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - rolloutmanagers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - rolloutmanagers/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - rolloutmanagers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - argoproj.io
+ resources:
+ - rollouts
+ - rollouts/finalizers
+ - rollouts/scale
+ - rollouts/status
+ verbs:
+ - create
+ - delete
+ - deletecollection
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - batch
+ resources:
+ - cronjobs
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - config.openshift.io
+ resources:
+ - clusterversions
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - console.openshift.io
+ resources:
+ - consoleclidownloads
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - console.openshift.io
+ resources:
+ - consolelinks
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - console.openshift.io
+ resources:
+ - consoleplugins
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+ - apiGroups:
+ - elbv2.k8s.aws
+ resources:
+ - targetgroupbindings
+ verbs:
+ - get
+ - list
+ - apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+ - apiGroups:
+ - getambassador.io
+ resources:
+ - ambassadormappings
+ - mappings
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - prometheuses
+ - prometheusrules
+ - servicemonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - networking.istio.io
+ resources:
+ - destinationrules
+ - virtualservices
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - ingresses
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - oauth.openshift.io
+ resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - operators.coreos.com
+ resources:
+ - clusterserviceversions
+ - operatorgroups
+ - subscriptions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - pipelines.openshift.io
+ resources:
+ - '*'
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - pipelines.openshift.io
+ resources:
+ - gitopsservices
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - pipelines.openshift.io
+ resources:
+ - gitopsservices/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - pipelines.openshift.io
+ resources:
+ - gitopsservices/status
+ verbs:
+ - get
+ - patch
+ - update
+ - apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - '*'
+ verbs:
+ - bind
+ - create
+ - delete
+ - deletecollection
+ - escalate
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - clusterrolebindings
+ - clusterroles
+ verbs:
+ - bind
+ - create
+ - delete
+ - deletecollection
+ - escalate
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ - roles
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - route.openshift.io
+ resources:
+ - '*'
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ - routes/custom-host
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - split.smi-spec.io
+ resources:
+ - trafficsplits
+ verbs:
+ - create
+ - get
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - template.openshift.io
+ resources:
+ - templateconfigs
+ - templateinstances
+ - templates
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - traefik.containo.us
+ resources:
+ - traefikservices
+ verbs:
+ - get
+ - update
+ - watch
+ - apiGroups:
+ - x.getambassador.io
+ resources:
+ - ambassadormappings
+ - mappings
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+ - apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+ serviceAccountName: gitops-operator-controller-manager
+ deployments:
+ - name: gitops-operator-controller-manager
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ strategy: {}
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ control-plane: controller-manager
+ spec:
+ containers:
+ - command:
+ - /usr/local/bin/manager
+ env:
+ - name: ARGOCD_CLUSTER_CONFIG_NAMESPACES
+ value: openshift-gitops
+ - name: OPERATOR_NAME
+ value: gitops-operator
+ - name: RELATED_IMAGE_ARGOCD_DEX_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/dex-rhel8@sha256:44b5729b11c749e2e286fccc3021f1e9ba524c69fb9809b5d2121c4e5b05b40e
+ - name: ARGOCD_DEX_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/dex-rhel8@sha256:44b5729b11c749e2e286fccc3021f1e9ba524c69fb9809b5d2121c4e5b05b40e
+ - name: RELATED_IMAGE_ARGOCD_KEYCLOAK_IMAGE
+ value: >-
+ registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:d5829e880db4b82a50a4962d61ea148522a93644174931b256d7ad866eadcf40
+ - name: ARGOCD_KEYCLOAK_IMAGE
+ value: >-
+ registry.redhat.io/rh-sso-7/sso75-openshift-rhel8@sha256:d5829e880db4b82a50a4962d61ea148522a93644174931b256d7ad866eadcf40
+ - name: RELATED_IMAGE_BACKEND_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8@sha256:3c03f3f8da227567fab48587ca546b51734d2ef4a8aa7b94ba449060a369001b
+ - name: BACKEND_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8@sha256:3c03f3f8da227567fab48587ca546b51734d2ef4a8aa7b94ba449060a369001b
+ - name: RELATED_IMAGE_ARGOCD_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:4e80c7810c4c99e89e35b33951ed8e1f4324899b5d47a8cd50cbb034f3e0c925
+ - name: ARGOCD_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:4e80c7810c4c99e89e35b33951ed8e1f4324899b5d47a8cd50cbb034f3e0c925
+ - name: ARGOCD_REPOSERVER_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/argocd-rhel8@sha256:4e80c7810c4c99e89e35b33951ed8e1f4324899b5d47a8cd50cbb034f3e0c925
+ - name: RELATED_IMAGE_ARGOCD_REDIS_IMAGE
+ value: >-
+ registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3
+ - name: ARGOCD_REDIS_IMAGE
+ value: >-
+ registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3
+ - name: ARGOCD_REDIS_HA_IMAGE
+ value: >-
+ registry.redhat.io/rhel8/redis-6@sha256:53598a6effeb90e4f1b005b2521beffd2fa2b0c52d0e7f2347ee2abd2577cab3
+ - name: RELATED_IMAGE_ARGOCD_REDIS_HA_PROXY_IMAGE
+ value: >-
+ registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf7ce748b703e195220b7bd7b42fa2caa4cdfd96840445e096036a0d85f1ff2
+ - name: ARGOCD_REDIS_HA_PROXY_IMAGE
+ value: >-
+ registry.redhat.io/openshift4/ose-haproxy-router@sha256:edf7ce748b703e195220b7bd7b42fa2caa4cdfd96840445e096036a0d85f1ff2
+ - name: RELATED_IMAGE_GITOPS_CONSOLE_PLUGIN_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/console-plugin-rhel8@sha256:2925a527335159ca73115a831b56b713273372f8de18d08b745b8ce018491c71
+ - name: GITOPS_CONSOLE_PLUGIN_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/console-plugin-rhel8@sha256:2925a527335159ca73115a831b56b713273372f8de18d08b745b8ce018491c71
+ - name: RELATED_IMAGE_KAM_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/kam-delivery-rhel8@sha256:b6397098b9d0e1f9206b51e50013c90165b7ebb9ea69d305e77ecbef0da29b13
+ - name: KAM_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/kam-delivery-rhel8@sha256:b6397098b9d0e1f9206b51e50013c90165b7ebb9ea69d305e77ecbef0da29b13
+ - name: RELATED_IMAGE_ARGO_ROLLOUTS_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/argo-rollouts-rhel8@sha256:d70aecb74cb46ce45e9ec02e9938da7c14316e7d142e78ee25b2d6b0ac1e506c
+ - name: ARGO_ROLLOUTS_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/argo-rollouts-rhel8@sha256:d70aecb74cb46ce45e9ec02e9938da7c14316e7d142e78ee25b2d6b0ac1e506c
+ - name: RELATED_IMAGE_MUST_GATHER_IMAGE
+ value: >-
+ registry.redhat.io/openshift-gitops-1/must-gather-rhel8@sha256:4a5b9b97466b53e7775d887a0d920507cebbf892e7bc6a5334c784c55f9e3dd8
+ image: >-
+ registry.redhat.io/openshift-gitops-1/gitops-rhel8-operator@sha256:f4336d54225d883c96bac965317444a1a785574f3ba85a0b53c56db534cc86cf
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8081
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ name: manager
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 8081
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ resources: {}
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ securityContext:
+ runAsNonRoot: true
+ serviceAccountName: gitops-operator-controller-manager
+ terminationGracePeriodSeconds: 10
+ permissions:
+ - rules:
+ - apiGroups:
+ - ''
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+ - apiGroups:
+ - ''
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ serviceAccountName: gitops-operator-controller-manager
+ strategy: deployment
+ maintainers:
+ - email: team-gitops@redhat.com
+ name: OpenShift GitOps Team
+ description: >
+ Red Hat OpenShift GitOps is a declarative continuous delivery platform based
+ on [Argo CD](https://argoproj.github.io/argo-cd/). It enables teams to adopt
+ GitOps principles for managing cluster configurations and automating secure
+ and repeatable application delivery across hybrid multi-cluster Kubernetes
+ environments. Following GitOps and infrastructure as code principles, you
+ can store the configuration of clusters and applications in Git repositories
+ and use Git workflows to roll them out to the target clusters.
+
+
+ ## Features
+
+ * Automated install and upgrades of Argo CD
+
+ * Manual and automated configuration sync from Git repositories to target
+ OpenShift and Kubernetes clusters
+
+ * Support for the Helm and Kustomize templating tools
+
+ * Configuration drift detection and visualization on live clusters
+
+ * Audit trails of rollouts to the clusters
+
+ * Monitoring and logging integration with OpenShift
+
+ * Automated GitOps bootstrapping using Tekton and Argo CD with [GitOps
+ Application Manager CLI](https://github.com/redhat-developer/kam)
+
+
+ ## Components
+
+ * Argo CD 2.7.2
+
+ * GitOps Application Manager CLI
+ ([download](https://github.com/redhat-developer/kam/releases))
+
+
+ ## How to Install
+
+ After installing the OpenShift GitOps operator, an instance of Argo CD is
+ installed in the `openshift-gitops` namespace which has sufficient privileges
+ for managing cluster configurations. You can create additional Argo CD
+ instances using the `ArgoCD` custom resource within the desired namespaces.
+
+ ```yaml
+
+ apiVersion: argoproj.io/v1alpha1
+
+ kind: ArgoCD
+
+ metadata:
+ name: argocd
+ spec:
+ server:
+ route:
+ enabled: true
+ ```
+
+
+ OpenShift GitOps is a layered product on top of OpenShift that enables teams
+ to adopt GitOps principles for managing cluster configurations and
+ automating secure and repeatable application delivery across hybrid
+ multi-cluster Kubernetes environments. OpenShift GitOps is built around Argo
+ CD as the core upstream project and assists customers to establish an
+ end-to-end application delivery workflow on GitOps principles.
+ replaces: openshift-gitops-operator.v1.8.3
+status:
+ cleanup: {}
+ conditions:
+ - lastTransitionTime: '2023-07-08T17:31:08Z'
+ lastUpdateTime: '2023-07-08T17:31:08Z'
+ message: requirements not yet checked
+ phase: Pending
+ reason: RequirementsUnknown
+ - lastTransitionTime: '2023-07-08T17:31:08Z'
+ lastUpdateTime: '2023-07-08T17:31:08Z'
+ message: one or more requirements couldn't be found
+ phase: Pending
+ reason: RequirementsNotMet
+ - lastTransitionTime: '2023-07-08T17:31:10Z'
+ lastUpdateTime: '2023-07-08T17:31:10Z'
+ message: 'all requirements found, attempting install'
+ phase: InstallReady
+ reason: AllRequirementsMet
+ - lastTransitionTime: '2023-07-08T17:31:10Z'
+ lastUpdateTime: '2023-07-08T17:31:10Z'
+ message: waiting for install components to report healthy
+ phase: Installing
+ reason: InstallSucceeded
+ - lastTransitionTime: '2023-07-08T17:31:10Z'
+ lastUpdateTime: '2023-07-08T17:31:11Z'
+ message: >-
+ installing: waiting for deployment gitops-operator-controller-manager to
+ become ready: deployment "gitops-operator-controller-manager" not
+ available: Deployment does not have minimum availability.
+ phase: Installing
+ reason: InstallWaiting
+ - lastTransitionTime: '2023-07-08T17:31:21Z'
+ lastUpdateTime: '2023-07-08T17:31:21Z'
+ message: install strategy completed with no errors
+ phase: Succeeded
+ reason: InstallSucceeded
+ lastTransitionTime: '2023-07-08T17:31:21Z'
+ lastUpdateTime: '2023-07-08T17:31:21Z'
+ message: >-
+ The operator is running in openshift-operators but is managing this
+ namespace
+ phase: Succeeded
+ reason: Copied
+ requirementStatus:
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: analysisruns.argoproj.io
+ status: Present
+ uuid: 078fec81-9091-433f-a672-bb5bb31ec3cc
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: analysistemplates.argoproj.io
+ status: Present
+ uuid: 8b026a11-900e-4009-b155-00c9fb7dc0ee
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: applications.argoproj.io
+ status: Present
+ uuid: 7daffe74-414c-40f5-bde4-0fe3dc262fcb
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: applicationsets.argoproj.io
+ status: Present
+ uuid: c4d2088a-14c0-45b0-85d6-dbfa4f15525c
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: appprojects.argoproj.io
+ status: Present
+ uuid: 38ab7b01-d57b-46e6-b52f-f7d1fcb394f4
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: argocds.argoproj.io
+ status: Present
+ uuid: 4929ae1c-8e59-44cc-b740-460b67cb57e8
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: clusteranalysistemplates.argoproj.io
+ status: Present
+ uuid: 140334da-436a-4661-8339-48a74208e814
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: experiments.argoproj.io
+ status: Present
+ uuid: 64ffb8f9-62bb-45af-8e43-d14fdfe5e481
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: gitopsservices.pipelines.openshift.io
+ status: Present
+ uuid: e015b191-9f18-4411-8b1a-dcfbdf9976bf
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: rolloutmanagers.argoproj.io
+ status: Present
+ uuid: 1fadd3a1-c116-4a05-8e04-b8782820890e
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: rollouts.argoproj.io
+ status: Present
+ uuid: 3bec7b2b-1b7c-4e7c-bd04-8544610e407e
+ version: v1
+ - dependents:
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["get","list","watch","create","update","patch","delete"],"apiGroups":[""],"resources":["configmaps"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["get","list","watch","create","update","patch","delete"],"apiGroups":["coordination.k8s.io"],"resources":["leases"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["create","patch"],"apiGroups":[""],"resources":["events"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":[""],"resources":["configmaps","endpoints","events","namespaces","pods","secrets","serviceaccounts","services","services/finalizers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":[""],"resources":["configmaps","endpoints","events","persistentvolumeclaims","pods","secrets","serviceaccounts","services","services/finalizers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list","watch"],"apiGroups":[""],"resources":["deployments"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","update","watch"],"apiGroups":[""],"resources":["namespaces","resourcequotas"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create"],"apiGroups":[""],"resources":["pods/eviction"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get"],"apiGroups":[""],"resources":["pods/log"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list","watch"],"apiGroups":[""],"resources":["podtemplates"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list","patch","update","watch"],"apiGroups":["appmesh.k8s.aws"],"resources":["virtualnodes","virtualrouters"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list","watch"],"apiGroups":["appmesh.k8s.aws"],"resources":["virtualservices"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["apps"],"resources":["daemonsets","deployments","replicasets","statefulsets"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["apps"],"resources":["deployments","podtemplates","replicasets"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["update"],"apiGroups":["apps"],"resources":["deployments/finalizers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["update"],"apiGroups":["apps"],"resources":["deployments/finalizers"],"resourceNames":["gitops-operator"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["apps.openshift.io"],"resources":["*"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","deletecollection","get","list","patch","update","watch"],"apiGroups":["argoproj.io"],"resources":["analysisruns","analysisruns/finalizers","experiments","experiments/finalizers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","deletecollection","get","list","patch","update","watch"],"apiGroups":["argoproj.io"],"resources":["analysistemplates"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["argoproj.io"],"resources":["applications","appprojects","argocds","argocds/finalizers","argocds/status"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","deletecollection","get","list","patch","update","watch"],"apiGroups":["argoproj.io"],"resources":["clusteranalysistemplates"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["argoproj.io"],"resources":["rolloutmanagers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["update"],"apiGroups":["argoproj.io"],"resources":["rolloutmanagers/finalizers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","patch","update"],"apiGroups":["argoproj.io"],"resources":["rolloutmanagers/status"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","deletecollection","get","list","patch","update","watch"],"apiGroups":["argoproj.io"],"resources":["rollouts","rollouts/finalizers","rollouts/scale","rollouts/status"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["autoscaling"],"resources":["horizontalpodautoscalers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["batch"],"resources":["cronjobs","jobs"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["batch"],"resources":["jobs"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list","watch"],"apiGroups":["config.openshift.io"],"resources":["clusterversions"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","get","list","patch","update","watch"],"apiGroups":["console.openshift.io"],"resources":["consoleclidownloads"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["console.openshift.io"],"resources":["consolelinks"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["console.openshift.io"],"resources":["consoleplugins"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","get","update"],"apiGroups":["coordination.k8s.io"],"resources":["leases"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list"],"apiGroups":["elbv2.k8s.aws"],"resources":["targetgroupbindings"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","get","list","patch","watch"],"apiGroups":["extensions"],"resources":["ingresses"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","update","watch"],"apiGroups":["getambassador.io"],"resources":["ambassadormappings","mappings"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["monitoring.coreos.com"],"resources":["prometheuses","prometheusrules","servicemonitors"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","list","patch","update","watch"],"apiGroups":["networking.istio.io"],"resources":["destinationrules","virtualservices"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["networking.k8s.io"],"resources":["ingresses"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["oauth.openshift.io"],"resources":["oauthclients"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","get","list","watch"],"apiGroups":["operators.coreos.com"],"resources":["clusterserviceversions","operatorgroups","subscriptions"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["pipelines.openshift.io"],"resources":["*"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["pipelines.openshift.io"],"resources":["gitopsservices"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["update"],"apiGroups":["pipelines.openshift.io"],"resources":["gitopsservices/finalizers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","patch","update"],"apiGroups":["pipelines.openshift.io"],"resources":["gitopsservices/status"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["bind","create","delete","deletecollection","escalate","get","list","patch","update","watch"],"apiGroups":["rbac.authorization.k8s.io"],"resources":["*"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["bind","create","delete","deletecollection","escalate","get","list","patch","update","watch"],"apiGroups":["rbac.authorization.k8s.io"],"resources":["clusterrolebindings","clusterroles"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["rbac.authorization.k8s.io"],"resources":["rolebindings","roles"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["route.openshift.io"],"resources":["*"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["route.openshift.io"],"resources":["routes","routes/custom-host"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","get","patch","update","watch"],"apiGroups":["split.smi-spec.io"],"resources":["trafficsplits"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","patch","update","watch"],"apiGroups":["template.openshift.io"],"resources":["templateconfigs","templateinstances","templates"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["get","update","watch"],"apiGroups":["traefik.containo.us"],"resources":["traefikservices"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create","delete","get","list","update","watch"],"apiGroups":["x.getambassador.io"],"resources":["ambassadormappings","mappings"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create"],"apiGroups":["authentication.k8s.io"],"resources":["tokenreviews"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ cluster
+ rule:{"verbs":["create"],"apiGroups":["authorization.k8s.io"],"resources":["subjectaccessreviews"]}
+ status: Satisfied
+ version: v1
+ group: ''
+ kind: ServiceAccount
+ message: ''
+ name: gitops-operator-controller-manager
+ status: Present
+ version: v1
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/files/operator.yaml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/files/operator.yaml
new file mode 100644
index 00000000000..a0e95fe33ac
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/files/operator.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ labels:
+ operators.coreos.com/openshift-gitops-operator.openshift-operators: ""
+ name: openshift-gitops-operator
+ namespace: openshift-operators
+spec:
+ channel: latest
+ installPlanApproval: Automatic
+ name: openshift-gitops-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/files/quay-csv.yaml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/files/quay-csv.yaml
new file mode 100644
index 00000000000..7e0b31ffcf4
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/files/quay-csv.yaml
@@ -0,0 +1,591 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: ClusterServiceVersion
+metadata:
+ annotations:
+ olm.skipRange: '>=3.5.x <3.8.9'
+ operators.openshift.io/valid-subscription: '["OpenShift Platform Plus", "Red Hat Quay"]'
+ tectonic-visibility: ocs
+ quay-version: v3.8.9
+ olm.targetNamespaces: ''
+ operatorframework.io/properties: >-
+ {"properties":[{"type":"olm.gvk","value":{"group":"quay.redhat.com","kind":"QuayRegistry","version":"v1"}},{"type":"olm.gvk","value":{"group":"redhatcop.redhat.io","kind":"QuayEcosystem","version":"v1alpha1"}},{"type":"olm.package","value":{"packageName":"quay-operator","version":"3.8.9"}}]}
+ repository: 'https://github.com/quay/quay-operator'
+ operators.openshift.io/infrastructure-features: '["disconnected", "proxy-aware", "fips"]'
+ alm-examples: |-
+ [
+ {
+ "apiVersion": "quay.redhat.com/v1",
+ "kind": "QuayRegistry",
+ "metadata": {
+ "name": "example-registry"
+ },
+ "spec": {
+ "components": [
+ {"kind": "clair", "managed": true},
+ {"kind": "postgres", "managed": true},
+ {"kind": "objectstorage", "managed": true},
+ {"kind": "redis", "managed": true},
+ {"kind": "horizontalpodautoscaler", "managed": true},
+ {"kind": "route", "managed": true},
+ {"kind": "mirror", "managed": true},
+ {"kind": "monitoring", "managed": true},
+ {"kind": "tls", "managed": true},
+ {"kind": "quay", "managed": true},
+ {"kind": "clairpostgres", "managed": true}
+ ]
+ }
+ }
+ ]
+ capabilities: Full Lifecycle
+ olm.operatorNamespace: openshift-operators
+ containerImage: >-
+ registry.redhat.io/quay/quay-operator-rhel8@sha256:0709b7c45a15445d14dd1e35e2d5843d670b4aaf0b120bca2d3eeb55fdc87021
+ operators.operatorframework.io/internal-objects: '["quayecosystems.redhatcop.redhat.io"]'
+ createdAt: '2021-04-23 10:04 UTC'
+ categories: Integration & Delivery
+  description: Opinionated deployment of Red Hat Quay on Kubernetes.
+ olm.operatorGroup: global-operators
+ resourceVersion: '58222'
+ name: quay-operator.v3.8.9
+ namespace: openshift-operators
+ labels:
+ operators.coreos.com/quay-operator.openshift-operators: ''
+spec:
+ customresourcedefinitions:
+ owned:
+ - description: Represents a full Quay registry installation.
+ displayName: Quay Registry
+ kind: QuayRegistry
+ name: quayregistries.quay.redhat.com
+ resources:
+ - kind: Deployment
+ name: ''
+ version: ''
+ - kind: ReplicaSet
+ name: ''
+ version: ''
+ - kind: Pod
+ name: ''
+ version: ''
+ - kind: Secret
+ name: ''
+ version: ''
+ - kind: Job
+ name: ''
+ version: ''
+ - kind: ConfigMap
+ name: ''
+ version: ''
+ - kind: ServiceAccount
+ name: ''
+ version: ''
+ - kind: PersistentVolumeClaim
+ name: ''
+ version: ''
+ - kind: Ingress
+ name: ''
+ version: ''
+ - kind: Route
+ name: ''
+ version: ''
+ - kind: Role
+ name: ''
+ version: ''
+ - kind: Rolebinding
+ name: ''
+ version: ''
+ - kind: HorizontalPodAutoscaler
+ name: ''
+ version: ''
+ - kind: ServiceMonitor
+ name: ''
+ version: ''
+ - kind: PrometheusRule
+ name: ''
+ version: ''
+ specDescriptors:
+ - description: >-
+ Name of the Quay config secret containing base configuration and
+ custom SSL certificates.
+ displayName: Config Bundle Secret
+ path: configBundleSecret
+ x-descriptors:
+ - 'urn:alm:descriptor:io.kubernetes:Secret'
+ - description: >-
+ Declares how the Operator should handle supplemental Quay
+ services.
+ displayName: Components
+ path: components
+ x-descriptors:
+ - 'urn:alm:descriptor:com.tectonic.ui:advanced'
+ - description: The unique name of this type of component.
+ displayName: Kind
+ path: 'components[0].kind'
+ - description: >-
+ Indicates whether lifecycle of this component is managed by the
+ Operator or externally.
+ displayName: Managed
+ path: 'components[0].managed'
+ statusDescriptors:
+ - description: The currently installed version of all Quay components.
+ displayName: Current Version
+ path: currentVersion
+ - description: Observed conditions of Quay components.
+ displayName: Conditions
+ path: conditions
+ x-descriptors:
+ - 'urn:alm:descriptor:io.kubernetes.conditions'
+ - description: >-
+ Name of the secret containing credentials for the Quay config
+ editor.
+ displayName: Config Editor Credentials Secret
+ path: configEditorCredentialsSecret
+ x-descriptors:
+ - 'urn:alm:descriptor:io.kubernetes:Secret'
+ - description: >-
+ Externally accessible URL for container pull/push and web
+ frontend.
+ displayName: Registry Endpoint
+ path: registryEndpoint
+ x-descriptors:
+ - 'urn:alm:descriptor:org.w3:link'
+ - description: Externally accessible URL for the config editor UI.
+ displayName: Config Editor Endpoint
+ path: configEditorEndpoint
+ x-descriptors:
+ - 'urn:alm:descriptor:org.w3:link'
+ version: v1
+ - description: '[DEPRECATED] Old representation of a full Quay installation.'
+ displayName: Quay Ecosystem
+ kind: QuayEcosystem
+ name: quayecosystems.redhatcop.redhat.io
+ version: v1alpha1
+ relatedImages:
+ - image: >-
+ registry.redhat.io/quay/quay-operator-rhel8@sha256:0709b7c45a15445d14dd1e35e2d5843d670b4aaf0b120bca2d3eeb55fdc87021
+ name: >-
+ quay-operator-rhel8-0709b7c45a15445d14dd1e35e2d5843d670b4aaf0b120bca2d3eeb55fdc87021-annotation
+ - image: >-
+ registry.redhat.io/quay/quay-operator-rhel8@sha256:0709b7c45a15445d14dd1e35e2d5843d670b4aaf0b120bca2d3eeb55fdc87021
+ name: quay-operator
+ - image: >-
+ registry.redhat.io/quay/quay-rhel8@sha256:238d5b181703725a20e778f4cdb4fb0677573e9a3dcc06dbf072a56791f98518
+ name: component_quay
+ - image: >-
+ registry.redhat.io/quay/clair-rhel8@sha256:0972d7d7ed38b2e9ef15bd2fa260a912e4e509f42248478fd3a4024c433de30d
+ name: component_clair
+ - image: >-
+ registry.redhat.io/quay/quay-builder-rhel8@sha256:8f969220947d456af25475f28a23293e1f0cffe6f28aef43dbca65faab25919b
+ name: component_builder
+ - image: >-
+ registry.redhat.io/quay/quay-builder-qemu-rhcos-rhel8@sha256:5031f3113cc7cadbb29728daadcf5e5d9995df536ce8e108d1f54735ac8091b7
+ name: component_builder_qemu
+ - image: >-
+ registry.redhat.io/rhel8/postgresql-10@sha256:dfb0ce045df3f0a490a318494e0c7588f8709c6631f0922bace06ed5633326ab
+ name: component_postgres
+ - image: >-
+ registry.redhat.io/rhel8/redis-6@sha256:a287fd707a4f0b9b34f2f6bb0359dbe12fa1dd51e6de7a5685a0191a6d3734fa
+ name: component_redis
+ cleanup:
+ enabled: false
+ apiservicedefinitions: {}
+ keywords:
+ - open source
+ - containers
+ - registry
+ displayName: Red Hat Quay
+ provider:
+ name: Red Hat
+ maturity: stable
+ installModes:
+ - supported: true
+ type: OwnNamespace
+ - supported: true
+ type: SingleNamespace
+ - supported: true
+ type: MultiNamespace
+ - supported: true
+ type: AllNamespaces
+ version: 3.8.9
+ icon:
+ - base64data: >-
+ iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAACXBIWXMAAAsSAAALEgHS3X78AAANmElEQVR4nO2dfWxWVx3Hv/d5aWkpbYE5ZNA+DSB03WAlQx1IhIQxTJyhSzY1SrI5tsQ/TISoMcaYsfiHLnGuJv6xhDFYYkx8iStRk7mOMBKkqEzKNmrBsfVpgYmOrm/07Xm55vf0nHJ7z733Oefcc9tC+0mawj2X9nmeL9/fOef3O+dcy7ZtzGY6U9Z2AI0A6tj3agD3Sb7kcwD6ALQD6KLv9Wn7TeGuWcSsEqQzZdGHvd3xJfvBq0JCvcm/6tN2X3TvSo0ZF4SJ0MS+dgs3TA9HAbTQ10yLM2OCsFD0BIDHhcaZ5RUAR2YqtE27IJ0pi0TYF2E4MgWFteb6tH1kOn/ptAnChDgAICU0zm7S9LqnS5jIBWGhiYTYJjTeWpxgwkQayiIThHXWzbOwjwgL9TH7our8IxGkM2XRiIksXiU03h7004CkPm23mH43RgVhrqDw9G2h8fbkFyyMGXOLMUE6U1YdG8vP9tGTaWg01lSftrtM/NyYcEUD1nG3z0ExwN5zO/sMQhNaEDacPX4b9xcy0Hs/zj6LUIQShL2Aw0LD3OVwWFG0BZkXw5fD/6yxfurXWAytTn1eDH8Gc8CoDSyI4dCne+ynfG/0Qdkh82L4w8UgRvPY+48a6yXfm31QcggbSRwXGuaZIoaTshj2b+qxm4UGH6QFYfOMdhOjqXhlNVaf6kJskfyPGhkZQfuLL2Bx8w+FtiCWP38EVY+qZW/+/qejqPje1xEbviG0eeEnBmEBdlkMn7+/xz4pNHogFbLYDLzF1NC2YleTkhiF19B2EoN165AvXyi0+UHCL9rV5NPqTW9vL3oTpRhu3OLZ7iZIDMIGrDEbr79VY0lluWX7kAMmJ3137D8gXAuC3HFtPId82UIM7Hgk4M6pLN67T0t4ou/hPUKbm2JicHI2yrI2pPJeRQVhiUJjuamqx55AcoVaSaT7X+cn/zywo0nKJeSOJXv3CdeDIOEH4iWFO7JL78TQlp2+d8uKwRm30XimxvqJ0OAiUBAWqowWZlTdkclk0H31w8m/y7okjDs4fi5RFYMzmsf3i4WuQEFYPcNYSoTiuao73n/nHHKJ5JRr5JJi6LiDwqITcslw4+Yp13TFAOtPsjaOCQ0OfAVhQ1yjxaXFT6p9SG53cMglQeGEwmJYd3CcbgwjBmfcxuozNZbvB+ErCOvIjVH+wHaUP6BWxe3peFdwB8cvnEAzLLrdwRldux6jazcYEWPy99l4RrjI8BSEzcaN1sBVPySiq7tbuMbx63R1Bg0UFoO4/vAeY2IQWRvVfrN4T0FmgzsuX3oP48lS4boTL5eEHTR4kVm3Hrl1Gzxa9BnPe3cHgiDMHUaX6tD/WlUudpwv+i/cna6pQYMX2a2iG8OQBxJeLhEEYYvYjJFcWaecupBxB8fZ6ZoaNHiR3fIg7DuWebTok7HxNfc/niIIG1kZLcPq9B3dnR3CNT94p6sTFq91p6XcwRnfLYbIMNAM3j3icjskdAnSiY47BgYGJmfLsvR9aY+W8DJh0UkULsnZ+Jbz75OCsFm50XmHzoeUaW1BbEQuy8ohl6i6Iz/Yj9JzfxOuF8O0S2he4py9Ox2ilhYtgk6mlej7+TOoPKa2/qwyNy5cK0bvoWZU/eHlIneJ5DZuhq2QcZYhD/yI3xaZIDq5pP7fv4LM5S5UHntVySX1m7cK14Igd3x8qBmJ69dQ0fZGwJ0idtlCZHbKZ5xlyNn4Ir+tIAgLV8Y2y+hkWon+303kMakwVN7eJrR7Qe5YsmSJR4s/g39pQW5gYrFh7GSr731+ZHc2GXVJxsYnedjiDjGyyIuj447h0ycwfPrmwvLqP/5KuMcLVXcQH70w0bdROiTX+TbiF94R7gkiCpfYwDcRlSA6E0H+IXFkwkl5ZkzZHTwsOnNTyaNy4jvJfs7sRDFn4w
uIQhCdXJLbHZxiLlnVcI9wrRgkvDtRGL+g4ZKldxoVJQesg0OQGSvPEh+/7L0og1xSfs67LynJjGHl6jXC9SCGXj+K3nSXZ6Iw2fqqcK0YJofANEmk7zFTi4Sh6Y7MlXShk/Wj8g3vtrUa7rh8sNlTDCJ+tg3W9f8K14Mw7RKatcfYZnwj6LjD3Xe4WXDxbSy4ODWc6Lhj8PQJ/O+UGBan/FyNvsSkS2wb22LshITQUC5Jxx18qBuEuy+pq60NuNubD34WLDyR+GurlktMpebzQJ0xh0ThDo7TJfFsBjUN9wr3BNHfdgL9bcHu4Oi4JGPIJVyQaqFFEZ1MK82WhwL6DjcVbRMTuNq7liOZlM/QEtd+K79wJn72FCzFXFrOUAHLBkpjJkZYOu6gXBKfLctQcaoVZYN9WLVe7eWOXU4rCWIN30CiVX0vpwmX0EjLq0ClxIKGRi13UC5JlU0N65TdkX5e/T8LDYF1XJKvXSVcVyW0IIs1claq7gALiwvv2ShcD0LVHRxtlxhIp4QSRKcARei4Qycs6riDozNRNFHACiWIzodEuSQdd6iGxexgP66/pr+vv+CSU8G5NC/CzkuMbIuexxyhBJGdRzihEEf1EhUo8UgJSBUSi6qw7Cv6SwSo3kEhSBWdeYyTUIJQGptCkCo6AwEd8Vc8pb+iSaeDphBnfXRNuK5C6JCl80FRNVHHJTfOnxWuB1G6MoVlX1Z3ScEdO9Ur2mHdAROCkEtUwwlVE3VccqbjQmFxmwqp72isfNn5SKEqqALVU8K6A0yQ4JXGEkyHS2hh9cii6qILo92oukTXHTpVRzdxCyMxdq5tKHQ6XXJJhcIyoaHNE3WH9NUPlV2iIkhu4xYtd1DVMSwWMBZjW51Do+MS2XkMLRWlxXBEPpFEuuNd4Z4gqjZvQ9VmuTqczjzChDswEa66YuzE59CQS6i+ofRGVqSkFkRcd207SHepv2SZvoSqf1TfUIHqJybcAYcgRhyCiFzSu2ZDYX+Gk0xpGbovvSfcG4SMS3TcYWJkxbEsnIiZPGWTqn8mXUKrQ2486N3P/FtxoTSx4mn/kZ2uO6jKaAo6goMPe0OPtDh6s3dREBJjZOmywlpaLzLJUmWXLN21GwtqvCvWOosVTLqDRlhwzENm1CWUOKQEIoevmyoWQt7XcEmtR19C1b6cKywWw7Q74sAFRCEIHGt0VeB9CReD0tjFckmjydLC2SQqLHvsccElOtU+k2JgwiGvISpBqN5BVUEVCun1z2yfXDcl28F2+OwvD8LpEi13jNzQqpcEYQEvggvCzp09GnC/ElTv6NUoQi1mEziaLfv1HW6G4iVaLkmwLIHOZk6qJlqSRzfJkLTwn/t77EKcd+ayjJ7SrOMSHk5Uc0k6LqERl0xYdBOFO+IW/sz/HJkg5BKdvmTFd59VziUNKe5JJO56eh+yjz4pXC9GYTGdQXdgQoQfO/48AQtb6sWNAHTCVsVDTVq5JFoMpwIVsOzGzyq/vqTG4ocgSixc4uEKHul3o0cx6RSwKisrUaG4Z5BySToLGj6luGDbRAHKTdzCL52XpgjCZu3GJonQnCjW1jcI1/zgmVZaKqrqkuW1KcSy8pljkxNBsMmg+4BMrwKVepwJQMcltavXIJkZE6574exgr7yk9tJp0R0tTZUhCnckLfzafU0QhD3aR22qXQSdzl0mnBQyrWdvbuihZT+0OE6F1evvk3JJQmNzaBAxIOt10LIgCMPoaUCUmh9ULGDJuMQrhKj2JTIuMVWAclIS8x5AeQrCXKL2CQZA6RCZ/RluUgH7QPxySbR0VMclQZgqQHESFvq83AE/QRhGXMJzUzqdbqrhXt9w4uUOjo5LPlESF64jInckLTwrXGT4CsJGXJ62ksW929VUp0uzZS93cMglWcUsQYPPfnfTs3KadwQdPe4rCGMfewCWMl5nFJrqdGVWpl896PuePSkrKxNc4h40hIWOHU9Y2BH0YwIFYbN3sXpUBC8xOG
E7Xdlc0pWDzaFdEhQWdVgQw3POWbkXgYJgQpQW9jQyKYLEgGanm7r75hBYNtOaHejTcgnPEvgNGnQpsdC+qcf+QbF/rnL2e9EZvOxRqqou4eFENdMaxiWmy7MJS+60JSlBWOhqCupPVM61pb5E54Mq/eCCUqaVXKK6R4TOTqnKjhU2f5qA+o1SCw8VC1UcIw90MXnI8O1GWQxf3dRj/0b2bSkttmZD4W84r82L4Q89h0pFDOisfmez+IIo82L4M20PBQMTZTiP5+bF8EZXDIR9Fi6dzExPIxMa5jBhxEDYDTv0i+kFCA1zlLBiwMQOKnoB9Gg4q3BUx9yEPYltf1gxYPLx3W/VWFvpaWT8ZLS5Ak362DxDfS2SB8b2qdMLKrVwN6UIhMbbFHqv9J5NiQGTDnFCTyOjB2DZBTffflCIokShTG5KlUgEwUQIS9EDsOhsc6HxFobqGZRCl02FqBKZIBw62JGeuUSP+REabyGo7EqVvqDikgkiF4RDcxZ6zA89WUZonMXQ6hBakGBiBCXDtAnCIWHoyTKzfTRGoydaNzVdQnCmXRAOhTJ6mMls62Ooj6DlnVGHJj9mTBAOdf70/Ax6ZAM9JUC4YRqg/Rm0JYBWoUfVWcsy44I4IXHoKQF0MD2dhR5VWKNwRHv6aBsZ7VyaaRGczCpBvKDQRic+05m29EVHqcoKRR88O66CNuR30T7wmQpFUgD4Px6QRGRh7pGzAAAAAElFTkSuQmCC
+ mediatype: image/png
+ links:
+ - name: Source Code
+ url: 'https://github.com/quay/quay-operator'
+ install:
+ spec:
+ deployments:
+ - name: quay-operator.v3.8.9
+ spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ name: quay-operator-alm-owned
+ strategy: {}
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ name: quay-operator-alm-owned
+ name: quay-operator-alm-owned
+ spec:
+ containers:
+ - command:
+ - /workspace/manager
+ - '--namespace=$(WATCH_NAMESPACE)'
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MY_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: 'metadata.annotations[''olm.targetNamespaces'']'
+ - name: QUAY_VERSION
+ valueFrom:
+ fieldRef:
+ fieldPath: 'metadata.annotations[''quay-version'']'
+ - name: QUAY_DEFAULT_BRANDING
+ value: redhat
+ - name: RELATED_IMAGE_COMPONENT_QUAY
+ value: >-
+ registry.redhat.io/quay/quay-rhel8@sha256:238d5b181703725a20e778f4cdb4fb0677573e9a3dcc06dbf072a56791f98518
+ - name: RELATED_IMAGE_COMPONENT_CLAIR
+ value: >-
+ registry.redhat.io/quay/clair-rhel8@sha256:0972d7d7ed38b2e9ef15bd2fa260a912e4e509f42248478fd3a4024c433de30d
+ - name: RELATED_IMAGE_COMPONENT_BUILDER
+ value: >-
+ registry.redhat.io/quay/quay-builder-rhel8@sha256:8f969220947d456af25475f28a23293e1f0cffe6f28aef43dbca65faab25919b
+ - name: RELATED_IMAGE_COMPONENT_BUILDER_QEMU
+ value: >-
+ registry.redhat.io/quay/quay-builder-qemu-rhcos-rhel8@sha256:5031f3113cc7cadbb29728daadcf5e5d9995df536ce8e108d1f54735ac8091b7
+ - name: RELATED_IMAGE_COMPONENT_POSTGRES
+ value: >-
+ registry.redhat.io/rhel8/postgresql-10@sha256:dfb0ce045df3f0a490a318494e0c7588f8709c6631f0922bace06ed5633326ab
+ - name: RELATED_IMAGE_COMPONENT_REDIS
+ value: >-
+ registry.redhat.io/rhel8/redis-6@sha256:a287fd707a4f0b9b34f2f6bb0359dbe12fa1dd51e6de7a5685a0191a6d3734fa
+ image: >-
+ registry.redhat.io/quay/quay-operator-rhel8@sha256:0709b7c45a15445d14dd1e35e2d5843d670b4aaf0b120bca2d3eeb55fdc87021
+ name: quay-operator
+ resources: {}
+ serviceAccountName: quay-operator
+ permissions:
+ - rules:
+ - apiGroups:
+ - quay.redhat.com
+ resources:
+ - quayregistries
+ - quayregistries/status
+ verbs:
+ - '*'
+ - apiGroups:
+ - redhatcop.redhat.io
+ resources:
+ - quayecosystems
+ - quayecosystems/status
+ verbs:
+ - '*'
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - '*'
+ - apiGroups:
+ - ''
+ resources:
+ - pods
+ - services
+ - secrets
+ - configmaps
+ - serviceaccounts
+ - persistentvolumeclaims
+ - events
+ verbs:
+ - '*'
+ - apiGroups:
+ - ''
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - watch
+ - list
+ - update
+ - patch
+ - apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - roles
+ - rolebindings
+ verbs:
+ - '*'
+ - apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ - routes/custom-host
+ verbs:
+ - '*'
+ - apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - '*'
+ - apiGroups:
+ - objectbucket.io
+ resources:
+ - objectbucketclaims
+ verbs:
+ - '*'
+ - apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - prometheusrules
+ - servicemonitors
+ verbs:
+ - '*'
+ - apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - '*'
+ serviceAccountName: quay-operator
+ strategy: deployment
+ maintainers:
+ - email: support@redhat.com
+ name: Red Hat
+ description: >-
+ The Red Hat Quay Operator deploys and manages a production-ready
+
+ [Red Hat Quay](https://www.openshift.com/products/quay) private container
+ registry.
+
+ This operator provides an opinionated installation and configuration of Red
+ Hat Quay.
+
+ All components required, including Clair, database, and storage, are
+ provided in an
+
+ operator-managed fashion. Each component may optionally be self-managed.
+
+
+ ## Operator Features
+
+
+ * Automated installation of Red Hat Quay
+
+ * Provisions instance of Redis
+
+ * Provisions PostgreSQL to support both Quay and Clair
+
+ * Installation of Clair for container scanning and integration with Quay
+
+ * Provisions and configures RHOCS for supported registry object storage
+
+ * Enables and configures Quay's registry mirroring feature
+
+
+ ## Prerequisites
+
+
+ By default, the Red Hat Quay operator expects RHOCS to be installed on the
+ cluster to
+
+ provide the _ObjectBucketClaim_ API for object storage. For instructions
+ installing and
+
+ configuring the RHOCS Operator, see the "Enabling OpenShift Container
+ Storage" in the
+
+ [official
+ documentation](https://access.redhat.com/documentation/en-us/red_hat_quay/3/html-single/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index#enabling_openshift_container_storage).
+
+
+ ## Simplified Deployment
+
+
+ The following example provisions a fully operator-managed deployment of Red
+ Hat Quay,
+
+ including all services necessary for production:
+
+
+ ```
+
+ apiVersion: quay.redhat.com/v1
+
+ kind: QuayRegistry
+
+ metadata:
+ name: my-registry
+ ```
+
+
+ ## Documentation
+
+
+ See the
+
+ [official
+ documentation](https://access.redhat.com/documentation/en-us/red_hat_quay/3/html/deploy_red_hat_quay_on_openshift_with_the_quay_operator/index)
+
+ for more complex deployment scenarios and information.
+ replaces: quay-operator.v3.8.8
+ selector:
+ matchLabels:
+ alm-owner-quay-operator: quay-operator
+ operated-by: quay-operator
+ labels:
+ alm-owner-quay-operator: quay-operator
+ operated-by: quay-operator
+status:
+ cleanup: {}
+ conditions:
+ - lastTransitionTime: '2023-07-08T17:38:14Z'
+ lastUpdateTime: '2023-07-08T17:38:14Z'
+ message: requirements not yet checked
+ phase: Pending
+ reason: RequirementsUnknown
+ - lastTransitionTime: '2023-07-08T17:38:14Z'
+ lastUpdateTime: '2023-07-08T17:38:14Z'
+ message: one or more requirements couldn't be found
+ phase: Pending
+ reason: RequirementsNotMet
+ - lastTransitionTime: '2023-07-08T17:38:17Z'
+ lastUpdateTime: '2023-07-08T17:38:17Z'
+ message: 'all requirements found, attempting install'
+ phase: InstallReady
+ reason: AllRequirementsMet
+ - lastTransitionTime: '2023-07-08T17:38:18Z'
+ lastUpdateTime: '2023-07-08T17:38:18Z'
+ message: waiting for install components to report healthy
+ phase: Installing
+ reason: InstallSucceeded
+ - lastTransitionTime: '2023-07-08T17:38:18Z'
+ lastUpdateTime: '2023-07-08T17:38:18Z'
+ message: >-
+ installing: waiting for deployment quay-operator.v3.8.9 to become ready:
+ deployment "quay-operator.v3.8.9" not available: Deployment does not
+ have minimum availability.
+ phase: Installing
+ reason: InstallWaiting
+ - lastTransitionTime: '2023-07-08T17:38:25Z'
+ lastUpdateTime: '2023-07-08T17:38:25Z'
+ message: install strategy completed with no errors
+ phase: Succeeded
+ reason: InstallSucceeded
+ lastTransitionTime: '2023-07-08T17:38:25Z'
+ lastUpdateTime: '2023-07-08T17:38:25Z'
+ message: install strategy completed with no errors
+ phase: Succeeded
+ reason: InstallSucceeded
+ requirementStatus:
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: quayecosystems.redhatcop.redhat.io
+ status: Present
+ uuid: a9a8e535-9653-4aac-8df3-8d669d0b8015
+ version: v1
+ - group: apiextensions.k8s.io
+ kind: CustomResourceDefinition
+ message: CRD is present and Established condition is true
+ name: quayregistries.quay.redhat.com
+ status: Present
+ uuid: 4d309389-4d22-48f0-a333-a8601647804b
+ version: v1
+ - dependents:
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["quay.redhat.com"],"resources":["quayregistries","quayregistries/status"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["redhatcop.redhat.io"],"resources":["quayecosystems","quayecosystems/status"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["apps"],"resources":["deployments"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":[""],"resources":["pods","services","secrets","configmaps","serviceaccounts","persistentvolumeclaims","events"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["get","watch","list","update","patch"],"apiGroups":[""],"resources":["namespaces"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["rbac.authorization.k8s.io"],"resources":["roles","rolebindings"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["route.openshift.io"],"resources":["routes","routes/custom-host"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["autoscaling"],"resources":["horizontalpodautoscalers"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["objectbucket.io"],"resources":["objectbucketclaims"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["monitoring.coreos.com"],"resources":["prometheusrules","servicemonitors"]}
+ status: Satisfied
+ version: v1
+ - group: rbac.authorization.k8s.io
+ kind: PolicyRule
+ message: >-
+ namespaced
+ rule:{"verbs":["*"],"apiGroups":["batch"],"resources":["jobs"]}
+ status: Satisfied
+ version: v1
+ group: ''
+ kind: ServiceAccount
+ message: ''
+ name: quay-operator
+ status: Present
+ version: v1
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/main.yml
new file mode 100644
index 00000000000..03a4801b4c7
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+# Do not modify this file
+
+- name: Running Pre Workload Tasks
+ include_tasks:
+ file: ./pre_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Workload Tasks
+ include_tasks:
+ file: ./workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Post Workload Tasks
+ include_tasks:
+ file: ./post_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Workload removal Tasks
+ include_tasks:
+ file: ./remove_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "destroy" or ACTION == "remove"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/post_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/post_workload.yml
new file mode 100644
index 00000000000..65e66372aba
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/post_workload.yml
@@ -0,0 +1,23 @@
+---
+# Implement your Post Workload deployment tasks here
+# Leave these as the last tasks in the playbook
+
+# For deployment onto a dedicated cluster (as part of the
+# cluster deployment) set workload_shared_deployment to False
+# This is the default so it does not have to be set explicitely
+- name: post_workload tasks complete
+ debug:
+ msg: "Post-Workload tasks completed successfully."
+ when:
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
+
+# For RHPDS deployment (onto a shared cluster) set
+# workload_shared_deployment to True
+# (in the deploy script or AgnosticV configuration)
+- name: post_workload tasks complete
+ debug:
+ msg: "Post-Software checks completed successfully"
+ when:
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/pre_workload.yml
new file mode 100644
index 00000000000..34792a4c41d
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/pre_workload.yml
@@ -0,0 +1,24 @@
+---
+# Implement your Pre Workload deployment tasks here
+
+# Leave these as the last tasks in the playbook
+
+# For deployment onto a dedicated cluster (as part of the
+# cluster deployment) set workload_shared_deployment to False
+# This is the default so it does not have to be set explicitely
+- name: pre_workload tasks complete
+ debug:
+ msg: "Pre-Workload tasks completed successfully."
+ when:
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
+
+# For RHPDS deployment (onto a shared cluster) set
+# workload_shared_deployment to True
+# (in the deploy script or AgnosticV configuration)
+- name: pre_workload tasks complete
+ debug:
+ msg: "Pre-Software checks completed successfully"
+ when:
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/quay-deployment.yml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/quay-deployment.yml
new file mode 100644
index 00000000000..066f465b1d6
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/quay-deployment.yml
@@ -0,0 +1,43 @@
+- name: Deploy Quay Registry Operator
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/quay-registry-operator/operator/overlays/stable-3.8') }}
+ register: quay_operator_result
+
+- name: Deploy OpenShift Data Foundation Operator
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/openshift-data-foundation-operator/operator/overlays/stable-4.12') }}
+ register: data_foundation_operator_result
+
+- name: Wait for OpenShift Container Storage NooBaa deployment
+ ansible.builtin.pause:
+ seconds: 60
+
+- name: Deploy OpenShift Container Storage NooBaa
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/openshift-container-storage-noobaa/overlays/default') }}
+ register: container_storage_result
+
+- name: Wait for noobaa-core-0 pod to be ready
+ kubernetes.core.k8s_info:
+ kind: Pod
+ name: noobaa-core-0
+ namespace: openshift-storage
+ register: noobaa_pod_info
+ until: noobaa_pod_info.resources | length > 0 and noobaa_pod_info.resources[0].status.phase == "Running"
+ ignore_errors: true
+
+- name: Deploy Quay Registry Operator Instance
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/quay-registry-operator/instance/overlay/default') }}
+
+- name: Finished deploying Quay
+ ansible.builtin.debug:
+ msg: "Quay deployment completed successfully."
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/remove_workload.yml
new file mode 100644
index 00000000000..c4dd771d5a9
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/remove_workload.yml
@@ -0,0 +1,66 @@
+---
+# Implement your Workload removal tasks here
+
+- name: Remove GitOps
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/openshift-gitops') }}
+ state: absent
+ ignore_errors: true
+ vars:
+ ACTION: remove
+
+- name: Remove Quay Registry Operator Instance
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/quay-registry-operator/instance/overlay/default') }}
+ state: absent
+ ignore_errors: true
+ vars:
+ ACTION: remove
+
+- name: Remove Quay Registry Operator
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/quay-registry-operator/operator/overlays/stable-3.8') }}
+ state: absent
+ ignore_errors: true
+ vars:
+ ACTION: remove
+
+- name: Remove Quay CSV
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'files/quay-csv.yaml') | from_yaml }}"
+ vars:
+ ACTION: remove
+
+- name: Remove OpenShift Gitops CSV
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'files/gitops-csv.yaml') | from_yaml }}"
+ vars:
+ ACTION: remove
+
+- name: Remove Pipelines
+ ansible.builtin.include_role:
+ name: ocp4_workload_pipelines
+ ignore_errors: true
+ vars:
+ ACTION: remove
+
+- name: Remove Gitea Operator
+ ansible.builtin.include_role:
+ name: ocp4_workload_gitea_operator
+ ignore_errors: true
+ vars:
+ ACTION: remove
+
+# Leave this as the last task in the playbook.
+- name: Remove_workload tasks complete
+ ansible.builtin.debug:
+ msg: "Remove Workload tasks completed successfully."
+ when: not silent | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/workload.yml
new file mode 100644
index 00000000000..38ba7ae083b
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/tasks/workload.yml
@@ -0,0 +1,76 @@
+---
+# Implement your Workload deployment tasks here
+
+- name: Install OpenShift GitOps Operator
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('file', 'files/operator.yaml') | from_yaml }}"
+
+- name: Wait for OpenShift GitOps Operator
+ ansible.builtin.pause:
+ seconds: 60
+
+- name: Install OpenShift Gitops
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/openshift-gitops') }}
+ state: present
+ register: openshift_gitops_result
+ ignore_errors: true
+
+- name: Install Gitea
+ ansible.builtin.include_role:
+ name: ocp4_workload_gitea_operator
+
+- name: Install OpenShift Pipelines
+ kubernetes.core.k8s:
+ definition: >
+ {{ lookup('kubernetes.core.kustomize',
+ dir='https://github.com/tosin2013/sno-quickstarts/gitops/cluster-config/openshift-pipelines-operator/overlays/latest') }}
+ state: present
+ register: openshift_pipelines_result
+ ignore_errors: true
+
+- name: Install and Deploy Quay
+ ansible.builtin.include_tasks: quay-deployment.yml
+
+
+- name: Set gitea route
+ ansible.builtin.set_fact:
+ ocp4_workload_argocd_quay_todo_app_gitea_route: "{{ r_gitea.resources[0].status.giteaRoute }}"
+
+- name: Set gitea repo url
+ ansible.builtin.set_fact:
+ ocp4_workload_argocd_quay_todo_app_gitea_repo_url: >-
+ {{ ocp4_workload_argocd_quay_todo_app_gitea_route }}/{{ ocp4_workload_gitea_user }}/todo-demo-app-helmrepo.git
+
+- name: Install Gitops Repo Secret
+ kubernetes.core.k8s:
+ state: present
+ merge_type:
+ - strategic-merge
+ - merge
+ definition: "{{ lookup('template', './templates/gitops-repo-secret.yml.j2') }}"
+
+- name: Add ArgoCD Task
+ kubernetes.core.k8s:
+ state: present
+ merge_type:
+ - strategic-merge
+ - merge
+ definition: "{{ lookup('template', './templates/cluster-config.yaml.j2') }}"
+
+- name: Add Pipeline Task
+ kubernetes.core.k8s:
+ state: present
+ merge_type:
+ - strategic-merge
+ - merge
+ definition: "{{ lookup('template', './templates/argocd-deploy-pipeline.yaml.j2') }}"
+
+# Leave this as the last task in the playbook.
+- name: Workload tasks complete
+ ansible.builtin.debug:
+ msg: "Workload Tasks completed successfully."
+ when: not silent | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/argocd-deploy-pipeline.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/argocd-deploy-pipeline.yaml.j2
new file mode 100644
index 00000000000..92f549db1e2
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/argocd-deploy-pipeline.yaml.j2
@@ -0,0 +1,163 @@
+apiVersion: tekton.dev/v1beta1
+kind: Pipeline
+metadata:
+ labels:
+ app: todo-demo-app
+ name: argocd-quay-todo-demo-app-pipeline
+ namespace: todo-demo-app
+spec:
+ params:
+ - default: latest
+ description: Image Tag Value
+ name: IMAGE_TAG
+ type: string
+ - default: CHANGEME
+ description: Current Image Tag Value
+ name: CURRENT_IMAGE_TAG
+ type: string
+ - default: 'quay.io/takinosh/todo-demo-app:v1'
+ name: quay-io-repository
+ type: string
+ - default: latest
+ name: quay-io-image-tag-name
+ type: string
+ - default: >-
+ {{ ocp4_workload_argocd_quay_todo_app_gitea_route }}/{{ ocp4_workload_gitea_user }}/todo-demo-app-helmrepo
+ name: GIT_REPOSITORY
+ type: string
+ - default: pipeline@example.com
+ name: GIT_EMAIL
+ type: string
+ - default: todo-demo-app
+ name: GIT_NAME
+ type: string
+ - default: '1'
+ name: REPLICA_COUNT
+ type: string
+ resources:
+ - name: app-git
+ type: git
+ - name: image
+ type: image
+ - name: todo-demo-app-helmrepo-git
+ type: git
+ tasks:
+ - name: fetch-repository
+ params:
+ - name: url
+ value: 'https://github.com/tosin2013/todo-demo-app'
+ - name: revision
+ value: master
+ - name: subdirectory
+ value: ''
+ - name: deleteExisting
+ value: 'true'
+ taskRef:
+ kind: ClusterTask
+ name: git-clone
+ workspaces:
+ - name: output
+ workspace: shared-workspace
+ - name: maven-run
+ params:
+ - name: CONTEXT_DIR
+ value: .
+ - name: GOALS
+ value:
+ - '-DskipTests'
+ - clean
+ - package
+ runAfter:
+ - fetch-repository
+ taskRef:
+ kind: ClusterTask
+ name: maven
+ workspaces:
+ - name: maven-settings
+ workspace: maven-settings
+ - name: source
+ workspace: shared-workspace
+ - name: build-java-app-image
+ params:
+ - name: CONTEXT
+ value: .
+ - name: DOCKERFILE
+ value: src/main/docker/Dockerfile
+ - name: IMAGE
+ value: >-
+ image-registry.openshift-image-registry.svc:5000/todo-demo-app/todo-demo-app:$(params.IMAGE_TAG)
+ - name: TLSVERIFY
+ value: 'false'
+ runAfter:
+ - maven-run
+ taskRef:
+ kind: ClusterTask
+ name: buildah
+ workspaces:
+ - name: source
+ workspace: shared-workspace
+ - name: tag-test-image
+ params:
+ - name: SCRIPT
+ value: |
+ oc tag todo-demo-app:$(params.IMAGE_TAG) todo-demo-app:latest
+ runAfter:
+ - build-java-app-image
+ taskRef:
+ kind: ClusterTask
+ name: openshift-client
+ - name: push-todo-demo-app-image-to-quay
+ params:
+ - name: quay-io-repository
+ value: $(params.quay-io-repository)
+ - name: quay-io-image-tag-name
+ value: $(params.quay-io-image-tag-name)
+ resources:
+ inputs:
+ - name: image
+ resource: image
+ runAfter:
+ - tag-test-image
+ taskRef:
+ kind: Task
+ name: push-todo-demo-app-image-to-quay
+ - name: update-image-tag-in-git
+ params:
+ - name: GIT_REPOSITORY
+ value: $(params.GIT_REPOSITORY)
+ - name: GIT_EMAIL
+ value: pipeline@example.com
+ - name: GIT_NAME
+ value: todo-demo-app
+ - name: GIT_MANIFEST_DIR
+ value: .
+ - name: TARGET_IMAGE
+ value: $(params.quay-io-repository)
+ - name: TARGET_TAG
+ value: $(params.quay-io-image-tag-name)
+ - name: REPLICA_COUNT
+ value: $(params.REPLICA_COUNT)
+ resources:
+ inputs:
+ - name: source
+ resource: todo-demo-app-helmrepo-git
+ runAfter:
+ - push-todo-demo-app-image-to-quay
+ taskRef:
+ kind: Task
+ name: update-image-tag-in-git
+ - name: argocd-task-sync-and-wait
+ params:
+ - name: application-name
+ value: todo-demo-app
+ - name: revision
+ value: main
+ runAfter:
+ - update-image-tag-in-git
+ taskRef:
+ kind: ClusterTask
+ name: argocd-task-sync-and-wait
+ workspaces:
+ - name: shared-workspace
+ - name: maven-settings
+ - name: helm-shared-workspace
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/cluster-config.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/cluster-config.yaml.j2
new file mode 100644
index 00000000000..d5a9a4f12fe
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/cluster-config.yaml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: todo-demo-app
+ namespace: openshift-gitops
+spec:
+ destination:
+ namespace: todo-demo-app
+ server: https://kubernetes.default.svc
+ project: default
+ source:
+ path: app
+ repoURL: "{{ ocp4_workload_argocd_quay_todo_app_gitea_repo_url }}"
+ targetRevision: main
+ syncPolicy:
+ automated:
+ prune: false
+ selfHeal: false
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/gitops-repo-secret.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/gitops-repo-secret.yml.j2
new file mode 100644
index 00000000000..a3bf8cc3ace
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_argocd_quay_todo_app/templates/gitops-repo-secret.yml.j2
@@ -0,0 +1,12 @@
+kind: Secret
+apiVersion: v1
+metadata:
+ name: repo-gitops
+ namespace: openshift-gitops
+ labels:
+ argocd.argoproj.io/secret-type: repository
+data:
+ insecure: "{{ 'true' | b64encode }}"
+ type: "{{ 'git' | b64encode }}"
+ url: "{{ ocp4_workload_argocd_quay_todo_app_gitea_repo_url | b64encode }}"
+type: Opaque
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_authentication_rosa/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_authentication_rosa/tasks/workload.yml
index 6500411fe1a..a228c56f3ac 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_authentication_rosa/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_authentication_rosa/tasks/workload.yml
@@ -108,6 +108,28 @@
_ocp4_workload_authentication_rosa_admin_password: >-
{{ ocp4_workload_authentication_rosa_admin_password }}
+- name: Check if pool already created
+ shell: |
+ aws cognito-idp list-user-pools --max-results 1 | jq '.UserPools | length'
+ register: r_user_pool_size
+
+- name: Delete existing user pools
+ when: r_user_pool_size.stdout | int > 0
+ block:
+ - shell: >
+ aws cognito-idp list-user-pools --max-results 1 | jq -r .UserPools[0].Id
+ register: r_aws_user_pool_id
+ - set_fact:
+ _ocp4_workload_authentication_rosa_aws_user_pool_id: "{{ r_aws_user_pool_id.stdout }}"
+ - shell: |
+ aws cognito-idp delete-user-pool-domain --user-pool-id {{
+ _ocp4_workload_authentication_rosa_aws_user_pool_id }} --domain rosa-{{ guid }}
+ aws cognito-idp delete-user-pool --user-pool-id {{
+ _ocp4_workload_authentication_rosa_aws_user_pool_id }}
+ - shell: |
+ rosa delete idp Cognito --cluster=rosa-{{ guid }} --yes
+ ignore_errors: true
+
- name: Create user pool for admin
shell: >
aws cognito-idp create-user-pool --pool-name rosa-{{ guid }} --auto-verified-attributes email \
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_big_demo/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_big_demo/tasks/workload.yml
index 139de0e4264..c2abcebf87b 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_big_demo/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_big_demo/tasks/workload.yml
@@ -14,7 +14,6 @@
- name: Install JDK 11
command:
cmd: dnf -y install java-11-openjdk-devel
- warn: false
- name: Create /usr/local/maven directory
file:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_gitea_requirements.yml b/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_gitea_requirements.yml
index e19064907c4..9b888fd205a 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_gitea_requirements.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_gitea_requirements.yml
@@ -78,6 +78,15 @@
vars:
body:
name: globex-ui
+ scopes:
+ - write:activitypub
+ - write:misc
+ - write:notification
+ - write:organization
+ - write:package
+ - write:issue
+ - write:repository
+ - write:user
register: r_gitea_token
- name: Set Gitea token variable
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_stackrox.yml b/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_stackrox.yml
index fa44b202795..fc12450ae97 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_stackrox.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_blackhat_secured_container_pipelines/tasks/setup_stackrox.yml
@@ -46,6 +46,12 @@
namespace: "{{ ocp4_workload_blackhat_secured_container_pipelines_stackrox_namespace }}"
name: central
register: r_route
+ retries: 120
+ delay: 10
+ until:
+ - r_route is defined
+ - r_route.resources is defined
+ - r_route.resources | length > 0
- name: Set stackrox endpoint and url
set_fact:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/defaults/main.yml
index 457f4b46337..bd752974e43 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/defaults/main.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/defaults/main.yml
@@ -9,4 +9,6 @@ ocp4_workload_cert_manager_operator_channel_tmp_kubeconfig: >-
# channel tech-preview at original workload creation 2023-05-04 tok@redhat.com
ocp4_workload_cert_manager_operator_channel: stable-v1
-ocp4_workload_cert_manager_operator_channel_csv: cert-manager-operator.v1.10.2
+
+# Define CSV version when needed in agv
+# ocp4_workload_cert_manager_operator_channel_csv: cert-manager-operator.v1.10.2
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/templates/cert-manager-subscription.j2 b/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/templates/cert-manager-subscription.j2
index 5eaf513e262..da16db1f9bc 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/templates/cert-manager-subscription.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_cert_manager_operator/templates/cert-manager-subscription.j2
@@ -12,4 +12,6 @@ spec:
name: openshift-cert-manager-operator
source: redhat-operators
sourceNamespace: openshift-marketplace
+ {% if ocp4_workload_cert_manager_operator_channel_csv is defined -%}
startingCSV: {{ ocp4_workload_cert_manager_operator_channel_csv }}
+ {% endif %}
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_coolstore_apac_summit/templates/cicd/sonarqube-scan-task.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_coolstore_apac_summit/templates/cicd/sonarqube-scan-task.yaml.j2
index ab50ce0453c..760d587154c 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_coolstore_apac_summit/templates/cicd/sonarqube-scan-task.yaml.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_coolstore_apac_summit/templates/cicd/sonarqube-scan-task.yaml.j2
@@ -5,7 +5,7 @@ metadata:
namespace: "{{ ocp4_workload_coolstore_apac_summit_coolstore_namespace }}"
spec:
params:
- - default: 'docker.io/sonarsource/sonar-scanner-cli:latest'
+ - default: docker.io/sonarsource/sonar-scanner-cli:4.7
name: scanImage
type: string
- default: >-
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_coolstore_backoffice_demo_ohc/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_coolstore_backoffice_demo_ohc/tasks/workload.yml
index 58d649885df..8cb7db31f38 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_coolstore_backoffice_demo_ohc/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_coolstore_backoffice_demo_ohc/tasks/workload.yml
@@ -14,7 +14,6 @@
- name: Install JDK 11
command:
cmd: dnf -y install java-11-openjdk-devel
- warn: false
- name: Create /usr/local/maven directory
file:
@@ -638,4 +637,4 @@
definition: "{{ lookup('template', 'cicd/app-ci-pipeline-prod-rolebinding.yaml.j2' ) | from_yaml }}"
- name: Create stackrox resources
- include_tasks: stackrox_create_secrets.yml
\ No newline at end of file
+ include_tasks: stackrox_create_secrets.yml
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_devsecops_validated_pattern/tasks/setup_gitea_requirements.yml b/ansible/roles_ocp_workloads/ocp4_workload_devsecops_validated_pattern/tasks/setup_gitea_requirements.yml
index 21357a1c1ac..7f82ada31a1 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_devsecops_validated_pattern/tasks/setup_gitea_requirements.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_devsecops_validated_pattern/tasks/setup_gitea_requirements.yml
@@ -78,6 +78,15 @@
vars:
body:
name: globex-ui
+ scopes:
+ - write:activitypub
+ - write:misc
+ - write:notification
+ - write:organization
+ - write:package
+ - write:issue
+ - write:repository
+ - write:user
register: r_gitea_token
- name: Set Gitea token variable
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/acs.yml b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/acs.yml
index b894ec1a45d..ed2bb787469 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/acs.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/acs.yml
@@ -18,7 +18,7 @@
definition: "{{ lookup('template', 'acs-subs.yml.j2') }}"
- name: Wait for ACS CRD to exist
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/gitops.yml b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/gitops.yml
index 09094ffbb45..74035de2170 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/gitops.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/gitops.yml
@@ -11,7 +11,7 @@
definition: "{{ lookup('template', 'gitops-subs.yml.j2') }}"
- name: Wait for GitOps CRD to exist
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
@@ -27,7 +27,7 @@
definition: "{{ lookup('template', 'pipelines-subs.yml.j2') }}"
- name: Wait for GitOps CRD to exist
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/infrastructure.yml b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/infrastructure.yml
index 5088a9c38d6..a9d64085fc6 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/infrastructure.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/infrastructure.yml
@@ -152,7 +152,7 @@
# Install CodeReady Workspaces
- name: see if codeready is installed
- k8s_facts:
+ k8s_info:
api_version: org.eclipse.che/v1
kind: CheCluster
name: codeready-workspaces
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/install-codeready.yaml b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/install-codeready.yaml
index 3b3db674ee9..04d00bc7137 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/install-codeready.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/install-codeready.yaml
@@ -13,7 +13,7 @@
# wait for CRD to be a thing
- name: Wait for CodeReady CRD to be ready
- k8s_facts:
+ k8s_info:
api_version: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
name: checlusters.org.eclipse.che
@@ -44,7 +44,7 @@
delay: "15"
- name: Get codeready keycloak deployment
- k8s_facts:
+ k8s_info:
kind: Deployment
namespace: ocp-workshop
name: keycloak
@@ -73,7 +73,7 @@
msg: "codeready keycloak admin password: {{ codeready_sso_admin_password }}"
- name: get keycloak pod
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Pod
namespace: ocp-workshop
@@ -133,7 +133,7 @@
- ./files/stack_imagestream.yaml
- name: wait for stack to be a thing
- k8s_facts:
+ k8s_info:
kind: ImageStream
name: quarkus-stack
namespace: openshift
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/quay.yml b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/quay.yml
index cff4c727e8e..f5b7de4f64a 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/quay.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_dso/tasks/quay.yml
@@ -69,7 +69,7 @@
approved: true
- name: wait for the CSVs to exist
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ ocp4_dso_quay_csv }}"
@@ -80,7 +80,7 @@
until: csv_exists_out.resources | length > 0
- name: wait for the CSVs to be Succeeded
- k8s_facts:
+ k8s_info:
api_version: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
name: "{{ ocp4_dso_quay_csv }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/.yamllint b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/.yamllint
new file mode 100644
index 00000000000..b2a7e1775e9
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/.yamllint
@@ -0,0 +1,13 @@
+---
+extends: default
+
+rules:
+ comments:
+ require-starting-space: false
+ min-spaces-from-content: 1
+ comments-indentation: disable
+ indentation:
+ indent-sequences: consistent
+ line-length:
+ max: 120
+ allow-non-breakable-inline-mappings: true
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/defaults/main.yml
new file mode 100644
index 00000000000..357c921cfb9
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/defaults/main.yml
@@ -0,0 +1,25 @@
+---
+# --------------------------------------------------------
+# Ansible Automation Platform Controller URL
+# --------------------------------------------------------
+# ocp4_workload_eda_controller_aap_controller_url: [Required]
+
+# --------------------------------------------------------
+# Role's mandatory variables
+# --------------------------------------------------------
+become_override: false
+ocp_username: user-redhat.com
+silent: false
+tmp_dir: /tmp/{{ guid }}
+tmp_kubeconfig: "{{ tmp_dir }}/.kube/config"
+
+# --------------------------------------------------------
+# Workload: ocp4_workload_eda_controller
+# --------------------------------------------------------
+ocp4_workload_eda_controller_project: "aap"
+ocp4_workload_eda_controller_project_app_name: "eda-controller"
+
+ocp4_workload_eda_controller_admin_password: "{{ common_password }}"
+
+ocp4_workload_eda_controller_cluster_rolebinding_name: eda_default
+ocp4_workload_eda_controller_cluster_rolebinding_role: cluster-admin
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/meta/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/meta/main.yml
new file mode 100644
index 00000000000..81386b9eab3
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ role_name: ocp4_workload_eda_controller
+ author: Mitesh Sharma (mitsharm@redhat.com)
+ description: |
+ Installs EDA on OpenShift
+ license: GPLv3
+ min_ansible_version: "2.9"
+ platforms: []
+ galaxy_tags:
+ - eda
+ - openshift
+ - aap
+dependencies: []
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/readme.adoc b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/readme.adoc
new file mode 100644
index 00000000000..d4fc6b867a9
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/readme.adoc
@@ -0,0 +1,6 @@
+== ocp4_workload_eda_controller
+
+This role installs EDA on OpenShift
+
+== Dependencies
+Role: ocp4_workload_automation_controller_platform
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/main.yml
new file mode 100644
index 00000000000..03a4801b4c7
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+# Do not modify this file
+
+- name: Running Pre Workload Tasks
+ include_tasks:
+ file: ./pre_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Workload Tasks
+ include_tasks:
+ file: ./workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Post Workload Tasks
+ include_tasks:
+ file: ./post_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Workload removal Tasks
+ include_tasks:
+ file: ./remove_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "destroy" or ACTION == "remove"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/post_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/post_workload.yml
new file mode 100644
index 00000000000..33fc224b1d0
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/post_workload.yml
@@ -0,0 +1,28 @@
+---
+# Implement your Post Workload deployment tasks here
+- name: Remove temp kube config
+ file:
+ path: "{{ tmp_kubeconfig }}"
+ state: absent
+
+# Leave these as the last tasks in the playbook
+
+# For deployment onto a dedicated cluster (as part of the
+# cluster deployment) set workload_shared_deployment to False
+# This is the default so it does not have to be set explicitely
+- name: post_workload tasks complete
+ debug:
+ msg: "Post-Workload tasks completed successfully."
+ when:
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
+
+# For RHPDS deployment (onto a shared cluster) set
+# workload_shared_deployment to True
+# (in the deploy script or AgnosticV configuration)
+- name: post_workload tasks complete
+ debug:
+ msg: "Post-Software checks completed successfully"
+ when:
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/pre_workload.yml
new file mode 100644
index 00000000000..fdf3d4b33af
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/pre_workload.yml
@@ -0,0 +1,34 @@
+---
+# Implement your Pre Workload deployment tasks here
+- name: Ensure directory exists
+ file:
+ path: "{{ tmp_dir }}"
+ state: directory
+
+- name: Copy .kube/config and set env var
+ copy:
+ src: ~/.kube
+ dest: "{{ tmp_dir }}"
+ remote_src: true
+
+# Leave these as the last tasks in the playbook
+
+# For deployment onto a dedicated cluster (as part of the
+# cluster deployment) set workload_shared_deployment to False
+# This is the default so it does not have to be set explicitly
+- name: pre_workload tasks complete
+ debug:
+ msg: "Pre-Workload tasks completed successfully."
+ when:
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
+
+# For RHPDS deployment (onto a shared cluster) set
+# workload_shared_deployment to True
+# (in the deploy script or AgnosticV configuration)
+- name: pre_workload tasks complete
+ debug:
+ msg: "Pre-Software checks completed successfully"
+ when:
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/remove_workload.yml
new file mode 100644
index 00000000000..bfbfd17254f
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/remove_workload.yml
@@ -0,0 +1,23 @@
+---
+# Implement your Workload removal tasks here
+- name: Ensure directory exists
+ file:
+ path: "{{ tmp_dir }}"
+ state: directory
+
+- name: Copy .kube/config and set env var
+ copy:
+ src: ~/.kube
+ dest: "{{ tmp_dir }}"
+ remote_src: true
+
+- name: Remove temp directory
+ file:
+ path: "{{ tmp_dir }}"
+ state: absent
+
+# Leave this as the last task in the playbook.
+- name: remove_workload tasks complete
+ debug:
+ msg: "Remove Workload tasks completed successfully."
+ when: not silent | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/workload.yml
new file mode 100644
index 00000000000..5e61282bbce
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/tasks/workload.yml
@@ -0,0 +1,85 @@
+---
+- name: Setup environment vars
+ environment:
+ KUBECONFIG: "{{ tmp_kubeconfig }}"
+ block:
+ - name: Create secret and Install EDA
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('template', __definition) }}"
+ loop:
+ - eda_admin_secret.j2
+ - eda_controller.j2
+ loop_control:
+ loop_var: __definition
+
+ - name: Retrieve created route
+ kubernetes.core.k8s_info:
+ api_version: "route.openshift.io/v1"
+ kind: Route
+ name: "{{ ocp4_workload_eda_controller_project_app_name }}"
+ namespace: "{{ ocp4_workload_eda_controller_project }}"
+ register: r_eda_route
+ until: r_eda_route.resources[0].spec.host is defined
+ retries: 30
+ delay: 45
+
+ - name: Get eda-controller route hostname
+ ansible.builtin.set_fact:
+ eda_controller_hostname: "{{ r_eda_route.resources[0].spec.host }}"
+
+ - name: Wait for eda_controller to be running
+ ansible.builtin.uri:
+ url: https://{{ eda_controller_hostname }}/api/eda/v1/users/me/awx-tokens/
+ user: "admin"
+ password: "{{ ocp4_workload_eda_controller_admin_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ status_code: 200
+ register: r_result
+ until: not r_result.failed
+ retries: 60
+ delay: 45
+
+ - name: Create Rolebinding for Rulebook Activations
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('template', 'cluster_rolebinding.j2') }}"
+
+- name: Display Version and credentials
+ when: not silent | bool
+ ansible.builtin.debug:
+ msg:
+ - "EDA Controller URL: https://{{ eda_controller_hostname }}"
+ - "EDA Controller Admin Login: admin"
+ - "EDA Controller Admin Password: {{ ocp4_workload_eda_controller_admin_password }}"
+
+- name: Print Access information
+ agnosticd_user_info:
+ msg: "{{ item }}"
+ loop:
+ - "EDA Controller URL: https://{{ eda_controller_hostname }}"
+ - "EDA Controller Admin Login: admin"
+ - "EDA Controller Admin Password: {{ ocp4_workload_eda_controller_admin_password }}"
+
+- name: Print Access information
+ agnosticd_user_info:
+ data:
+ eda_controller_web_url: "https://{{ eda_controller_hostname }}"
+ eda_controller_admin_user: admin
+ eda_controller_admin_password: "{{ ocp4_workload_eda_controller_admin_password }}"
+
+- name: Set facts for Access information
+ ansible.builtin.set_fact:
+ eda_controller_web_url: "https://{{ eda_controller_hostname }}"
+ eda_controller_admin_user: admin
+ eda_controller_admin_password: "{{ ocp4_workload_eda_controller_admin_password }}"
+
+
+# Leave this as the last task in the playbook.
+- name: Workload tasks complete
+ when: not silent | bool
+ ansible.builtin.debug:
+ msg: "Workload Tasks completed successfully."
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/cluster_rolebinding.j2 b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/cluster_rolebinding.j2
new file mode 100644
index 00000000000..2a66b5a7ffb
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/cluster_rolebinding.j2
@@ -0,0 +1,13 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ ocp4_workload_eda_controller_cluster_rolebinding_name }}
+subjects:
+ - kind: ServiceAccount
+ name: default
+ namespace: {{ ocp4_workload_eda_controller_project }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ ocp4_workload_eda_controller_cluster_rolebinding_role }}
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/eda_admin_secret.j2 b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/eda_admin_secret.j2
new file mode 100644
index 00000000000..16ab144b9bc
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/eda_admin_secret.j2
@@ -0,0 +1,15 @@
+---
+kind: Secret
+apiVersion: v1
+metadata:
+ name: {{ ocp4_workload_eda_controller_project_app_name }}-admin-password
+ namespace: {{ ocp4_workload_eda_controller_project }}
+ labels:
+ app.kubernetes.io/component: eda
+ app.kubernetes.io/managed-by: eda-operator
+ app.kubernetes.io/name: {{ ocp4_workload_eda_controller_project_app_name }}
+ app.kubernetes.io/operator-version: '2.4'
+ app.kubernetes.io/part-of: {{ ocp4_workload_eda_controller_project_app_name }}
+data:
+ password: {{ ocp4_workload_eda_controller_admin_password | b64encode }}
+type: Opaque
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/eda_controller.j2 b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/eda_controller.j2
new file mode 100644
index 00000000000..2441a5c7dba
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_eda_controller/templates/eda_controller.j2
@@ -0,0 +1,26 @@
+---
+apiVersion: eda.ansible.com/v1alpha1
+kind: EDA
+metadata:
+ name: {{ ocp4_workload_eda_controller_project_app_name }}
+ namespace: {{ ocp4_workload_eda_controller_project }}
+spec:
+ route_tls_termination_mechanism: Edge
+ ingress_type: Route
+ loadbalancer_port: 80
+ no_log: true
+ image_pull_policy: IfNotPresent
+ ui:
+ replicas: 1
+ set_self_labels: true
+ api:
+ gunicorn_workers: 2
+ replicas: 1
+ redis:
+ replicas: 1
+ admin_user: admin
+ loadbalancer_protocol: http
+ worker:
+ replicas: 3
+ automation_server_url: '{{ ocp4_workload_eda_controller_aap_controller_url }}'
+ admin_password_secret: {{ ocp4_workload_eda_controller_project_app_name }}-admin-password
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/post_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/post_workload.yml
index 78ab9518ad4..fe3497d90dd 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/post_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/post_workload.yml
@@ -25,7 +25,7 @@
register: pod_list
until: pod_list|json_query('resources[*].status.phase')|unique == ["Running"]
retries: 5
- delay: 30
+ delay: 60
- name: Remove secret {{ ocp4_workload.starburst.secret }}
kubernetes.core.k8s:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/pre_workload.yml
index 865392d73d4..28d219d7bb0 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/pre_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/pre_workload.yml
@@ -54,13 +54,12 @@
secret_key: "{{ aws_secret_access_key }}"
region: "{{ aws_region }}"
bucket: "{{ aws_s3_bucket_name }}"
- object: /data/creditcard_with_empty_values.csv
- src: "{{ local_dataset_path.dest }}creditcard_with_empty_values.csv"
+ object: /data/features.csv
+ src: "{{ local_dataset_path.dest }}data/features.csv"
mode: put
# Leave these as the last tasks in the playbook
# ---------------------------------------------
-
# For deployment onto a dedicated cluster (as part of the
# cluster deployment) set workload_shared_deployment to False
# This is the default so it does not have to be set explicitely
@@ -79,4 +78,4 @@
msg: "Pre-Software checks completed successfully"
when:
- not silent|bool
- - workload_shared_deployment|default(False)
\ No newline at end of file
+ - workload_shared_deployment|default(False)
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/remove_workload.yml
index bb4ccfaff21..0e8edf0c6cd 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/remove_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/remove_workload.yml
@@ -5,6 +5,18 @@
template: "{{ item }}"
with_fileglob:
- "templates/starburst/operands/*.j2"
+ - "templates/starburst/cache/*.j2"
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Delete postgres catalog resources
+ kubernetes.core.k8s:
+ state: absent
+ template: "{{ item }}"
+ loop:
+ - postgres/postgres.yml.j2
register: result
until: result is not failed
retries: 10
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/workload.yml
index 8a5effad023..7f35108fc9e 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/tasks/workload.yml
@@ -47,6 +47,85 @@
starburst_query_editor_link: "http://{{ ocp4_workload.starburst.namespace }}-{{ ocp4_workload_starburst_route_url }}"
starburst_s3_bucket_name: "{{ aws_s3_bucket_name }}"
+- name: Create objects for postgresql catalog
+ kubernetes.core.k8s:
+ state: present
+ template: postgres/postgres.yml.j2
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Create Starburst cache service secret
+ kubernetes.core.k8s:
+ state: present
+ template: starburst/cache/cache-secret.yml.j2
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Create objects for Starburst cache service from deployment config
+ kubernetes.core.k8s:
+ state: present
+ template: starburst/cache/cache-deploymentconfig.yml.j2
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Create Starburst cache service service
+ kubernetes.core.k8s:
+ state: present
+ template: starburst/cache/cache-service.yml.j2
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Get Starburst cache service service info
+ kubernetes.core.k8s_info:
+ kind: Service
+ namespace: "{{ ocp4_workload.starburst.namespace }}"
+ name: "postgresql"
+ register: ocp4_workload_starburst_postgresql_service
+
+- name: Set the Starburst cache service IP and Port
+ set_fact:
+ postgresql_service_ip: "{{ ocp4_workload_starburst_postgresql_service.resources[0].spec.clusterIP }}"
+ postgresql_service_port: "{{ ocp4_workload_starburst_postgresql_service.resources[0].spec.ports[0].port }}"
+
+- name: Get postgres cache db data
+ kubernetes.core.k8s_info:
+ kind: Pod
+ namespace: "{{ ocp4_workload.starburst.namespace }}"
+ label_selectors:
+ - name=postgresql
+ register: r_service_db_pod
+ until: r_service_db_pod["resources"] is defined and (r_service_db_pod["resources"] | length > 0)
+ retries: 10
+ delay: 6
+
+- name: Wait until postgres cache db is up
+ kubernetes.core.k8s_exec:
+ namespace: "{{ ocp4_workload.starburst.namespace }}"
+ pod: "{{ r_service_db_pod.resources[0].metadata.name }}"
+ command: 'pg_isready'
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Create starburst db in cache service
+ kubernetes.core.k8s_exec:
+ namespace: "{{ ocp4_workload.starburst.namespace }}"
+ pod: "{{ r_service_db_pod.resources[0].metadata.name }}"
+ command: psql -c "CREATE DATABASE starburst;"
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
- name: Create rhods and starburst subscriptions
kubernetes.core.k8s:
state: present
@@ -70,6 +149,54 @@
retries: 10
delay: 6
+- name: Get postgresql catalog db data
+ kubernetes.core.k8s_info:
+ kind: Pod
+ namespace: "postgres"
+ label_selectors:
+ - app=postgres
+ register: r_catalog_db_pod
+ until: r_catalog_db_pod["resources"] is defined and (r_catalog_db_pod["resources"] | length > 0)
+ retries: 10
+ delay: 6
+
+- name: Wait until postgresql catalog db is running
+ kubernetes.core.k8s_exec:
+ namespace: "postgres"
+ pod: "{{ r_catalog_db_pod.resources[0].metadata.name }}"
+ command: 'pg_isready'
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Copy transactions.csv to postgres pod
+ kubernetes.core.k8s_cp:
+ namespace: "postgres"
+ pod: "{{ r_catalog_db_pod.resources[0].metadata.name }}"
+ remote_path: /tmp
+ local_path: "{{ local_dataset_path.dest }}data/transactions.csv"
+
+- name: Create transactions table
+ kubernetes.core.k8s_exec:
+ namespace: "postgres"
+ pod: "{{ r_catalog_db_pod.resources[0].metadata.name }}"
+ command: 'psql postgres postgres -c "CREATE TABLE transactions (id SERIAL, Time INTEGER, Amount NUMERIC(10,2), Class INTEGER, PRIMARY KEY (id));"'
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
+- name: Load csv into transactions table
+ kubernetes.core.k8s_exec:
+ namespace: "postgres"
+ pod: "{{ r_catalog_db_pod.resources[0].metadata.name }}"
+ command: "psql postgres postgres -c \"COPY transactions(id, Time, Amount, Class) FROM '/tmp/transactions.csv' DELIMITER ',' CSV HEADER;\""
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
+
- name: workload Tasks Complete
debug:
msg: workload Tasks Complete
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/postgres/postgres.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/postgres/postgres.yml.j2
new file mode 100644
index 00000000000..5087c7db2cf
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/postgres/postgres.yml.j2
@@ -0,0 +1,84 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: postgres
+ labels:
+ name: postgres
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: postgresql
+ namespace: postgres
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ storageClassName: gp2-csi
+ volumeMode: Filesystem
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: postgres
+ name: postgres
+ namespace: postgres
+spec:
+ selector:
+ matchLabels:
+ app: postgres
+ template:
+ metadata:
+ labels:
+ app: postgres
+ spec:
+ containers:
+ - env:
+ - name: POSTGRES_DB
+ value: postgres
+ - name: POSTGRES_USER
+ value: postgres
+ - name: POSTGRES_PASSWORD
+ value: r3dh4t1!
+ - name: PGDATA
+ value: /temp/data
+ image: postgres:latest
+ imagePullPolicy: Always
+ name: postgres
+ volumeMounts:
+ - mountPath: "/temp"
+ name: temp
+ ports:
+ - containerPort: 5432
+ protocol: TCP
+ resources:
+ limits:
+ cpu: 60m
+ memory: 512Mi
+ requests:
+ cpu: 30m
+ memory: 128Mi
+ volumes:
+ - name: temp
+ persistentVolumeClaim:
+ claimName: postgresql
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: postgres
+ name: postgres
+ namespace: postgres
+spec:
+ ports:
+ - name: http
+ port: 5432
+ protocol: TCP
+ selector:
+ app: postgres
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-deploymentconfig.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-deploymentconfig.yml.j2
new file mode 100644
index 00000000000..ed5510369e8
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-deploymentconfig.yml.j2
@@ -0,0 +1,101 @@
+kind: DeploymentConfig
+apiVersion: apps.openshift.io/v1
+metadata:
+ annotations:
+ template.alpha.openshift.io/wait-for-ready: 'true'
+ name: postgresql
+ generation: 2
+ namespace: {{ ocp4_workload.starburst.namespace }}
+spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 600
+ resources: {}
+ activeDeadlineSeconds: 21600
+ triggers:
+ - type: ImageChange
+ imageChangeParams:
+ automatic: true
+ containerNames:
+ - postgresql
+ from:
+ kind: ImageStreamTag
+ namespace: openshift
+ name: 'postgresql:10-el8'
+ lastTriggeredImage: >-
+ image-registry.openshift-image-registry.svc:5000/openshift/postgresql@sha256:c51c2456d92ce71905d72088fa14379eb27ec123860bc3b0d4564b7221eb9ca9
+ - type: ConfigChange
+ replicas: 1
+ revisionHistoryLimit: 10
+ test: false
+ selector:
+ name: postgresql
+ template:
+ metadata:
+ creationTimestamp: null
+ labels:
+ name: postgresql
+ spec:
+ volumes:
+ - name: postgresql-data
+ emptyDir: {}
+ containers:
+ - resources:
+ limits:
+ memory: 512Mi
+ readinessProbe:
+ exec:
+ command:
+ - /usr/libexec/check-container
+ initialDelaySeconds: 5
+ timeoutSeconds: 1
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ terminationMessagePath: /dev/termination-log
+ name: postgresql
+ livenessProbe:
+ exec:
+ command:
+ - /usr/libexec/check-container
+ - '--live'
+ initialDelaySeconds: 120
+ timeoutSeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ failureThreshold: 3
+ env:
+ - name: POSTGRESQL_USER
+ valueFrom:
+ secretKeyRef:
+ name: postgresql
+ key: database-user
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: postgresql
+ key: database-password
+ - name: POSTGRESQL_DATABASE
+ valueFrom:
+ secretKeyRef:
+ name: postgresql
+ key: database-name
+ securityContext:
+ capabilities: {}
+ privileged: false
+ ports:
+ - containerPort: 5432
+ protocol: TCP
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: postgresql-data
+ mountPath: /var/lib/pgsql/data
+ terminationMessagePolicy: File
+ image: >-
+ image-registry.openshift-image-registry.svc:5000/openshift/postgresql@sha256:c51c2456d92ce71905d72088fa14379eb27ec123860bc3b0d4564b7221eb9ca9
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+ schedulerName: default-scheduler
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-secret.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-secret.yml.j2
new file mode 100644
index 00000000000..f108bde072d
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-secret.yml.j2
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: postgresql
+ namespace: {{ ocp4_workload.starburst.namespace }}
+stringData:
+ database-name: starburst_query_logger
+ database-password: starburst
+ database-user: starburst
+type: Opaque
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-service.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-service.yml.j2
new file mode 100644
index 00000000000..c364e4c3250
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/cache/cache-service.yml.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: postgresql
+ namespace: {{ ocp4_workload.starburst.namespace }}
+spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ protocol: TCP
+ targetPort: 5432
+ selector:
+ name: postgresql
+ sessionAffinity: None
+ type: ClusterIP
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/operands/starburstenterprise.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/operands/starburstenterprise.yml.j2
index fc8ebbd28fd..2f7c2e53744 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/operands/starburstenterprise.yml.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_fraud_detection_usecase/templates/starburst/operands/starburstenterprise.yml.j2
@@ -15,6 +15,11 @@ spec:
repository: registry.connect.redhat.com/starburst/starburst-enterprise
tag: 402-e.1
catalogs:
+ postgres: |-
+ connector.name=postgresql
+ connection-url=jdbc:postgresql://postgres.postgres.svc.cluster.local:5432/
+ connection-password=r3dh4t1!
+ connection-user=postgres
s3: |-
connector.name=hive
hive.metastore.uri=thrift://hive:9083
@@ -24,6 +29,10 @@ spec:
hive.allow-drop-table=true
hive.metastore-cache-ttl=60m
hive.compression-codec=none
+ materialized-views.enabled=true
+ materialized-views.namespace=mv
+ materialized-views.storage-schema=mvstorage
+      cache-service.uri=http://coordinator.{{ ocp4_workload.starburst.namespace }}.svc.cluster.local:8180
coordinator:
heapHeadroomPercentage: 30
heapSizePercentage: 90
@@ -62,6 +71,10 @@ spec:
node-scheduler.include-coordinator=false
http-server.http.port=8080
discovery.uri=http://localhost:8080
+ insights.jdbc.url=jdbc:postgresql://postgresql.{{ ocp4_workload.starburst.namespace }}.svc.cluster.local:5432/starburst_query_logger
+ insights.jdbc.user=starburst
+ insights.jdbc.password=starburst
+ insights.persistence-enabled=true
log.properties: |
# Enable verbose logging from Starburst Enterprise
#io.trino=DEBUG
@@ -74,6 +87,12 @@ spec:
plugin.dir=/usr/lib/starburst/plugin
node.server-log-file=/var/log/starburst/server.log
node.launcher-log-file=/var/log/starburst/launcher.log
+ cache.properties: |
+ service-database.user=starburst
+ service-database.password=starburst
+      service-database.jdbc-url=jdbc:postgresql://postgresql.{{ ocp4_workload.starburst.namespace }}.svc.cluster.local:5432/
+ starburst.user=user
+ starburst.jdbc-url=jdbc:trino://coordinator:8080
resources:
limits:
cpu: 1
@@ -82,21 +101,89 @@ spec:
cpu: 1
memory: 2Gi
worker:
+ additionalProperties: ''
+ affinity: {}
autoscaling:
enabled: false
maxReplicas: 100
minReplicas: 1
targetCPUUtilizationPercentage: 80
+ deploymentAnnotations: {}
deploymentTerminationGracePeriodSeconds: 300
+ envFrom: []
+ etcFiles:
+ jvm.config: |
+ -server
+ -XX:G1HeapRegionSize=32M
+ -XX:+ExplicitGCInvokesConcurrent
+ -XX:+ExitOnOutOfMemoryError
+ -XX:+HeapDumpOnOutOfMemoryError
+ -XX:-OmitStackTraceInFastThrow
+ -XX:ReservedCodeCacheSize=512M
+ -XX:PerMethodRecompilationCutoff=10000
+ -XX:PerBytecodeRecompilationCutoff=10000
+ -Djdk.attach.allowAttachSelf=true
+ -Djdk.nio.maxCachedBufferSize=2000000
+ -XX:+UnlockDiagnosticVMOptions
+ -XX:+UseAESCTRIntrinsics
+ --add-opens=java.base/sun.nio.ch=ALL-UNNAMED
+ --add-opens=java.base/java.nio=ALL-UNNAMED
+ --add-opens=java.base/java.lang=ALL-UNNAMED
+ --add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED
+ -XX:-UseBiasedLocking
+ -XX:+UseG1GC
+ other: {}
+ properties:
+ config.properties: |
+ coordinator=false
+ http-server.http.port=8080
+ discovery.uri=http://coordinator:8080
+ log.properties: |
+ # Enable verbose logging from Starburst Enterprise
+ #io.trino=DEBUG
+ #com.starburstdata.presto=DEBUG
+ {% raw %}
+ node.properties: |
+ node.environment={{ include "starburst.environment" . }}
+ {% endraw %}
+ node.data-dir=/data/starburst
+ plugin.dir=/usr/lib/starburst/plugin
+ node.server-log-file=/var/log/starburst/server.log
+ node.launcher-log-file=/var/log/starburst/launcher.log
heapHeadroomPercentage: 30
heapSizePercentage: 90
- nodeMemoryHeadroom: 1Gi
- prestoWorkerShutdownGracePeriodSeconds: 120
- replicas: 1
+ initContainers: []
+ kedaScaler:
+ enabled: false
+ image:
+ pullPolicy: IfNotPresent
+ repository: registry.connect.redhat.com/starburst/keda-trino-scaler
+ tag: 0.1.7
+ port: 8021
+ scaledObjectSpec:
+ advanced: {}
+ cooldownPeriod: 300
+ idleReplicaCount: 0
+ maxReplicaCount: 100
+ minReplicaCount: 1
+ pollingInterval: 30
+ scaleTargetRef:
+ name: worker
+ triggers:
+ - metadata:
+ numberOfQueriesPerWorker: '10'
+ scaleInToIdleReplicaCountIfNoQueuedQueriesLeft: 'true'
+ scaleMethod: query_queue
+ type: external
+ nodeMemoryHeadroom: 2Gi
+ nodeSelector: {}
+ podAnnotations: {}
+ priorityClassName: null
+ replicas: 2
resources:
- limits:
- cpu: 1
- memory: 2Gi
- requests:
- cpu: 1
- memory: 2Gi
+ cpu: 3
+ memory: 12Gi
+ securityContext: {}
+ sidecars: []
+ starburstWorkerShutdownGracePeriodSeconds: 120
+ tolerations: []
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_gitops_sonarqube/templates/acm-application-set.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_gitops_sonarqube/templates/acm-application-set.yml.j2
index ac9d037b549..caf6ac456a0 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_gitops_sonarqube/templates/acm-application-set.yml.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_gitops_sonarqube/templates/acm-application-set.yml.j2
@@ -25,7 +25,7 @@ spec:
helm:
parameters:
- name: sonarqube.image
- value: docker.io/kenmoini/openshift-sonarqube
+ value: quay.io/redhat-gpte/sonarqube:7.7
- name: sonarqube.adminPassword
value: {{ ocp4_workload_gitops_sonarqube_admin_password }}
- name: sonarqube.namespace
@@ -39,4 +39,4 @@ spec:
selfHeal: true
syncOptions:
- CreateNamespace=true
- - PruneLast=true
\ No newline at end of file
+ - PruneLast=true
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/fuse/create-instance.yml b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/fuse/create-instance.yml
index 2bc1b73d61d..442e71873d0 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/fuse/create-instance.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/fuse/create-instance.yml
@@ -4,7 +4,7 @@
# Check 3scale tenant details for this user, need management URL for integration
- name: Get 3scale tenant details secret
- k8s_facts:
+ k8s_info:
kind: Secret
name: "{{ _tenant_admin_secret_name }}"
namespace: "{{ ocp4_workload_integreatly_threescale_namespace }}"
@@ -36,7 +36,7 @@
# Create fuse pull secret (not sure using k8s makes sense in this case)
- name: Get existing fuse pull secret
- k8s_facts:
+ k8s_info:
api_version: v1
kind: secret
name: syndesis-pull-secret
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/idp/create-sso-idp.yml b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/idp/create-sso-idp.yml
index f95bd0eb10d..fe18e34b014 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/idp/create-sso-idp.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/idp/create-sso-idp.yml
@@ -1,6 +1,6 @@
---
- name: Get OAuth URL
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: oauth-openshift
@@ -8,7 +8,7 @@
register: _action_get_oauth_route
- name: Get RHMI Cluster SSO URL
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: keycloak-edge
@@ -16,7 +16,7 @@
register: _action_get_cluster_sso_route
- name: Get cluster console resource
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Console
name: cluster
@@ -88,7 +88,7 @@
- name
- name: Get the RHMI custom resource
- k8s_facts:
+ k8s_info:
api_version: integreatly.org/v1alpha1
kind: RHMI
name: "{{ ocp4_workload_integreatly_custom_resource_name }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/threescale/create-tenant.yml b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/threescale/create-tenant.yml
index efc24b16dbc..47820d65df9 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/threescale/create-tenant.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/files/threescale/create-tenant.yml
@@ -5,7 +5,7 @@
# Check if tenant is already registered via secret
- name: Get tenant details secret
- k8s_facts:
+ k8s_info:
kind: Secret
name: "{{ _tenant_admin_secret_name }}"
namespace: "{{ ocp4_workload_integreatly_threescale_namespace }}"
@@ -18,7 +18,7 @@
## Create the secret
- block:
- name: Get 3scale detail secret {{ ocp4_workload_integreatly_seed_secret_name }}
- k8s_facts:
+ k8s_info:
kind: Secret
name: "{{ ocp4_workload_integreatly_seed_secret_name }}"
namespace: "{{ ocp4_workload_integreatly_threescale_namespace }}"
@@ -79,7 +79,7 @@
msg: Retrieving details for tenant {{ _tenant_id }}"
- name: Get tenant details secret
- k8s_facts:
+ k8s_info:
kind: Secret
name: "{{ _tenant_admin_secret_name }}"
namespace: "{{ ocp4_workload_integreatly_threescale_namespace }}"
@@ -95,7 +95,7 @@
# Update SSO client with redirect URL for client
- name: Get 3scale workshop SSO client
- k8s_facts:
+ k8s_info:
api_version: "{{ ocp4_workload_integreatly_threescale_sso_client_version }}"
kind: "{{ ocp4_workload_integreatly_threescale_sso_client_kind }}"
name: "{{ ocp4_workload_integreatly_threescale_sso_client_name }}"
@@ -109,7 +109,7 @@
_sso_redirect_uris: "{{ (_action_get_client.resources[0].spec.client.redirectUris | default([])) + [(_tenant_host + '/*' | string)] }}"
- name: Get managed 3scale SSO client
- k8s_facts:
+ k8s_info:
api_version: "{{ ocp4_workload_integreatly_threescale_sso_client_version }}"
kind: "{{ ocp4_workload_integreatly_threescale_sso_client_kind }}"
name: "{{ ocp4_workload_integreatly_threescale_managed_sso_client }}"
@@ -138,7 +138,7 @@
# Update 3scale tenant with SSO option
- name: Get SSO route
- k8s_facts:
+ k8s_info:
kind: Route
name: "{{ ocp4_workload_integreatly_sso_route_name }}"
namespace: "{{ ocp4_workload_integreatly_threescale_sso_namespace }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/post_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/post_workload.yml
index d090d39d797..ec3d4a4588d 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/post_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/post_workload.yml
@@ -170,7 +170,7 @@
# Wait for installation to complete
- name: Get RHMI custom resource
- k8s_facts:
+ k8s_info:
api_version: v1alpha1
kind: RHMI
name: "{{ ocp4_workload_integreatly_custom_resource_name }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/remove_workload.yml
index 176560fe542..53ef51329c2 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/remove_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/remove_workload.yml
@@ -13,7 +13,7 @@
namespace: "{{ ocp4_workload_integreatly_namespace }}"
- name: Wait for RHMI to uninstall
- k8s_facts:
+ k8s_info:
api_version: v1alpha1
kind: RHMI
namespace: '{{ ocp4_workload_integreatly_namespace }}'
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/workload.yml
index 598ba5c34fe..a8b2e4f26c0 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_integreatly/tasks/workload.yml
@@ -36,7 +36,7 @@
# should speed up the start of rhmi installations on clusters with more than 3
# worker nodes.
- name: Check worker MachineConfigPool has enough available nodes
- k8s_facts:
+ k8s_info:
api_version: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
name: "{{ ocp4_workload_integreatly_machineconfigpool_name }}"
@@ -95,7 +95,7 @@
- minio-route.yml.j2
- name: Create external Minio instance route
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
name: "{{ ocp4_workload_integreatly_minio_route_name }}"
@@ -285,7 +285,7 @@
until: _create_subscription is succeeded
- name: Check RHMI custom resource is in a ready state
- k8s_facts:
+ k8s_info:
api_version: integreatly.org/v1alpha1
kind: RHMI
name: "{{ ocp4_workload_integreatly_custom_resource_name }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_lpe_automation_controller/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_lpe_automation_controller/tasks/workload.yml
index a99c6bbedb2..1431e523f0d 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_lpe_automation_controller/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_lpe_automation_controller/tasks/workload.yml
@@ -72,7 +72,7 @@
definition: "{{ lookup('template', './templates/automationcontroller.j2' ) | from_yaml }}"
- name: Retrieve created route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: "{{ automation_controller_app_name }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/defaults/main.yaml b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/defaults/main.yaml
index a34ffa4a857..bf68916d899 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/defaults/main.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/defaults/main.yaml
@@ -6,7 +6,7 @@ silent: false
# ------------------------------------------------
# RHV Environment
# ------------------------------------------------
-ocp4_workload_mad_roadshow_rhv_host: rhvm.dev.cnv.infra.opentlc.com
+ocp4_workload_mad_roadshow_rhv_host: rhvm-pub.cnv.infra.opentlc.com
ocp4_workload_mad_roadshow_rhv_url: https://{{ ocp4_workload_mad_roadshow_rhv_host }}/ovirt-engine/api
# Admin account on RHV, Set password from secrets
@@ -54,7 +54,9 @@ ocp4_workload_mad_roadshow_vm_cluster: Default
ocp4_workload_mad_roadshow_vm_user_name: lab-user
ocp4_workload_mad_roadshow_vm_user_password: ""
ocp4_workload_mad_roadshow_vm_user_password_length: 12
-
+# yamllint disable rule:line-length
+ocp4_workload_mad_roadshow_vm_user_public_ssh_key: |-
+ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvZvn+GL0wTOsAdh1ikIQoqj2Fw/RA6F14O347rgKdpkgOQpGQk1k2gM8wcla2Y1o0bPIzwlNy1oh5o9uNjZDMeDcEXWuXbu0cRBy4pVRhh8a8zAZfssnqoXHHLyPyHWpdTmgIhr0UIGYrzHrnySAnUcDp3gJuE46UEBtrlyv94cVvZf+EZUTaZ+2KjTRLoNryCn7vKoGHQBooYg1DeHLcLSRWEADUo+bP0y64+X/XTMZOAXbf8kTXocqAgfl/usbYdfLOgwU6zWuj8vxzAKuMEXS1AJSp5aeqRKlbbw40IkTmLoQIgJdb2Zt98BH/xHDe9xxhscUCfWeS37XLp75J
# ------------------------------------------------
# Oracle VM Properties
# ------------------------------------------------
@@ -70,6 +72,10 @@ ocp4_workload_mad_roadshow_oracle_vm_template: rhel85-empty
# ocp4_workload_mad_roadshow_oracle_vm_template: ama-template-oracle
ocp4_workload_mad_roadshow_oracle_vm_name: "oracle-{{ guid | default(xxxxx) }}"
+ocp4_workload_mad_roadshow_oracle_vm_cpu_cores: 1
+ocp4_workload_mad_roadshow_oracle_vm_cpu_sockets: 2
+ocp4_workload_mad_roadshow_oracle_vm_memory: 8GiB
+
# https://yum.oracle.com/repo/OracleLinux/OL8/appstream/x86_64/getPackage/oracle-database-preinstall-21c-1.0-1.el8.x86_64.rpm
ocp4_workload_mad_roadshow_oracle_preinstall_rpm: https://gpte-public.s3.amazonaws.com/ama_demo/oracle-database-preinstall-21c-1.0-1.el8.x86_64.rpm
# https://download.oracle.com/otn-pub/otn_software/db-express/oracle-database-xe-21c-1.0-1.ol8.x86_64.rpm
@@ -95,6 +101,10 @@ ocp4_workload_mad_roadshow_tomcat_vm_template: rhel85-empty
# ocp4_workload_mad_roadshow_tomcat_vm_template: ama-template-tomcat
ocp4_workload_mad_roadshow_tomcat_vm_name: "tomcat-{{ guid | default(xxxxx) }}"
+ocp4_workload_mad_roadshow_tomcat_vm_cpu_cores: 1
+ocp4_workload_mad_roadshow_tomcat_vm_cpu_sockets: 1
+ocp4_workload_mad_roadshow_tomcat_vm_memory: 4GiB
+
# Tomcat Download URL
ocp4_workload_mad_roadshow_tomcat_download_url: https://gpte-public.s3.amazonaws.com/apache-tomcat-9.0.64.tar.gz
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/jboss-webserver56.yaml b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/files/imagestream-jboss-webserver56.yaml
similarity index 100%
rename from ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/jboss-webserver56.yaml
rename to ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/files/imagestream-jboss-webserver56.yaml
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-oracle-vm.yml b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-oracle-vm.yml
index 3f2adc8a590..3d16947344a 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-oracle-vm.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-oracle-vm.yml
@@ -1,5 +1,5 @@
---
-- name: Create and start Oracle VM
+- name: Create Oracle VM
ovirt.ovirt.ovirt_vm:
auth:
insecure: true
@@ -8,12 +8,16 @@
template: "{{ ocp4_workload_mad_roadshow_oracle_vm_template }}"
cluster: "{{ ocp4_workload_mad_roadshow_vm_cluster }}"
storage_domain: "{{ ocp4_workload_mad_roadshow_rhv_storage }}"
+ cpu_cores: "{{ ocp4_workload_mad_roadshow_oracle_vm_cpu_cores | int }}"
+ cpu_sockets: "{{ ocp4_workload_mad_roadshow_oracle_vm_cpu_sockets | int }}"
+ memory: "{{ ocp4_workload_mad_roadshow_oracle_vm_memory }}"
cloud_init:
host_name: "{{ ocp4_workload_mad_roadshow_oracle_vm_name }}"
user_name: "{{ ocp4_workload_mad_roadshow_vm_user_name }}"
root_password: "{{ ocp4_workload_mad_roadshow_vm_user_password }}"
nic_boot_protocol: dhcp
nic_name: eth0
+ authorized_ssh_keys: "{{ ocp4_workload_mad_roadshow_vm_user_public_ssh_key }}"
cloud_init_persist: true
- name: Get Oracle VM NIC
@@ -66,14 +70,17 @@
module: shell
args: ssh-keyscan -H "{{ _ocp4_workload_mad_roadshow_oracle_ip }}" >> $HOME/.ssh/known_hosts
-- name: Wait for Oracle database to be running
- ansible.builtin.wait_for:
- host: "{{ _ocp4_workload_mad_roadshow_oracle_ip }}"
- port: 1521
- state: started
- timeout: 300
- register: r_wait_for_database
+- name: Wait for Oracle database only if using template
+ when: not ocp4_workload_mad_roadshow_oracle_vm_install_from_scratch | bool
+ block:
+ - name: Wait for Oracle database to be running
+ ansible.builtin.wait_for:
+ host: "{{ _ocp4_workload_mad_roadshow_oracle_ip }}"
+ port: 1521
+ state: started
+ timeout: 300
+ register: r_wait_for_database
-- name: Print result of wait step
- ansible.builtin.debug:
- msg: "{{ r_wait_for_database }}"
+ - name: Print result of wait step
+ ansible.builtin.debug:
+ msg: "{{ r_wait_for_database }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-tomcat-vm.yml b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-tomcat-vm.yml
index 5291a3e9f8a..e3d900bc53a 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-tomcat-vm.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/rhv-setup-tomcat-vm.yml
@@ -8,12 +8,16 @@
template: "{{ ocp4_workload_mad_roadshow_tomcat_vm_template }}"
cluster: "{{ ocp4_workload_mad_roadshow_vm_cluster }}"
storage_domain: "{{ ocp4_workload_mad_roadshow_rhv_storage }}"
+ cpu_cores: "{{ ocp4_workload_mad_roadshow_tomcat_vm_cpu_cores | int }}"
+ cpu_sockets: "{{ ocp4_workload_mad_roadshow_tomcat_vm_cpu_sockets | int }}"
+ memory: "{{ ocp4_workload_mad_roadshow_tomcat_vm_memory }}"
cloud_init:
host_name: "{{ ocp4_workload_mad_roadshow_tomcat_vm_name }}"
user_name: "{{ ocp4_workload_mad_roadshow_vm_user_name }}"
root_password: "{{ ocp4_workload_mad_roadshow_vm_user_password }}"
nic_boot_protocol: dhcp
nic_name: eth0
+ authorized_ssh_keys: "{{ ocp4_workload_mad_roadshow_vm_user_public_ssh_key }}"
cloud_init_persist: true
- name: Get Tomcat VM NIC
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/vm-common-install-packages.yml b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/vm-common-install-packages.yml
index 71771b9249a..58b36d7c0c9 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/vm-common-install-packages.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/vm-common-install-packages.yml
@@ -14,11 +14,13 @@
- name: Register with activation-key for Satellite
community.general.redhat_subscription:
state: present
- consumer_name: "{{ _ocp4_workload_mad_roadshow_name }}"
- server_hostname: "https://{{ set_repositories_satellite_url }}:8443/rhsm"
+ consumer_name: "{{ _ocp4_workload_mad_roadshow_vm_name }}"
+ server_hostname: "{{ set_repositories_satellite_url }}"
+ server_port: 8443
+ server_prefix: /rhsm
rhsm_baseurl: "https://{{ set_repositories_satellite_url }}/pulp/repos"
activationkey: "{{ set_repositories_satellite_activationkey }}"
- org_id: "{{ set_repositories_satellite_org }}"
+ org_id: "{{ set_repositories_satellite_org | default(satellite_org) }}"
pool: "{{ set_repositories_satellite_pool | default(omit) }}"
force_register: false
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/workload.yml
index 89c7e93b1c8..91752dd0643 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/tasks/workload.yml
@@ -120,7 +120,7 @@
- name: Set up RHV IM user
ansible.builtin.include_tasks: rhv-setup-im-user.yml
-- name: Create a VMs in RHV
+- name: Create VMs in RHV
environment:
OVIRT_URL: "{{ ocp4_workload_mad_roadshow_rhv_url }}"
OVIRT_USERNAME: "{{ ocp4_workload_mad_roadshow_rhv_admin_user_name }}"
@@ -148,7 +148,7 @@
become: true
vars:
ansible_ssh_user: "{{ ocp4_workload_mad_roadshow_vm_user_name }}"
- _ocp4_workload_mad_roadshow_name: "{{ ocp4_workload_mad_roadshow_oracle_vm_name }}"
+ _ocp4_workload_mad_roadshow_vm_name: "{{ ocp4_workload_mad_roadshow_oracle_vm_name }}"
block:
- name: Configure Oracle database VM (packages)
when: ocp4_workload_mad_roadshow_oracle_vm_install_from_scratch | bool
@@ -170,7 +170,7 @@
become: true
vars:
ansible_ssh_user: "{{ ocp4_workload_mad_roadshow_vm_user_name }}"
- _ocp4_workload_mad_roadshow_name: "{{ ocp4_workload_mad_roadshow_tomcat_vm_name }}"
+ _ocp4_workload_mad_roadshow_vm_name: "{{ ocp4_workload_mad_roadshow_tomcat_vm_name }}"
block:
- name: Configure Tomcat VM (packages)
when: ocp4_workload_mad_roadshow_tomcat_vm_install_from_scratch | bool
@@ -404,14 +404,9 @@
label: "{{ ocp4_workload_mad_roadshow_gitea_user_prefix }}{{ n }}"
- name: Create JBoss Web Server 5.6 ImageStream
- k8s:
+ kubernetes.core.k8s:
state: present
- merge_type:
- - strategic-merge
- - merge
- definition: "{{ lookup('file', item ) | from_yaml }}"
- loop:
- - jboss-webserver56.yaml
+ definition: "{{ lookup('file', 'imagestream-jboss-webserver56.yaml' ) | from_yaml }}"
# Cleanup Private Key
- name: Remove private key
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/templates/oracle/setup-customer-database.sql.j2 b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/templates/oracle/setup-customer-database.sql.j2
index d19d2961944..6a2d461a77e 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/templates/oracle/setup-customer-database.sql.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_mad_roadshow/templates/oracle/setup-customer-database.sql.j2
@@ -5,6 +5,4 @@ GRANT CREATE SESSION TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
GRANT CREATE TABLE TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
GRANT CREATE SEQUENCE TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
GRANT UNLIMITED TABLESPACE TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
-GRANT CONNECT TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
GRANT CREATE SESSION GRANT ANY PRIVILEGE TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
-GRANT UNLIMITED TABLESPACE TO {{ ocp4_workload_mad_roadshow_oracle_db_user }};
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/knative.yml b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/knative.yml
index 2800a8601f5..43d3f314031 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/knative.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/knative.yml
@@ -7,7 +7,7 @@
namespace: "{{ user_project }}"
- name: "Wait for Knative CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/serverless.yml b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/serverless.yml
index 40c0153f220..708fb2add80 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/serverless.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/serverless.yml
@@ -12,7 +12,7 @@
namespace: knative-serving
- name: "Wait for Knative CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/storage.yml b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/storage.yml
index 0b0cb1de859..39d755c8d74 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/storage.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_infra/tasks/storage.yml
@@ -52,7 +52,7 @@
channel: " {{ ocs_channel }}"
- name: "Wait for Storage CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
@@ -68,7 +68,7 @@
definition: "{{ lookup('template', 'storage/storagecluster.yml.j2') }}"
- name: "Waiting for Noobaa to become ready"
- k8s_facts:
+ k8s_info:
api_version: "noobaa.io/v1alpha1"
kind: NooBaa
namespace: "{{ ocs_namespace }}"
@@ -100,7 +100,7 @@
definition: "{{ lookup('template', 'storage/pv_pool_objectbucketclaim.yml.j2') }}"
- name: "Wait for Bucket to exist"
- k8s_facts:
+ k8s_info:
api_version: "objectbucket.io/v1alpha1"
kind: ObjectBucketClaim
name: "{{ ocs_mcg_pv_pool_bucket_name }}"
@@ -111,7 +111,7 @@
delay: 10
- name: "Wait for Bucket to have status"
- k8s_facts:
+ k8s_info:
api_version: "objectbucket.io/v1alpha1"
kind: ObjectBucketClaim
name: "{{ ocs_mcg_pv_pool_bucket_name }}"
@@ -122,7 +122,7 @@
delay: 10
- name: "Wait for Bucket to become bound"
- k8s_facts:
+ k8s_info:
api_version: "objectbucket.io/v1alpha1"
kind: ObjectBucketClaim
name: "{{ ocs_mcg_pv_pool_bucket_name }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_user/tasks/open_data_hub.yml b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_user/tasks/open_data_hub.yml
index 23e128bbacd..80f2d655586 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_user/tasks/open_data_hub.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_ml_workflows_user/tasks/open_data_hub.yml
@@ -102,7 +102,7 @@
suffix: opentlc-mgr
- name: "Wait for Open Data Hub CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/README.md b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/README.md
new file mode 100644
index 00000000000..6204febeb27
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/README.md
@@ -0,0 +1 @@
+### NFD and NVIDIA GPU Setup Role ###
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/defaults/main.yml
new file mode 100644
index 00000000000..3c750f297ee
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/defaults/main.yml
@@ -0,0 +1,28 @@
+become_override: false
+silent: false
+
+# ------------------------------------------------
+# Node Feature Discovery Operator
+# ------------------------------------------------
+nfd_operator_namespace: openshift-nfd
+nfd_operator_channel: "stable"
+nfd_operator_automatic_install_plan_approval: true
+nfd_operator_starting_csv: "nfd.4.12.0-202307182142"
+nfd_operator_wait_for_deploy: true
+nfd_operator_use_catalog_snapshot: false
+nfd_operator_catalogsource_name: ""
+nfd_operator_catalog_snapshot_image: ""
+nfd_operator_catalog_snapshot_image_tag: ""
+
+# ------------------------------------------------
+# NVIDIA GPU Operator
+# ------------------------------------------------
+nvidia_gpu_operator_namespace: nvidia-gpu-operator
+nvidia_gpu_operator_channel: v23.6
+nvidia_gpu_operator_automatic_install_plan_approval: true
+nvidia_gpu_operator_starting_csv: gpu-operator-certified.v23.6.0
+nvidia_gpu_operator_wait_for_deploy: true
+nvidia_gpu_operator_use_catalog_snapshot: false
+nvidia_gpu_operator_catalogsource_name: ""
+nvidia_gpu_operator_catalog_snapshot_image: ""
+nvidia_gpu_operator_catalog_snapshot_image_tag: ""
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_cr.yaml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_cr.yaml
new file mode 100644
index 00000000000..3dcc78cedfc
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_cr.yaml
@@ -0,0 +1,129 @@
+---
+apiVersion: nfd.openshift.io/v1
+kind: NodeFeatureDiscovery
+metadata:
+ name: nfd-instance
+ namespace: openshift-nfd
+spec:
+ operand:
+ image: >-
+ registry.redhat.io/openshift4/ose-node-feature-discovery:v4.12
+ imagePullPolicy: Always
+ servicePort: 12000
+ workerConfig:
+ configData: |
+ core:
+ # labelWhiteList:
+ # noPublish: false
+ sleepInterval: 60s
+ # sources: [all]
+ # klog:
+ # addDirHeader: false
+ # alsologtostderr: false
+ # logBacktraceAt:
+ # logtostderr: true
+ # skipHeaders: false
+ # stderrthreshold: 2
+ # v: 0
+ # vmodule:
+ ## NOTE: the following options are not dynamically run-time
+ ## configurable and require a nfd-worker restart to take effect
+ ## after being changed
+ # logDir:
+ # logFile:
+ # logFileMaxSize: 1800
+ # skipLogHeaders: false
+ sources:
+ # cpu:
+ # cpuid:
+ ## NOTE: whitelist has priority over blacklist
+ # attributeBlacklist:
+ # - "BMI1"
+ # - "BMI2"
+ # - "CLMUL"
+ # - "CMOV"
+ # - "CX16"
+ # - "ERMS"
+ # - "F16C"
+ # - "HTT"
+ # - "LZCNT"
+ # - "MMX"
+ # - "MMXEXT"
+ # - "NX"
+ # - "POPCNT"
+ # - "RDRAND"
+ # - "RDSEED"
+ # - "RDTSCP"
+ # - "SGX"
+ # - "SSE"
+ # - "SSE2"
+ # - "SSE3"
+ # - "SSE4.1"
+ # - "SSE4.2"
+ # - "SSSE3"
+ # attributeWhitelist:
+ # kernel:
+ # kconfigFile: "/path/to/kconfig"
+ # configOpts:
+ # - "NO_HZ"
+ # - "X86"
+ # - "DMI"
+ pci:
+ deviceClassWhitelist:
+ - "0200"
+ - "03"
+ - "12"
+ deviceLabelFields:
+ # - "class"
+ - "vendor"
+ # - "device"
+ # - "subsystem_vendor"
+ # - "subsystem_device"
+ # usb:
+ # deviceClassWhitelist:
+ # - "0e"
+ # - "ef"
+ # - "fe"
+ # - "ff"
+ # deviceLabelFields:
+ # - "class"
+ # - "vendor"
+ # - "device"
+ # custom:
+ # - name: "my.kernel.feature"
+ # matchOn:
+ # - loadedKMod: ["example_kmod1", "example_kmod2"]
+ # - name: "my.pci.feature"
+ # matchOn:
+ # - pciId:
+ # class: ["0200"]
+ # vendor: ["15b3"]
+ # device: ["1014", "1017"]
+ # - pciId :
+ # vendor: ["8086"]
+ # device: ["1000", "1100"]
+ # - name: "my.usb.feature"
+ # matchOn:
+ # - usbId:
+ # class: ["ff"]
+ # vendor: ["03e7"]
+ # device: ["2485"]
+ # - usbId:
+ # class: ["fe"]
+ # vendor: ["1a6e"]
+ # device: ["089a"]
+ # - name: "my.combined.feature"
+ # matchOn:
+ # - pciId:
+ # vendor: ["15b3"]
+ # device: ["1014", "1017"]
+ # loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+ customConfig:
+ configData: |
+ # - name: "more.kernel.features"
+ # matchOn:
+ # - loadedKMod: ["example_kmod3"]
+ # - name: "more.features.by.nodename"
+ # value: customValue
+ # matchOn:
+ # - nodename: ["special-.*-node-.*"]
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_operatorgroup.yaml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_operatorgroup.yaml
new file mode 100644
index 00000000000..222c6418822
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_operatorgroup.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ generateName: openshift-nfd-
+ name: openshift-nfd
+ namespace: openshift-nfd
+spec:
+ targetNamespaces:
+ - openshift-nfd
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_sub.yaml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_sub.yaml
new file mode 100644
index 00000000000..7dc0b66ea64
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nodefeature_discovery_sub.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: nfd
+ namespace: openshift-nfd
+spec:
+ channel: "stable"
+ installPlanApproval: Automatic
+ name: nfd
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_clusterpolicy.json b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_clusterpolicy.json
new file mode 100644
index 00000000000..e3f338e6ee7
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_clusterpolicy.json
@@ -0,0 +1,121 @@
+{
+ "apiVersion": "nvidia.com/v1",
+ "kind": "ClusterPolicy",
+ "metadata": {
+ "name": "gpu-cluster-policy"
+ },
+ "spec": {
+ "operator": {
+ "defaultRuntime": "crio",
+ "use_ocp_driver_toolkit": true,
+ "initContainer": {}
+ },
+ "sandboxWorkloads": {
+ "enabled": false,
+ "defaultWorkload": "container"
+ },
+ "driver": {
+ "enabled": true,
+ "upgradePolicy": {
+ "autoUpgrade": true,
+ "drain": {
+ "deleteEmptyDir": false,
+ "enable": false,
+ "force": false,
+ "timeoutSeconds": 300
+ },
+ "maxParallelUpgrades": 1,
+ "maxUnavailable": "25%",
+ "podDeletion": {
+ "deleteEmptyDir": false,
+ "force": false,
+ "timeoutSeconds": 300
+ },
+ "waitForCompletion": {
+ "timeoutSeconds": 0
+ }
+ },
+ "repoConfig": {
+ "configMapName": ""
+ },
+ "certConfig": {
+ "name": ""
+ },
+ "licensingConfig": {
+ "nlsEnabled": false,
+ "configMapName": ""
+ },
+ "virtualTopology": {
+ "config": ""
+ },
+ "kernelModuleConfig": {
+ "name": ""
+ }
+ },
+ "dcgmExporter": {
+ "enabled": true,
+ "config": {
+ "name": ""
+ },
+ "serviceMonitor": {
+ "enabled": true
+ }
+ },
+ "dcgm": {
+ "enabled": true
+ },
+ "daemonsets": {
+ "updateStrategy": "RollingUpdate",
+ "rollingUpdate": {
+ "maxUnavailable": "1"
+ }
+ },
+ "devicePlugin": {
+ "enabled": true,
+ "config": {
+ "name": "",
+ "default": ""
+ }
+ },
+ "gfd": {
+ "enabled": true
+ },
+ "migManager": {
+ "enabled": true
+ },
+ "nodeStatusExporter": {
+ "enabled": true
+ },
+ "mig": {
+ "strategy": "single"
+ },
+ "toolkit": {
+ "enabled": true
+ },
+ "validator": {
+ "plugin": {
+ "env": [
+ {
+ "name": "WITH_WORKLOAD",
+ "value": "false"
+ }
+ ]
+ }
+ },
+ "vgpuManager": {
+ "enabled": false
+ },
+ "vgpuDeviceManager": {
+ "enabled": true
+ },
+ "sandboxDevicePlugin": {
+ "enabled": true
+ },
+ "vfioManager": {
+ "enabled": true
+ },
+ "gds": {
+ "enabled": false
+ }
+ }
+}
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_operatorgroup.yaml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_operatorgroup.yaml
new file mode 100644
index 00000000000..9b74f108864
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_operatorgroup.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+ name: nvidia-gpu-operator-group
+ namespace: nvidia-gpu-operator
+spec:
+ targetNamespaces:
+ - nvidia-gpu-operator
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_sub copy.yaml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_sub copy.yaml
new file mode 100644
index 00000000000..4811056a36c
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/files/nvidia_gpu_sub copy.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: gpu-operator-certified
+ namespace: nvidia-gpu-operator
+spec:
+ channel: "{{ _ocp4_workload_nvidia_gpu_operator_channel }}"
+ installPlanApproval: Automatic
+ name: gpu-operator-certified
+ source: certified-operators
+ sourceNamespace: openshift-marketplace
+ startingCSV: "{{ _ocp4_workload_nvidia_gpu_operator_csv }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/main.yml
new file mode 100644
index 00000000000..fbf3df9760f
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+# Do not modify this file
+
+- name: Running Pre Workload Tasks
+ ansible.builtin.include_tasks:
+ file: ./pre_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Workload Tasks
+ ansible.builtin.include_tasks:
+ file: ./workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Post Workload Tasks
+ ansible.builtin.include_tasks:
+ file: ./post_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "create" or ACTION == "provision"
+
+- name: Running Workload removal Tasks
+ ansible.builtin.include_tasks:
+ file: ./remove_workload.yml
+ apply:
+ become: "{{ become_override | bool }}"
+ when: ACTION == "destroy" or ACTION == "remove"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/nfd_operator.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/nfd_operator.yml
new file mode 100644
index 00000000000..00c9bd34f5b
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/nfd_operator.yml
@@ -0,0 +1,34 @@
+---
+- name: "Ensure nfd namespace exists"
+ kubernetes.core.k8s:
+ state: present
+ api_version: v1
+ kind: Namespace
+ name: "{{ nfd_operator_namespace }}"
+
+- name: Create NodeFeatureDiscovery operatorgroup
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('file', 'nodefeature_discovery_operatorgroup.yaml') | from_yaml }}"
+ register: operatorgroup_result
+ until: operatorgroup_result is not failed
+ retries: 10
+ delay: 6
+
+- name: Create NodeFeatureDiscovery subscription
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('file', 'nodefeature_discovery_sub.yaml') | from_yaml }}"
+ register: subscription_result
+ until: subscription_result is not failed
+ retries: 10
+ delay: 6
+
+- name: Create NodeFeatureDiscovery Custom Resource
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('file', 'nodefeature_discovery_cr.yaml') | from_yaml }}"
+ register: result
+ until: result is not failed
+ retries: 10
+ delay: 6
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/nvidia_gpu_operator.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/nvidia_gpu_operator.yml
new file mode 100644
index 00000000000..b58166324bd
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/nvidia_gpu_operator.yml
@@ -0,0 +1,32 @@
+---
+- name: "Ensure nvidia_gpu namespace exists"
+ kubernetes.core.k8s:
+ state: present
+ api_version: v1
+ kind: Namespace
+ name: "{{ nvidia_gpu_operator_namespace }}"
+
+- name: Create NVIDIA GPU operatorgroup
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('file', 'nvidia_gpu_operatorgroup.yaml') | from_yaml }}"
+ register: operatorgroup_result
+ retries: 10
+ delay: 6
+
+- name: Create NVIDIA GPU subscription
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('template', 'nvidia_gpu_sub.yaml.j2') | from_yaml }}"
+ register: subscription_result
+ retries: 20
+ delay: 6
+
+- name: 120 second pause for NVIDIA GPU operator setup
+  ansible.builtin.pause:
+ seconds: 120
+
+- name: Setup NVIDIA GPU Cluster Policy
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('file', 'nvidia_gpu_clusterpolicy.json') | from_yaml }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/post_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/post_workload.yml
new file mode 100644
index 00000000000..ed7841d0fe2
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/post_workload.yml
@@ -0,0 +1,27 @@
+---
+# Implement your Post Workload deployment tasks here
+# --------------------------------------------------
+
+
+# Leave these as the last tasks in the playbook
+# ---------------------------------------------
+
+# For deployment onto a dedicated cluster (as part of the
+# cluster deployment) set workload_shared_deployment to False
+# This is the default so it does not have to be set explicitly
+- name: Post_workload tasks complete
+ ansible.builtin.debug:
+ msg: "Post-Workload tasks completed successfully."
+ when:
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
+
+# For RHPDS deployment (onto a shared cluster) set
+# workload_shared_deployment to True
+# (in the deploy script or AgnosticV configuration)
+- name: Post_workload tasks complete
+ ansible.builtin.debug:
+ msg: "Post-Software checks completed successfully"
+ when:
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/pre_workload.yml
new file mode 100644
index 00000000000..90778bd6d72
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/pre_workload.yml
@@ -0,0 +1,26 @@
+---
+# Implement your Pre Workload deployment tasks here
+# -------------------------------------------------
+
+# Leave these as the last tasks in the playbook
+# ---------------------------------------------
+
+# For deployment onto a dedicated cluster (as part of the
+# cluster deployment) set workload_shared_deployment to False
+# This is the default so it does not have to be set explicitly
+- name: Pre_workload tasks complete
+ ansible.builtin.debug:
+ msg: "Pre-Workload tasks completed successfully."
+ when:
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
+
+# For RHPDS deployment (onto a shared cluster) set
+# workload_shared_deployment to True
+# (in the deploy script or AgnosticV configuration)
+- name: Pre_workload tasks complete
+ ansible.builtin.debug:
+ msg: "Pre-Software checks completed successfully"
+ when:
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/remove_workload.yml
new file mode 100644
index 00000000000..005f915ac8e
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/remove_workload.yml
@@ -0,0 +1,48 @@
+---
+- name: Remove NVIDIA GPU Cluster Policy
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'nvidia_gpu_clusterpolicy.json') | from_yaml }}"
+
+- name: Remove NVIDIA GPU subscription
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('template', 'nvidia_gpu_sub.yaml.j2') | from_yaml }}"
+
+- name: Remove NVIDIA GPU operatorgroup
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'nvidia_gpu_operatorgroup.yaml') | from_yaml }}"
+
+- name: Remove NodeFeatureDiscovery Custom Resource
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'nodefeature_discovery_cr.yaml') | from_yaml }}"
+
+- name: Remove NodeFeatureDiscovery subscription
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'nodefeature_discovery_sub.yaml') | from_yaml }}"
+
+- name: Remove NodeFeatureDiscovery operatorgroup
+ kubernetes.core.k8s:
+ state: absent
+ definition: "{{ lookup('file', 'nodefeature_discovery_operatorgroup.yaml') | from_yaml }}"
+
+- name: 60 second pause
+ pause:
+ seconds: 60
+
+- name: "Remove nvidia_gpu namespace "
+ kubernetes.core.k8s:
+ state: absent
+ api_version: v1
+ kind: Namespace
+ name: "{{ nvidia_gpu_operator_namespace }}"
+
+- name: "Remove nfd namespace "
+ kubernetes.core.k8s:
+ state: absent
+ api_version: v1
+ kind: Namespace
+ name: "{{ nfd_operator_namespace }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/workload.yml
new file mode 100644
index 00000000000..9a0ba1b30bb
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/tasks/workload.yml
@@ -0,0 +1,10 @@
+---
+- name: Install Node Feature Discovery Operator
+ ansible.builtin.include_tasks: nfd_operator.yml
+
+- name: Install NVIDIA GPU Operator
+ ansible.builtin.include_tasks: nvidia_gpu_operator.yml
+
+- name: 60 second pause
+ pause:
+ seconds: 60
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/templates/namespace.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/templates/namespace.yaml.j2
new file mode 100644
index 00000000000..7ca96bd2705
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/templates/namespace.yaml.j2
@@ -0,0 +1,7 @@
+{% for __namespace in r_namespaces %}
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: '{{ __namespace.name }}'
+{% endfor %}
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/templates/nvidia_gpu_sub.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/templates/nvidia_gpu_sub.yaml.j2
new file mode 100644
index 00000000000..e1e692071ae
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_nvidia_gpu_setup/templates/nvidia_gpu_sub.yaml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: gpu-operator-certified
+ namespace: nvidia-gpu-operator
+spec:
+ channel: '{{ nvidia_gpu_operator_channel }}'
+ installPlanApproval: Automatic
+ name: gpu-operator-certified
+ source: certified-operators
+ sourceNamespace: openshift-marketplace
+ startingCSV: '{{ nvidia_gpu_operator_starting_csv }}'
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_opendatahub_old/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_opendatahub_old/tasks/workload.yml
index c52b835f4d1..472898e6433 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_opendatahub_old/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_opendatahub_old/tasks/workload.yml
@@ -30,7 +30,7 @@
suffix: opentlc-mgr
- name: "Wait for Open Data Hub CRD's to exist"
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/defaults/main.yaml b/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/defaults/main.yaml
index 146a645b842..9adbfff75ca 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/defaults/main.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/defaults/main.yaml
@@ -12,5 +12,5 @@ ocp4_workload_opentour_dach_2022_user_count: >-
ocp4_workload_opentour_dach_2022_user_prefix: >-
{{ (ocp4_workload_authentication_htpasswd_user_base | default('opentour')) ~ '-' }}
-ocp4_workload_opentour_dach_2022_infra_repo: https://github.com/sa-mw-dach/opentour-2022-gitops-infra.git
-ocp4_workload_opentour_dach_2022_infra_repo_tag: HEAD
+ocp4_workload_opentour_dach_2022_infra_repo: https://github.com/sa-mw-dach/microservice-introduction-gitops-infra.git
+ocp4_workload_opentour_dach_2022_infra_repo_tag: main
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/argocd_info.yml b/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/argocd_info.yml
index 1b43585ce11..e7d90f270e7 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/argocd_info.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/argocd_info.yml
@@ -1,6 +1,6 @@
---
- name: Retrieve created route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: openshift-gitops-server
@@ -8,7 +8,7 @@
register: r_route
- name: Retrieve aap secret
- k8s_facts:
+ k8s_info:
api_version: "v1"
kind: Secret
name: openshift-gitops-cluster
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/workload.yml
index 7d112bd9f33..e523923b0b6 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_opentour_dach_2022/tasks/workload.yml
@@ -36,12 +36,11 @@
state: present
definition: "{{ lookup('template', 'appsets/generic.yaml.j2' ) | from_yaml }}"
with_items:
- - service-mesh-app
- - service-mesh-system
- gitops
- - pipeline
- - hello-dev
- - hello-main
+ - dev
+ - service-mesh-system
+ - apps
+ - keycloak
# Patch openshift pipelines because of
# https://access.redhat.com/solutions/6975952
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/install-guides.yaml b/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/install-guides.yaml
index 4b0cdc5500a..027469f9563 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/install-guides.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/install-guides.yaml
@@ -1,6 +1,6 @@
---
- name: search for guide {{ guide }}
- k8s_facts:
+ k8s_info:
kind: DeploymentConfig
name: web-{{ guide }}
namespace: "{{project}}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/pre_workload.yml
index 826f0cf2b2d..ed896c5165a 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/pre_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/pre_workload.yml
@@ -9,7 +9,7 @@
fail_msg: "Must define ocp_username and guid"
- name: Get Web Console route
- k8s_facts:
+ k8s_info:
api_version: route.openshift.io/v1
kind: Route
namespace: openshift-console
@@ -17,7 +17,7 @@
register: r_console_route
- name: Get API server URL
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Infrastructure
name: cluster
@@ -36,7 +36,7 @@
route_subdomain: "{{ r_ingress_config.resources[0].spec.domain }}"
- name: Get codeready keycloak deployment
- k8s_facts:
+ k8s_info:
kind: Deployment
namespace: codeready
name: keycloak
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/verify-workload.yaml b/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/verify-workload.yaml
index aead35033bf..e51e3785292 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/verify-workload.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_quarkus_workshop_user/tasks/verify-workload.yaml
@@ -1,6 +1,6 @@
---
- name: verify user project exists
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Namespace
name: "quarkus-{{ guid }}-project"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/defaults/main.yml
index fd62b5cccc0..001016bffd6 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/defaults/main.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/defaults/main.yml
@@ -58,6 +58,4 @@ ocp4_workload_redhat_developer_hub_gitlab_template_locations:
ocp4_workload_redhat_developer_hub_backstage_image: quay.io/redhat-gpte/backstage:1.0.0
-ocp4_workload_redhat_developer_hub_gitlab_runner_starting_csv: gitlab-runner-operator.v1.15.1
-
redhat_gpte_devhub_pull_secret: ""
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/tasks/setup_gitlab_runner.yml b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/tasks/setup_gitlab_runner.yml
index 69c0f750980..bf73722c8a2 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/tasks/setup_gitlab_runner.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub/tasks/setup_gitlab_runner.yml
@@ -13,7 +13,6 @@
install_operator_packagemanifest_name: gitlab-runner-operator
install_operator_automatic_install_plan_approval: true
install_operator_csv_nameprefix: gitlab-runner-operator
- install_operator_starting_csv: "{{ ocp4_workload_redhat_developer_hub_gitlab_runner_starting_csv }}"
- name: Template out registration token script
ansible.builtin.template:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/defaults/main.yml
index 8f3da73d934..099b654d2ea 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/defaults/main.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/defaults/main.yml
@@ -19,4 +19,6 @@ ocp4_workload_redhat_developer_hub_bootstrap_env:
githubinfraorganization: "janus-idp"
githubinfrarevision: "main"
-ocp4_workload_redhat_developer_hub_bootstrap_vault_namespace: vault
\ No newline at end of file
+ocp4_workload_redhat_developer_hub_bootstrap_vault_namespace: vault
+
+ocp4_workload_redhat_developer_hub_bootstrap_external_secrets_version: 0.9.2
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/tasks/setup_external_secrets.yml b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/tasks/setup_external_secrets.yml
index c10a8aec5ff..6867034690c 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/tasks/setup_external_secrets.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/tasks/setup_external_secrets.yml
@@ -1,24 +1,17 @@
---
-- name: Install External Secrets Operator
- block:
- - name: Install External Secrets Operator
- include_role:
- name: install_operator
- vars:
- install_operator_action: install
- install_operator_name: external-secrets-operator
- install_operator_namespace: openshift-operators
- install_operator_channel: stable
- install_operator_catalog: community-operators
- install_operator_packagemanifest_name: external-secrets-operator
- install_operator_automatic_install_plan_approval: true
- install_operator_csv_nameprefix: external-secrets-operator
- install_operator_starting_csv: external-secrets-operator.v0.9.1
-
-- name: Create operator config
- kubernetes.core.k8s:
- state: present
- definition: "{{ lookup('template', 'operator-config-cluster.yml.j2' ) | from_yaml }}"
+- name: Install external secrets helm chart
+ shell: |
+ helm repo add external-secrets https://charts.external-secrets.io
+ helm install external-secrets external-secrets/external-secrets \
+ -n external-secrets --create-namespace --set installCRDs=true \
+ --set securityContext.runAsUser=null \
+ --set certController.securityContext.runAsUser=null \
+ --set webhook.securityContext.runAsUser=null \
+ --version {{ ocp4_workload_redhat_developer_hub_bootstrap_external_secrets_version }}
+ retries: 5
+ delay: 10
+ register: r_external_secrets
+ until: r_external_secrets is not failed
- name: Create cluster secret store of vault
kubernetes.core.k8s:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/templates/operator-config-cluster.yml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/templates/operator-config-cluster.yml.j2
deleted file mode 100644
index 50bfb0607c7..00000000000
--- a/ansible/roles_ocp_workloads/ocp4_workload_redhat_developer_hub_bootstrap/templates/operator-config-cluster.yml.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: operator.external-secrets.io/v1alpha1
-kind: OperatorConfig
-metadata:
- name: cluster
- namespace: openshift-operators
-spec:
- prometheus:
- enabled: true
- service:
- port: 8080
- resources:
- limits:
- cpu: 100m
- memory: 256Mi
- requests:
- cpu: 10m
- memory: 96Mi
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/tasks/workload.yml
index b51a2b3cafa..3c1921813e1 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/tasks/workload.yml
@@ -72,7 +72,7 @@
- web-terminal-subscription.yaml.j2
- kubernetes.core.k8s_info:
- api_version: v1
+ api_version: route.openshift.io/v1
kind: Route
namespace: openshift-gitops
label_selectors:
@@ -88,7 +88,7 @@
register: r_secret_kinfo
- kubernetes.core.k8s_info:
- api_version: v1
+ api_version: route.openshift.io/v1
kind: Route
namespace: retail-rhods-project
label_selectors:
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-kustomize/retail-app.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-kustomize/retail-app.yaml.j2
index 0f5ece26c9b..866b1512fa9 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-kustomize/retail-app.yaml.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-kustomize/retail-app.yaml.j2
@@ -12,6 +12,7 @@ spec:
path: overlays/development
repoURL: https://gitea.apps.cluster-{{ guid }}.{{ ocp4_base_domain }}/{{ ocp4_workload_gitea_aiml_user }}/retail-dev-gitops.git
targetRevision: HEAD
+ insecure: true
syncPolicy:
automated:
prune: true
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-prod-kustomize/retail-prod-app.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-prod-kustomize/retail-prod-app.yaml.j2
index f7946247bf3..9d65da12d45 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-prod-kustomize/retail-prod-app.yaml.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_retail_aiml_workshop/templates/retail-prod-kustomize/retail-prod-app.yaml.j2
@@ -12,6 +12,7 @@ spec:
path: overlays/production
repoURL: https://gitea.apps.cluster-{{ guid }}.{{ ocp4_base_domain }}/{{ ocp4_workload_gitea_aiml_user }}/retail-prod-gitops.git
targetRevision: HEAD
+ insecure: true
syncPolicy:
automated:
prune: true
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_rhacs_demo_apps/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_rhacs_demo_apps/tasks/pre_workload.yml
index 19b7cd40bdc..a9caae14a91 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_rhacs_demo_apps/tasks/pre_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_rhacs_demo_apps/tasks/pre_workload.yml
@@ -1,19 +1,19 @@
# vim: set ft=ansible
---
# Implement your Pre Workload deployment tasks here
-- name: Get central api endpoint from Secured Cluster CR
- kubernetes.core.k8s_info:
- kind: SecuredCluster
- api_version: platform.stackrox.io/v1alpha1
- namespace: stackrox
- name: stackrox-secured-cluster-services
- # register: r_stackrox_central_route
- register: __secured_cluster
+#- name: Get central api endpoint from Secured Cluster CR
+# kubernetes.core.k8s_info:
+# kind: SecuredCluster
+# api_version: platform.stackrox.io/v1alpha1
+# namespace: stackrox
+# name: stackrox-secured-cluster-services
+# register: r_stackrox_central_route
+# register: __secured_cluster
-- name: Store central endpoint as a fact
- set_fact:
+#- name: Store central endpoint as a fact
+# set_fact:
# __central_endpoint: "{{ r_stackrox_central_route.resources[0].spec.host }}"
- __central_endpoint: "{{ __secured_cluster.resources[0].spec.centralEndpoint }}"
+# __central_endpoint: "{{ __secured_cluster.resources[0].spec.centralEndpoint }}"
# Leave this as the last task in the playbook.
- name: pre_workload tasks complete
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/amq-streams-operator.yaml b/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/amq-streams-operator.yaml
index 755fd2a72b1..f289e14164c 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/amq-streams-operator.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/amq-streams-operator.yaml
@@ -56,7 +56,7 @@
- name: Wait for the CRD to be available
when: not ocp4_workload_rhtr_xraylab_workload_destroy|bool
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/odh-operator.yaml b/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/odh-operator.yaml
index 874fcc93335..5fc502e8fa2 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/odh-operator.yaml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_rhtr_xraylab/tasks/odh-operator.yaml
@@ -56,7 +56,7 @@
- name: Wait for the CRD to be available
when: not ocp4_workload_rhtr_xraylab_workload_destroy|bool
- k8s_facts:
+ k8s_info:
api_version: "apiextensions.k8s.io/v1beta1"
kind: CustomResourceDefinition
name: "{{ item }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_serverless_ml_workshop/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_serverless_ml_workshop/tasks/workload.yml
index 26868b37efb..d3aa1e0ba87 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_serverless_ml_workshop/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_serverless_ml_workshop/tasks/workload.yml
@@ -54,8 +54,8 @@
name: my-cluster
namespace: kafka
register: r_kafka_cluster
- retries: 30
- delay: 5
+ retries: 90
+ delay: 10
until:
- r_kafka_cluster.resources[0].status.clusterId is defined
- r_kafka_cluster.resources[0].status.clusterId | length > 0
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/defaults/main.yml
index 9fe496e226c..f99113cce05 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/defaults/main.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/defaults/main.yml
@@ -1,9 +1,24 @@
---
become_override: false
-ocp_username: user-redhat.com
+ocp_username: '{{ openshift_cluster_admin_username | default("admin", True)}}'
silent: false
tmp_dir: /tmp/{{ guid }}
tmp_kubeconfig: "{{ tmp_dir }}/.kube/config"
# Enable skupper installation on bastion host
service_interconnect_install_skupper: true
+
+# provision_webapp
+ocp4_workload_service_interconnect_webapp_operator_tag: 0.0.63-workshop-1
+ocp4_workload_service_interconnect_webapp_client_id: tutorial-web-app
+ocp4_workload_service_interconnect_webapp_group_name: dedicated-admins
+ocp4_workload_service_interconnect_webapp_operator_template_path: /home/tutorial-web-app-operator/deploy/template/tutorial-web-app.yml
+ocp4_workload_service_interconnect_webapp_operator_resources: >-
+ https://github.com/RedHat-Middleware-Workshops/tutorial-web-app-operator/archive/v{{ocp4_workload_service_interconnect_webapp_operator_tag}}.zip
+ocp4_workload_service_interconnect_webapp_operator_resource_items:
+ - rbac.yaml
+ - sa.yaml
+ - crd.yaml
+ - operator.yaml
+ocp4_workload_service_interconnect_webapp_walkthrough_locations:
+ - "https://github.com/RedHat-Middleware-Workshops/service-interconnect-lab-instructions.git"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/pre_workload.yml
index fdf3d4b33af..418c86b6fe4 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/pre_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/pre_workload.yml
@@ -11,6 +11,63 @@
dest: "{{ tmp_dir }}"
remote_src: true
+- name: Figure out paths
+ block:
+ - name: Retrieve Ingress config
+ k8s_info:
+ api_version: config.openshift.io/v1
+ kind: Ingress
+ name: cluster
+ register: r_ingress_config
+ - name: Get OpenShift Apps Domain
+ set_fact:
+ route_subdomain: "{{ r_ingress_config.resources | json_query('[0].spec.appsDomain') }}"
+ - name: Get OpenShift Domain
+ set_fact:
+ route_subdomain: "{{ r_ingress_config.resources | json_query('[0].spec.domain') }}"
+ when:
+ - route_subdomain | length == 0
+ - name: Retrieve Console config
+ k8s_info:
+ api_version: config.openshift.io/v1
+ kind: Console
+ name: cluster
+ register: r_console_config
+ - name: Get OpenShift Console
+ set_fact:
+ console_url: "{{ r_console_config.resources | json_query('[0].status.consoleURL') }}"
+ - name: Retrieve Infrastructure config
+ k8s_info:
+ api_version: config.openshift.io/v1
+ kind: Infrastructure
+ name: cluster
+ register: r_infrastructure_config
+ - name: Get OpenShift API
+ set_fact:
+ api_url: "{{ r_infrastructure_config.resources | json_query('[0].status.apiServerURL') }}"
+ - name: debug
+ debug:
+ msg:
+ - "Console URL: {{ console_url }}"
+ - "API URL: {{ api_url }}"
+ - "Route Subdomain: {{ route_subdomain }}"
+ - "Admin username: {{ ocp_username }}"
+ - name: Retrieve OpenShift Version
+ k8s_info:
+ api_version: config.openshift.io/v1
+ kind: ClusterVersion
+ name: version
+ register: r_version_config
+ - name: Fetch OpenShift cluster version
+ set_fact:
+ openshift_version: "{{ r_version_config.resources | json_query('[0].status.desired.version') | regex_findall('^(?:(\\d+\\.\\d+))') | first }}"
+ when: (ocp_version is not defined) or (ocp_version | length == 0)
+ - name: debug
+ debug:
+ msg: "Setting up for OpenShift version: {{ openshift_version }}"
+ when:
+ - service_interconnect_application is defined
+
# Leave these as the last tasks in the playbook
# For deployment onto a dedicated cluster (as part of the
@@ -20,8 +77,8 @@
debug:
msg: "Pre-Workload tasks completed successfully."
when:
- - not silent | bool
- - not workload_shared_deployment | default(false) | bool
+ - not silent | bool
+ - not workload_shared_deployment | default(false) | bool
# For RHPDS deployment (onto a shared cluster) set
# workload_shared_deployment to True
@@ -30,5 +87,5 @@
debug:
msg: "Pre-Software checks completed successfully"
when:
- - not silent | bool
- - workload_shared_deployment | default(false) | bool
+ - not silent | bool
+ - workload_shared_deployment | default(false) | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/provision_instructions.yaml b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/provision_instructions.yaml
new file mode 100644
index 00000000000..9d478301c9f
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/provision_instructions.yaml
@@ -0,0 +1,70 @@
+---
+- name: Evaluate namespace if not exists -> {{ webapp_namespace }}
+ kubernetes.core.k8s:
+ api_version: v1
+ kind: Namespace
+ name: "{{ webapp_namespace }}"
+ state: present
+
+- name: Set temp dir
+ ansible.builtin.set_fact:
+ webapp_operator_tmp: "/tmp/webapp-operator"
+
+- name: Ensure example directory exists
+ ansible.builtin.file:
+ path: "{{ webapp_operator_tmp }}"
+ state: directory
+ mode: "u+rwx"
+
+- name: Download example files
+ ansible.builtin.unarchive:
+ src: "{{ ocp4_workload_service_interconnect_webapp_operator_resources }}"
+ dest: "{{ webapp_operator_tmp }}"
+ remote_src: true
+
+- name: Create WebApp Operator Resources
+ kubernetes.core.k8s:
+ state: present
+ namespace: "{{ webapp_namespace }}"
+ src: "{{ webapp_operator_tmp }}/tutorial-web-app-operator-{{ ocp4_workload_service_interconnect_webapp_operator_tag }}/deploy/{{ item }}"
+ loop: "{{ ocp4_workload_service_interconnect_webapp_operator_resource_items }}"
+
+- name: Add additional walkthrough locations in the default list
+ ansible.builtin.set_fact:
+ ocp4_workload_service_interconnect_webapp_walkthrough_locations: "{{ ocp4_workload_service_interconnect_webapp_walkthrough_locations }}"
+
+- name: Retrieve additional services
+ ansible.builtin.set_fact:
+ solution_explorer_services: '{{ lookup("template", "instructions-services.json.j2") }}'
+
+- name: Create WebApp custom resource
+ kubernetes.core.k8s:
+ state: present
+ resource_definition: "{{ lookup('template', 'instructions-webapp.yaml.j2') }}"
+
+- name: Get webapp secure route
+ kubernetes.core.k8s_info:
+ kind: Route
+ name: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ namespace: "{{ webapp_namespace }}"
+ api_version: route.openshift.io/v1
+ register: webapp_secure_route
+ until:
+ - webapp_secure_route.resources is defined
+ - webapp_secure_route.resources | length > 0
+ retries: 10
+ delay: 30
+
+- name: Retrieve Route
+ ansible.builtin.set_fact:
+ webapp_secure_route: "{{ webapp_secure_route.resources[0].spec.host }}"
+
+- name: Create OpenShift OAuth client
+ kubernetes.core.k8s:
+ state: present
+ resource_definition: "{{ lookup('template', 'oauthclient.yaml.j2') }}"
+
+- name: Create OpenShift Group
+ kubernetes.core.k8s:
+ state: present
+ resource_definition: "{{ lookup('template', 'instructions-group.yaml.j2') }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/workload.yml
index 4cdefbe08f4..5c0da522835 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/tasks/workload.yml
@@ -4,34 +4,143 @@
debug:
msg: "Setting up workload for user ocp_username = {{ ocp_username }}"
+# Workaround for intermittent problems when installing the Terminal Operator too quickly after DevWorkspaces
+# checking the DevWorkspaces install would be better, but... *quickfix
+- name: Pause for 5 minutes to allow the cluster to settle down
+ ansible.builtin.pause:
+ minutes: 5
- name: Deploy application on AWS OCP Cluster
+ block:
+ - name: install resources
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('template', item ) | from_yaml }}"
+ loop:
+ - "aws/namespace.yaml.j2"
+ - "aws/deployment.yaml.j2"
+ - "aws/service.yaml.j2"
+ - "aws/route.yaml.j2"
+ - "terminal-subscription.yaml.j2"
+ # - name: Provision Solution Explorer
+ # include_tasks: provision_instructions.yaml
+ # vars:
+ # webapp_namespace: "solution-explorer"
+ - name: Wait for Web Terminal tooling to install
+ k8s_info:
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-tooling
+ namespace: openshift-operators
+ register: crd_terminal
+ until: crd_terminal.resources | default([]) | list | length == 1
+ retries: 90
+ delay: 10
+ - name: Patch terminal operator tooling
+ kubernetes.core.k8s:
+ state: patched
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-tooling
+ namespace: openshift-operators
+ definition:
+ metadata:
+ annotations:
+ web-terminal.redhat.com/unmanaged-state: "true"
+ spec:
+ components:
+ - container:
+ image: quay.io/redhatintegration/rhi-tools:dev2
+ memoryLimit: 512Mi
+ name: web-terminal-tooling
+ - name: Wait for Web Terminal exec to install
+ k8s_info:
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-exec
+ namespace: openshift-operators
+ register: crd_terminal_exec
+ until: crd_terminal_exec.resources | default([]) | list | length == 1
+ retries: 90
+ delay: 10
+ - name: Patch terminal operator exec
+ kubernetes.core.k8s_json_patch:
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-exec
+ namespace: openshift-operators
+ patch:
+ - op: replace
+ path: /spec/components/0/container/env/0/value
+ value: 180m
when:
- service_interconnect_application is defined
- service_interconnect_application == "aws"
environment:
KUBECONFIG: "{{ tmp_kubeconfig }}"
- kubernetes.core.k8s:
- state: present
- definition: "{{ lookup('template', item ) | from_yaml }}"
- loop:
- - "aws/namespace.yaml.j2"
- - "aws/deployment.yaml.j2"
- - "aws/service.yaml.j2"
- - "aws/route.yaml.j2"
- name: Deploy application on Azure OCP Cluster
+ block:
+ - name: install resources
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('template', item ) | from_yaml }}"
+ loop:
+ - "azure/namespace.yaml.j2"
+ - "azure/deployment.yaml.j2"
+ - "terminal-subscription.yaml.j2"
+ - name: Wait for Web Terminal tooling to install
+ k8s_info:
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-tooling
+ namespace: openshift-operators
+ register: crd_terminal
+ until: crd_terminal.resources | default([]) | list | length == 1
+ retries: 90
+ delay: 10
+ - name: Patch terminal operator tooling
+ kubernetes.core.k8s:
+ state: patched
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-tooling
+ namespace: openshift-operators
+ definition:
+ metadata:
+ annotations:
+ web-terminal.redhat.com/unmanaged-state: "true"
+ spec:
+ components:
+ - container:
+ image: quay.io/redhatintegration/rhi-tools:dev2
+ memoryLimit: 512Mi
+ name: web-terminal-tooling
+ - name: Wait for Web Terminal exec to install
+ k8s_info:
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-exec
+ namespace: openshift-operators
+ register: crd_terminal_exec
+ until: crd_terminal_exec.resources | default([]) | list | length == 1
+ retries: 90
+ delay: 10
+ - name: Patch terminal operator exec
+ kubernetes.core.k8s_json_patch:
+ api_version: workspace.devfile.io/v1alpha2
+ kind: DevWorkspaceTemplate
+ name: web-terminal-exec
+ namespace: openshift-operators
+ patch:
+ - op: replace
+ path: /spec/components/0/container/env/0/value
+ value: 180m
when:
- service_interconnect_application is defined
- service_interconnect_application == "azure"
environment:
KUBECONFIG: "{{ tmp_kubeconfig }}"
- kubernetes.core.k8s:
- state: present
- definition: "{{ lookup('template', item ) | from_yaml }}"
- loop:
- - "azure/namespace.yaml.j2"
- - "azure/deployment.yaml.j2"
- name: Download & Install Skupper on Host
when: service_interconnect_install_skupper | bool
@@ -40,9 +149,8 @@
ansible.builtin.shell: >-
/usr/bin/curl https://skupper.io/install.sh | sh
-
# Leave this as the last task in the playbook.
- name: workload tasks complete
debug:
msg: "Workload Tasks completed successfully."
- when: not silent | bool
\ No newline at end of file
+ when: not silent | bool
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-group.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-group.yaml.j2
new file mode 100644
index 00000000000..8bac19e1a40
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-group.yaml.j2
@@ -0,0 +1,6 @@
+kind: Group
+apiVersion: user.openshift.io/v1
+metadata:
+ name: '{{ocp4_workload_service_interconnect_webapp_group_name}}'
+users:
+ - "{{ocp4_workload_authentication_admin_user|default(ocp_username,true)}}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-services.json.j2 b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-services.json.j2
new file mode 100644
index 00000000000..8cd3b8bb7b0
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-services.json.j2
@@ -0,0 +1,20 @@
+{
+ "3scale": {
+ "Host":"https://3scale-admin.{{ route_subdomain }}",
+ "Version":"2.7.0.GA"
+ },
+ "codeready":{
+ "Host":"https://devspaces.{{ route_subdomain }}",
+ "Version":"3.4.0"
+ }
+{% if ocp4_workload_service_interconnect_azure_route_domain is defined %}
+ ,
+ "Azure": {
+ "Attributes": {
+ "azure-subdomain": "{{ ocp4_workload_service_interconnect_azure_route_domain }}",
+ "azure-console": "https://{{ ocp4_workload_service_interconnect_azure_console_hostname }}"
+ },
+ "Host": "{{ ocp4_workload_service_interconnect_azure_console_hostname }}"
+ }
+{% endif %}
+}
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-webapp.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-webapp.yaml.j2
new file mode 100644
index 00000000000..e5a6221101a
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/instructions-webapp.yaml.j2
@@ -0,0 +1,22 @@
+apiVersion: "integreatly.org/v1alpha1"
+kind: "WebApp"
+metadata:
+ name: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ namespace: "{{ webapp_namespace }}"
+ labels:
+ app: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+spec:
+ app_label: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ template:
+ path: "{{ ocp4_workload_service_interconnect_webapp_operator_template_path }}"
+ parameters:
+ IMAGE: quay.io/redhatintegration/tutorial-web-app:latest
+ OPENSHIFT_OAUTHCLIENT_ID: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ OPENSHIFT_OAUTH_HOST: "oauth-openshift.{{ route_subdomain }}"
+ OPENSHIFT_HOST: "console-openshift-console.{{ route_subdomain }}"
+ INSTALLED_SERVICES: |-
+ {{ solution_explorer_services }}
+ OPENSHIFT_VERSION: "4"
+{% if ocp4_workload_service_interconnect_webapp_walkthrough_locations is defined %}
+ WALKTHROUGH_LOCATIONS: "{{ ocp4_workload_service_interconnect_webapp_walkthrough_locations|join(',') }}"
+{% endif %}
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/oauthclient.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/oauthclient.yaml.j2
new file mode 100644
index 00000000000..5c488f541f2
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/oauthclient.yaml.j2
@@ -0,0 +1,8 @@
+apiVersion: oauth.openshift.io/v1
+grantMethod: auto
+kind: OAuthClient
+metadata:
+ name: "{{ ocp4_workload_service_interconnect_webapp_client_id }}"
+ namespace: "{{ webapp_namespace }}"
+redirectURIs:
+ - "https://{{ webapp_secure_route }}"
\ No newline at end of file
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/terminal-subscription.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/terminal-subscription.yaml.j2
new file mode 100644
index 00000000000..be7e58b3ce4
--- /dev/null
+++ b/ansible/roles_ocp_workloads/ocp4_workload_service_interconnect/templates/terminal-subscription.yaml.j2
@@ -0,0 +1,11 @@
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+ name: web-terminal
+ namespace: openshift-operators
+spec:
+ channel: fast
+ installPlanApproval: Automatic
+ name: web-terminal
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/additional/pipelines-and-triggers.yml b/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/additional/pipelines-and-triggers.yml
index 0adef464ddc..bf48eba2cc5 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/additional/pipelines-and-triggers.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/additional/pipelines-and-triggers.yml
@@ -37,7 +37,7 @@
loop: "{{ lookup('fileglob', './templates/common/triggers/*.yaml.j2', wantlist=True)}}"
- name: Retrieve created event listener route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: "{{ _pipeline_name_ }}-{{ _pipeline_dir_ }}-event-listener"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/workload.yml
index 5c457ca8f08..90d59855994 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_tekton_execution_environment/tasks/workload.yml
@@ -10,7 +10,7 @@
- ocp4_workload_tekton_ee_hub_registry_password is not defined
block:
- name: Retrieve created hub route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: hub
@@ -21,7 +21,7 @@
delay: 30
- name: Retrieve hub secret
- k8s_facts:
+ k8s_info:
api_version: "v1"
kind: Secret
name: "{{ ocp4_workload_tekton_ee_hub_admin_secret }}"
@@ -45,7 +45,7 @@
- ocp4_workload_tekton_ee_automation_controller_password is not defined
block:
- name: Retrieve automation controller route
- k8s_facts:
+ k8s_info:
api_version: "route.openshift.io/v1"
kind: Route
name: controller
@@ -56,7 +56,7 @@
delay: 30
- name: Retrieve automation controller admin secret
- k8s_facts:
+ k8s_info:
api_version: "v1"
kind: Secret
name: "{{ ocp4_workload_tekton_ee_automation_controller_admin_secret }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_tl500/tasks/pre_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_tl500/tasks/pre_workload.yml
index efdd38231b3..60d0ae0fc8e 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_tl500/tasks/pre_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_tl500/tasks/pre_workload.yml
@@ -18,7 +18,7 @@
- kubernetes.core
- name: Get API server URL
- k8s_facts:
+ k8s_info:
api_version: config.openshift.io/v1
kind: Infrastructure
name: cluster
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_validated_pattern/templates/values-secret.yaml.j2 b/ansible/roles_ocp_workloads/ocp4_workload_validated_pattern/templates/values-secret.yaml.j2
index d03a8011093..4d3cef29c73 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_validated_pattern/templates/values-secret.yaml.j2
+++ b/ansible/roles_ocp_workloads/ocp4_workload_validated_pattern/templates/values-secret.yaml.j2
@@ -1,37 +1,78 @@
---
+version: "2.0"
secrets:
# NEVER COMMIT THESE VALUES TO GIT
- imageregistry:
+
+ - name: imageregistry
+ fields:
# eg. Quay -> Robot Accounts -> Robot Login
- username: PLAINTEXT
- password: PLAINTEXT
+ - name: username
+ value: robot-account
+ - name: password
+ value:
- git:
+ - name: git
+ fields:
# Go to: https://github.com/settings/tokens
- username: {{ ocp4_workload_validated_pattern_gitea_user }}
- password: {{ ocp4_workload_validated_pattern_gitea_token }}
+ - name: username
+ value: "org or github user"
+ - name: password
+ value: "token"
{% if ocp4_workload_validated_pattern_name is match('industrial-edge')
and
ocp4_workload_validated_pattern_s3_bucket_create | bool %}
- aws:
- s3Secret: {{ _ocp4_workload_validated_pattern_s3_secret }}
+ - name: aws
+ fields:
+ - name: aws_access_key_id
+ ini_file: ~/.aws/credentials
+ ini_key: aws_access_key_id
+ - name: aws_secret_access_key
+ ini_file: ~/.aws/credentials
+ ini_key: aws_secret_access_key
+ - name: s3Secret
+ value: {{ _ocp4_workload_validated_pattern_s3_secret }}
{% endif %}
{% if ocp4_workload_validated_pattern_name is match('multicloud-gitops') %}
- config-demo:
- # Secret used for demonstrating vault storage, external secrets, and ACM distribution
- secret: {{ _ocp4_workload_validated_pattern_config_demo_secret }}
+ - name: config-demo
+ vaultPrefixes:
+ - global
+ fields:
+ - name: secret
+ onMissingValue: generate
+ vaultPolicy: validatedPatternDefaultPolicy
{% endif %}
{% if ocp4_workload_validated_pattern_name is match('xray') %}
- xraylab:
- db:
- db_user: xraylab
- db_passwd: xraylab
- db_root_passwd: xraylab
- db_host: xraylabdb
- db_dbname: xraylabdb
- db_master_user: xraylab
- db_master_password: xraylab
+
+ # Database login credentials and configuration
+ - name: xraylab
+ fields:
+ - name: database-user
+ value: xraylab
+ - name: database-host
+ value: xraylabdb
+ - name: database-db
+ value: xraylabdb
+ - name: database-master-user
+ value: xraylab
+ - name: database-password
+ onMissingValue: generate
+ vaultPolicy: validatedPatternDefaultPolicy
+ - name: database-root-password
+ onMissingValue: generate
+ vaultPolicy: validatedPatternDefaultPolicy
+ - name: database-master-password
+ onMissingValue: generate
+ vaultPolicy: validatedPatternDefaultPolicy
+
+ # Grafana Dashboard admin user/password
+ - name: grafana
+ fields:
+ - name: GF_SECURITY_ADMIN_USER
+ value: root
+ - name: GF_SECURITY_ADMIN_PASSWORD
+ onMissingValue: generate
+ vaultPolicy: validatedPatternDefaultPolicy
{% endif %}
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/defaults/main.yml b/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/defaults/main.yml
index 950b92a76ce..94b476ffec4 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/defaults/main.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/defaults/main.yml
@@ -1,5 +1,34 @@
+---
become_override: false
ocp_username: opentlc-mgr
silent: false
-tmp_dir: /tmp/{{ guid }}
-tmp_kubeconfig: "{{ tmp_dir }}/.kube/config"
+
+# Channel to use for the Web Terminal subscription
+ocp4_workload_web_terminal_channel: fast
+
+
+# Set automatic InstallPlan approval. If set to false it is also suggested
+# to set the starting_csv to pin a specific version
+# This variable has no effect when using a catalog snapshot (always true)
+ocp4_workload_web_terminal_automatic_install_plan_approval: true
+
+# Set a starting ClusterServiceVersion.
+# Recommended to leave empty to get latest in the channel when not using
+# a catalog snapshot.
+# Highly recommended to be set when using a catalog snapshot but can be
+# empty to get the latest available in the channel at the time when
+# the catalog snapshot got created. Example: web-terminal.v1.8.0
+ocp4_workload_web_terminal_starting_csv: ""
+
+# Use a catalog snapshot
+ocp4_workload_web_terminal_use_catalog_snapshot: false
+
+# Catalog Source Name when using a catalog snapshot. This should be unique
+# in the cluster to avoid clashes
+ocp4_workload_web_terminal_catalogsource_name: redhat-operators-snapshot-web-terminal
+
+# Catalog snapshot image
+ocp4_workload_web_terminal_catalog_snapshot_image: quay.io/gpte-devops-automation/olm_snapshot_redhat_catalog
+
+# Catalog snapshot image tag
+ocp4_workload_web_terminal_catalog_snapshot_image_tag: v4.13_2023_07_31
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/remove_workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/remove_workload.yml
index 33f35af8e58..db07f38a891 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/remove_workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/remove_workload.yml
@@ -1,9 +1,18 @@
-- name: Remove web terminal operator
- k8s:
- state: absent
- definition: "{{ lookup('template', item ) | from_yaml }}"
- loop:
- - web-terminal-subscription.yaml.j2
+---
+- name: Remove Web Terminal operator
+ include_role:
+ name: install_operator
+ vars:
+ install_operator_action: remove
+ install_operator_name: web-terminal
+ install_operator_namespace: openshift-operators
+ install_operator_channel: "{{ ocp4_workload_web_terminal_channel }}"
+ install_operator_catalog: redhat-operators
+ install_operator_catalogsource_setup: "{{ ocp4_workload_web_terminal_use_catalog_snapshot | default(false) }}"
+ install_operator_catalogsource_name: "{{ ocp4_workload_web_terminal_catalogsource_name | default('') }}"
+ install_operator_catalogsource_namespace: openshift-operators
+ install_operator_catalogsource_image: "{{ ocp4_workload_web_terminal_catalog_snapshot_image | default('') }}"
+ install_operator_catalogsource_image_tag: "{{ ocp4_workload_web_terminal_catalog_snapshot_image_tag | default('') }}"
# Leave this as the last task in the playbook.
- name: remove_workload tasks complete
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/workload.yml
index 3735b6cd9d5..cd906456e84 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_web_terminal/tasks/workload.yml
@@ -1,7 +1,17 @@
---
-- name: Set up web terminal
- kubernetes.core.k8s:
- state: present
- definition: "{{ lookup('template', item ) | from_yaml }}"
- loop:
- - web-terminal-subscription.yaml.j2
+- name: Install Web Terminal operator
+ ansible.builtin.include_role:
+ name: install_operator
+ vars:
+ install_operator_action: install
+ install_operator_name: web-terminal
+ install_operator_namespace: openshift-operators
+ install_operator_channel: "{{ ocp4_workload_web_terminal_channel }}"
+ install_operator_catalog: redhat-operators
+ install_operator_automatic_install_plan_approval: "{{ ocp4_workload_web_terminal_automatic_install_plan_approval | default(true) }}"
+ install_operator_starting_csv: "{{ ocp4_workload_web_terminal_starting_csv }}"
+ install_operator_catalogsource_setup: "{{ ocp4_workload_web_terminal_use_catalog_snapshot | default(false) }}"
+ install_operator_catalogsource_name: "{{ ocp4_workload_web_terminal_catalogsource_name | default('') }}"
+ install_operator_catalogsource_namespace: openshift-operators
+ install_operator_catalogsource_image: "{{ ocp4_workload_web_terminal_catalog_snapshot_image | default('') }}"
+ install_operator_catalogsource_image_tag: "{{ ocp4_workload_web_terminal_catalog_snapshot_image_tag | default('') }}"
diff --git a/ansible/roles_ocp_workloads/ocp4_workload_workload_monitoring/tasks/workload.yml b/ansible/roles_ocp_workloads/ocp4_workload_workload_monitoring/tasks/workload.yml
index 8cd0f7185f9..151988e3250 100644
--- a/ansible/roles_ocp_workloads/ocp4_workload_workload_monitoring/tasks/workload.yml
+++ b/ansible/roles_ocp_workloads/ocp4_workload_workload_monitoring/tasks/workload.yml
@@ -13,7 +13,7 @@
enableUserWorkload: true
- name: Wait for the prometheus user monitoring pods to roll out
- k8s_facts:
+ k8s_info:
api_version: apps/v1
kind: StatefulSet
name: prometheus-user-workload
diff --git a/docs/Developing_Workloads_on_Bastion.adoc b/docs/Developing_Workloads_on_Bastion.adoc
index 1e236d7e7ba..d6c0741b5a0 100644
--- a/docs/Developing_Workloads_on_Bastion.adoc
+++ b/docs/Developing_Workloads_on_Bastion.adoc
@@ -11,7 +11,7 @@ While the following instructions outline the use of the shared OpenTLC developme
== Requesting access to the OpenTLC Shared Cluster development bastion
-1. Request access for the Bastion provisioning catalog item by sending an e-mail to rhpds-help@redhat.com asking to be added to the *opentlc-access-bastion* group.
+1. Request access for the Bastion provisioning catalog item by making a ticket here: https://redhat.service-now.com/help?id=sc_cat_item&sys_id=00c0316a1bf39450e43942a7bc4bcbd1
2. Once access has been granted log into http://labs.opentlc.com
3. Open the catalog *DevOps Shared Cluster Testing* and select the catalog item *DEV - OCP 4.4 Shared Bastion Access*. Click *Order*.
4. Check the checkbox and click *Submit*.
diff --git a/sonar-project.properties b/sonar-project.properties
new file mode 100644
index 00000000000..6fb6786bf26
--- /dev/null
+++ b/sonar-project.properties
@@ -0,0 +1,2 @@
+sonar.projectKey=com.redhat.rhpds.redhat-cop.agnosticd
+sonar.qualitygate.wait=true
diff --git a/tools/execution_environments/ee-multicloud-public/Containerfile b/tools/execution_environments/ee-multicloud-public/Containerfile
index a0ddb84ff93..f9ab60629ae 100644
--- a/tools/execution_environments/ee-multicloud-public/Containerfile
+++ b/tools/execution_environments/ee-multicloud-public/Containerfile
@@ -1,4 +1,4 @@
-FROM registry.access.redhat.com/ubi8/ubi:8.7
+FROM registry.access.redhat.com/ubi8/ubi
USER root
WORKDIR /root
@@ -19,6 +19,7 @@ RUN dnf install -y python39-pip \
python39 \
python39-devel \
rsync \
+ sshpass \
tar \
unzip \
vim \
@@ -67,15 +68,15 @@ RUN rm -rf /tmp/* /root/.cache /root/*
# In OpenShift, container will run as a random uid number and gid 0. Make sure things
# are writeable by the root group.
RUN for dir in \
- /home/runner \
/home/runner/.ansible \
/home/runner/.ansible/tmp \
- /runner \
/home/runner \
/runner/env \
/runner/inventory \
/runner/project \
- /runner/artifacts ; \
+ /runner/artifacts \
+ /runner/requirements_collections/ansible_collections \
+ /runner ; \
do mkdir -m 0775 -p $dir ; chmod -R g+rwx $dir ; chgrp -R root $dir ; done && \
for file in \
/home/runner/.ansible/galaxy_token \
@@ -85,7 +86,7 @@ RUN for dir in \
ENV HOME=/home/runner
-COPY entrypoint.sh /usr/local/bin/entrypoint
+ADD https://raw.githubusercontent.com/ansible/ansible-builder/release_3.0/src/ansible_builder/_target_scripts/entrypoint /usr/local/bin/entrypoint
RUN chmod 755 /usr/local/bin/entrypoint
WORKDIR /runner
diff --git a/tools/execution_environments/ee-multicloud-public/ee-report.sh b/tools/execution_environments/ee-multicloud-public/ee-report.sh
index e1725526757..900fed2d292 100755
--- a/tools/execution_environments/ee-multicloud-public/ee-report.sh
+++ b/tools/execution_environments/ee-multicloud-public/ee-report.sh
@@ -24,3 +24,6 @@ dnf list installed
echo -e "\n# Alternatives\n"
alternatives --list
+
+echo -e "\n# /runner directory \n"
+find /runner -printf "%M %u %g %k %p\n"
diff --git a/tools/execution_environments/ee-multicloud-public/entrypoint.sh b/tools/execution_environments/ee-multicloud-public/entrypoint.sh
deleted file mode 100755
index e1f2d212c1f..00000000000
--- a/tools/execution_environments/ee-multicloud-public/entrypoint.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-
-# We need to fix a number of problems here that manifest under different container runtimes, as well as tweak some
-# things to simplify runner's containerized launch behavior. Since runner currently always expects to bind-mount its
-# callback plugins under ~/.ansible, it must have prior knowledge of the user's homedir before the container is launched
-# in order to know where to mount in the callback dir. In all cases, we must get a consistent answer from $HOME
-# and anything that queries /etc/passwd for a homedir (eg, `~root`), or lots of things (including parts of Ansible
-# core itself) will be broken.
-
-# If we're running as a legit default user that has an entry in /etc/passwd and a valid homedir, we're all good.
-
-# If the username/uid we're running under is not represented in /etc/passwd or the current user's homedir is something
-# other than /home/runner (eg, the container was run with --user and some dynamic unmapped UID from the host with
-# primary GID 0), we need to correct that in order for ansible-runner's callbacks to function properly. Some things
-# (eg podman/cri-o today) already create an /etc/passwd entry on the fly in this case, but they set the homedir to
-# WORKDIR, which causes potential collisions with mounted/mapped volumes. For consistency, we'll
-# just always set the current user's homedir to `/home/runner`, which we've already configured in a way
-# that should always work with known container runtimes (eg, ug+rwx and all dirs owned by the root group).
-
-# If current user is not listed in /etc/passwd, add an entry with username==uid, primary gid 0, and homedir /home/runner
-
-# If current user is in /etc/passwd but $HOME != `/home/runner`, rewrite that user's homedir in /etc/passwd to
-# /home/runner and export HOME=/home/runner for this session only. All new sessions (eg podman exec) should
-# automatically set HOME to the value in /etc/passwd going forward.
-
-# Ideally in the future, we can come up with a better way for the outer runner to dynamically inject its callbacks, or
-# rely on the inner runner's copy. This would allow us to restore the typical POSIX user homedir conventions.
-
-# if any of this business fails, we probably want to fail fast
-if [ -n "$EP_DEBUG" ]; then
- set -eux
- echo 'hello from entrypoint'
-else
- set -e
-fi
-
-# current user might not exist in /etc/passwd at all
-if ! $(whoami &> /dev/null) || ! getent passwd $(whoami || id -u) &> /dev/null ; then
- if [ -n "$EP_DEBUG" ]; then
- echo "adding missing uid $(id -u) into /etc/passwd"
- fi
- echo "$(id -u):x:$(id -u):0:container user $(id -u):/home/runner:/bin/bash" >> /etc/passwd
- export HOME=/home/runner
-fi
-
-MYHOME=`getent passwd $(whoami) | cut -d: -f6`
-
-if [ "$MYHOME" != "$HOME" ] || [ "$MYHOME" != "/home/runner" ]; then
- if [ -n "$EP_DEBUG" ]; then
- echo "replacing homedir for user $(whoami)"
- fi
- # sed -i wants to create a tempfile next to the original, which won't work with /etc permissions in many cases,
- # so just do it in memory and overwrite the existing file if we succeeded
- NEWPW=$(sed -r "s/(^$(whoami):(.*:){4})(.*:)/\1\/home\/runner:/g" /etc/passwd)
- echo "$NEWPW" > /etc/passwd
- # ensure the envvar matches what we just set in /etc/passwd for this session; future sessions set automatically
- export HOME=/home/runner
-fi
-
-if [[ -n "${LAUNCHED_BY_RUNNER}" ]]; then
- # Special actions to be compatible with old ansible-runner versions, 2.1.x specifically
- RUNNER_CALLBACKS=$(python3 -c "from ansible_runner.display_callback.callback import awx_display; print(awx_display.__file__)")
- export ANSIBLE_CALLBACK_PLUGINS="$(dirname $RUNNER_CALLBACKS)"
-
- # old versions split the callback name between awx_display and minimal, but new version just uses awx_display
- export ANSIBLE_STDOUT_CALLBACK=awx_display
-fi
-
-if [[ -d ${AWX_ISOLATED_DATA_DIR} ]]; then
- if output=$(ansible-galaxy collection list --format json 2> /dev/null); then
- echo $output > ${AWX_ISOLATED_DATA_DIR}/collections.json
- fi
- ansible --version 2> /dev/null | head -n 1 > ${AWX_ISOLATED_DATA_DIR}/ansible_version.txt
-fi
-
-SCRIPT=/usr/local/bin/dumb-init
-# NOTE(pabelanger): Downstream we install dumb-init from RPM.
-if [ -f "/usr/bin/dumb-init" ]; then
- SCRIPT=/usr/bin/dumb-init
-fi
-
-exec $SCRIPT -- "${@}"
diff --git a/tools/execution_environments/ee-multicloud-public/readme.adoc b/tools/execution_environments/ee-multicloud-public/readme.adoc
index f17aa636fba..1e9d7f976b6 100644
--- a/tools/execution_environments/ee-multicloud-public/readme.adoc
+++ b/tools/execution_environments/ee-multicloud-public/readme.adoc
@@ -1,5 +1,20 @@
== Changelog ==
+=== v0.1.0 ===
+
+* Add community.okd collection
+* size +5M
+* link:https://gist.github.com/fridim/c420ed8c415694a389bbc9e204b650b0[ee-report diff with v0.0.18]
+* link:https://gist.github.com/fridim/a12d0ac2387d030d07a2c6bf1e5c7b53[full ee-report]
+
+=== v0.0.18 ===
+
+* Fix requirements_collections path, see link:https://github.com/redhat-cop/agnosticd/pull/6746[#6746]
+* size +16M
+* link:https://gist.github.com/fridim/03ff4cff5183b323e6245fa95219122e[ee-report diff with v0.0.17]
+* link:https://gist.github.com/fridim/dfc2de437375ba437b1b41ffa57912a9[full ee-report]
+
+
=== v0.0.17 ===
* Add `passlib` python module, needed for htpasswd
diff --git a/tools/execution_environments/ee-multicloud-public/requirements.txt b/tools/execution_environments/ee-multicloud-public/requirements.txt
index 7ade39aedf6..bc4fc1fe1b7 100644
--- a/tools/execution_environments/ee-multicloud-public/requirements.txt
+++ b/tools/execution_environments/ee-multicloud-public/requirements.txt
@@ -8,7 +8,8 @@ dumb-init
jsonpatch
kubernetes>=12.0.0
ncclient
-openstacksdk>=1.0.0
+# Fix openstacksdk version till this issue is solved: https://storyboard.openstack.org/#!/story/2010908
+openstacksdk==1.3.1
packet-python>=1.43.1
passlib
paramiko
@@ -17,6 +18,13 @@ pyOpenSSL
pypsrp[kerberos,credssp]
python-daemon
python-openstackclient
+python-heatclient
+python-cinderclient
+python-designateclient
+python-keystoneclient
+python-neutronclient
+python-novaclient
+python-swiftclient
pywinrm[kerberos,credssp]
pyyaml
requests-oauthlib
diff --git a/tools/execution_environments/ee-multicloud-public/requirements.yml b/tools/execution_environments/ee-multicloud-public/requirements.yml
index f4837b8ed99..7280f4782e4 100644
--- a/tools/execution_environments/ee-multicloud-public/requirements.yml
+++ b/tools/execution_environments/ee-multicloud-public/requirements.yml
@@ -20,9 +20,12 @@ collections:
# cryptography
- name: community.crypto
-
- name: community.general
+# kubernetes>=12.0.0
+# requests-oauthlib
+- name: community.okd
+
# requirements.txt from the collection
- name: community.vmware
diff --git a/training/04_Middleware_on_OpenShift/05_02_Shared_Example_Lab.adoc b/training/04_Middleware_on_OpenShift/05_02_Shared_Example_Lab.adoc
index 5fafbe33ecb..72436b9c7db 100644
--- a/training/04_Middleware_on_OpenShift/05_02_Shared_Example_Lab.adoc
+++ b/training/04_Middleware_on_OpenShift/05_02_Shared_Example_Lab.adoc
@@ -91,7 +91,7 @@ Explore the remove_workload.yml below:
msg: pre_workload tasks complete
- name: Get Namespaces
- k8s_facts:
+ k8s_info:
api_version: v1
kind: Namespace
label_selectors: