---
# Initial Step:
#
# Schedule a new job for a specific topic on a specific remote CI.
# The return value of this action contains all the data associated with
# the job, so we register it for later consumption.
#
- hosts: localhost
  tasks:
    - name: Schedule a new job
      dci_job:
        topic: 'OSP8'
        remoteci: 'dci-env-ovb-1'
      register: job_info


# New state
#
# The user is free to do whatever she needs before entering the pre-run state.
# Usually this is used to sync the components the job relies on.
#
- hosts: localhost
  vars:
    dci_status: 'new'
    dci_comment: 'Creating OSP 8 local mirrors and synchronizing them'
    components: "{{ job_info['components'] }}"
    job_id: "{{ job_info['job_id'] }}"
  tasks:
    - block:
        - name: Ensure proper directories are created
          file:
            path: '{{ dci_mirror_location }}/{{ job_id }}'
            state: directory

        - name: Retrieve component
          dci_component:
            dest: '{{ dci_mirror_location }}/{{ item["id"] }}.tar'
            id: '{{ item["id"] }}'
          with_items: "{{ components }}"

        - name: Unarchive component
          unarchive:
            src: '{{ dci_mirror_location }}/{{ item["id"] }}.tar'
            dest: '{{ dci_mirror_location }}/{{ job_id }}'
            remote_src: True
          with_items: "{{ components }}"

      rescue:
        - name: Fail properly
          fail:
            msg: 'Something went wrong with the installation'


# Pre-run state
#
# The user is free to do whatever she needs before entering the running state.
# Usually this is used to boot (but not provision) the undercloud.
#
- hosts: localhost
  vars:
    dci_status: 'pre-run'
    dci_comment: 'Spawning the undercloud'
  tasks:
    - block:
        - include: pre-run.yml

      rescue:
        - name: Fail properly
          fail:
            msg: 'Something went wrong with the installation'


# Run state
#
# The user is free to do whatever she needs before entering the post-run state.
# Usually this is used to provision both the undercloud and the overcloud.
#
- hosts: UNDERCLOUD.IP
  user: centos
  become: true
  vars:
    dci_status: 'running'
    dci_comment: 'Provision the undercloud and the overcloud'
  tasks:
    - block:
        - include: running.yml

      rescue:
        - name: Tear me down
          shell: echo 'Tear me down'

        - name: Fail properly
          fail:
            msg: 'Something went wrong with the installation'


# Post-run state
#
# The user is free to do whatever she needs before entering the success state.
# Usually this is used to run tests, certifications, etc.
#
- hosts: localhost
  vars:
    dci_status: 'post-run'
    dci_comment: 'Running tests'
    job_id: "{{ job_info['job_id'] }}"
  tasks:
    - block:
        - name: Run tests
          dci_run_tests:
            undercloud_ip: '46.231.133.242'
            remoteci: 'dci-env-ovb-1'
            key_filename: '/root/.ssh/id_rsa'
            job_id: '{{ job_id }}'

        - name: Upload junit results
          dci_file:
            path: '{{ item.path }}'
            name: '{{ item.name }}'
            mime: 'application/junit'
            job_id: '{{ job_id }}'
          with_items:
            - { name: 'Rally', path: 'rally.xml' }
            - { name: 'Tempest', path: 'tempest.xml' }

      rescue:
        - name: Tear me down
          shell: echo 'Tear me down'

        - name: Fail properly
          fail:
            msg: 'Something went wrong with the installation'


# Ideally, the play above could be replaced with a more generic one like
# the following:
#
#- hosts: UNDERCLOUD.IP
#  become: true
#  user: centos
#  vars:
#    dci_status: 'post-run'
#    dci_comment: 'Running tests'
#    tests: "{{ job_info['tests'] }}"
#  roles:
#    - { role: ansible-role-certification, when: "'certification' in tests" }
#    - { role: ansible-role-tempest, when: "'tempest' in tests" }


# Success state
#
# The user is free to do whatever she needs once the job has succeeded.
# Usually this is used to tear down the platform.
#
- hosts: localhost
  vars:
    dci_status: 'success'
  tasks:
    - include: teardown.yml
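

# Failure handling (illustrative sketch, not part of the original example)
#
# The rescue blocks above only print a message and fail the play. As an
# assumption, a dedicated play mirroring the success play could report a
# 'failure' status and still run the teardown. The 'failure' status value,
# the dci_comment text, and the reuse of teardown.yml are assumptions based
# on the dci_status pattern used throughout this playbook.
#
#- hosts: localhost
#  vars:
#    dci_status: 'failure'
#    dci_comment: 'Tearing the platform down after a failed run'
#  tasks:
#    - include: teardown.yml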