Ansible add_host does not work: the next play is skipped

Posted 2019-04-13 01:49

Question:

I am using Ansible to create a new EC2 instance and then install some packages on it. The problem is that I add the new host to a host group, but that host group is not visible in the next play. When execution reaches the "Configure EC2 instance" play, it reports:

PLAY [Configure EC2 instance] ***************************************************************
skipping: no hosts matched

Here is the code:

---
  - name: Provision an EC2 Instance
    hosts: localhost
    connection: local
    gather_facts: False
    tags: provisioning
    # Necessary Variables for creating/provisioning the EC2 Instance
    vars_files:
      - vars/variables.yml
      - vars/aws_auth.yml

    # Tasks that will be used to Launch/Create an EC2 Instance
    tasks:
      - name: Create security group
        ec2_group:
          aws_access_key: "{{ ec2_access_key }}"
          aws_secret_key: "{{ ec2_secret_key }}"
          name: "{{ project_name }}_security_group"
          description: "{{ project_name }} security group"
          region: "{{ aws_region }}"
          rules:
            - proto: tcp
              from_port: 22
              to_port: 22
              cidr_ip: 0.0.0.0/0
            - proto: tcp
              from_port: 80
              to_port: 80
              cidr_ip: 0.0.0.0/0
            - proto: tcp
              from_port: 443
              to_port: 443
              cidr_ip: 0.0.0.0/0
          rules_egress:
            - proto: all
              cidr_ip: 0.0.0.0/0
        register: basic_firewall

      - name: Create an EC2 key
        ec2_key:
          aws_access_key: "{{ ec2_access_key }}"
          aws_secret_key: "{{ ec2_secret_key }}"
          name: "{{ project_name }}-{{ env }}-key"
          region: "{{ aws_region }}"
        register: ec2_key

      - name: save private key
        copy:
          content: "{{ ec2_key.key.private_key }}"
          dest: "private_keys/aws-{{ env }}-private.pem"
          mode: 0600
        when: ec2_key.changed

      - name: Create an EC2 instance
        ec2:
          aws_access_key: "{{ ec2_access_key }}"
          aws_secret_key: "{{ ec2_secret_key }}"
          key_name: "{{ project_name }}-{{ env }}-key"
          region: "{{ aws_region }}"
          group_id: "{{ basic_firewall.group_id }}"
          instance_type: "{{ instance_type }}"
          image: "{{ ami }}"
          wait: yes
          instance_tags:
            env: "{{ env }}"
          count_tag: env
          exact_count: 1
        register: ec2

      - name: Add new instance to host group
        add_host:
          name: "{{ item.public_dns_name }}"
          groups: launched
        with_items: "{{ ec2.tagged_instances }}"

      - name: Wait for SSH to come up
        wait_for:
          host: "{{ item.public_dns_name }}"
          port: 22
          delay: 60
          timeout: 320
          state: started
        with_items: "{{ ec2.tagged_instances }}"

      - name: Refresh inventory to ensure new instances exist in inventory
        meta: refresh_inventory

  - name: Configure EC2 instance
    hosts: launched
    gather_facts: False
    tasks:
      - debug: var=group_names
      - debug: msg="{{ inventory_hostname }}"
      - debug: var=hostvars[inventory_hostname]
      - debug: msg="groups={{groups}}"
        run_once: true

      - name: install drush
        yum: name=drush state=present

      - name: install git
        yum: name=git state=present

      - name: download Drupal
        shell: drush dl drupal-7

Here is the output of add_host:

   "add_host": {
    "groups": [
        "launched"
    ], 
    "host_name": "xxx.us-east-2.compute.amazonaws.com", 
    "host_vars": {}
}, 

Here is the value of ec2.tagged_instances:

    "tagged_instances": [
    {
        "ami_launch_index": "0", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "status": "attached", 
                "volume_id": "vol-0a095bd6e62ca6xxx"
            }
        }, 
        "dns_name": "xxx.us-east-2.compute.amazonaws.com", 
        "ebs_optimized": false, 
        "groups": {
            "sg-90a9bxxx": "xxx_automation_security_group"
        }, 
        "hypervisor": "xen", 
        "id": "i-0f39cd12657aad100", 
        "image_id": "ami-11aa8c74", 
        "instance_type": "t2.micro", 
        "kernel": null, 
        "key_name": "xxx_automation-staging-key", 
        "launch_time": "2017-07-19T00:12:52.000Z", 
        "placement": "us-east-2b", 
        "private_dns_name": "xxx.us-east-2.compute.internal", 
        "private_ip": "172.31.24.xxx", 
        "public_dns_name": "xxx.us-east-2.compute.amazonaws.com", 
        "public_ip": "18.220.52.xxx", 
        "ramdisk": null, 
        "region": "us-east-2", 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "running", 
        "state_code": 16, 
        "tags": {
            "env": "staging"
        }, 
        "tenancy": "default", 
        "virtualization_type": "hvm"
    }

Answer 1:

Remove the meta: refresh_inventory task from your play.

It is not needed for in-memory inventory: it tells Ansible to rebuild the inventory from inventory files and dynamic inventory scripts, which discards the in-memory hosts and groups created by the add_host task earlier in the play.
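
With that task removed, the launched group survives into the second play, because add_host only modifies the in-memory inventory of the current playbook run. Here is a minimal sketch of the working sequence, reusing the names from the question (the provisioning tasks are unchanged and elided):

---
  - name: Provision an EC2 Instance
    hosts: localhost
    connection: local
    gather_facts: False
    tasks:
      # ... security group, key and ec2 tasks as in the question ...

      - name: Add new instance to host group
        add_host:
          name: "{{ item.public_dns_name }}"
          groups: launched
        with_items: "{{ ec2.tagged_instances }}"

      # No meta: refresh_inventory here -- it would rebuild the inventory
      # from inventory files and dynamic inventory scripts, dropping the
      # in-memory launched group.

  - name: Configure EC2 instance
    hosts: launched
    gather_facts: False
    tasks:
      - debug: msg="{{ inventory_hostname }}"

refresh_inventory is only useful when the new host shows up in an actual inventory source (for example, an EC2 dynamic inventory script); for a host that exists only in memory via add_host, there is nothing on disk to refresh.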