While creating a new virtual server, I'm getting an odd error that I can't seem to track down.
This particular test playbook creates a set of servers, a service group, binds the servers to the service group, then creates the virtual server with the service group bound to it. This error only occurs on the initial creation run. Subsequent runs finish successfully: the second run reports a single change on the vserver, and any additional runs of the playbook remain unchanged.
fatal: [localhost -> localhost]: FAILED! => {
"changed": true,
"diff": {
"timeout": "difference. ours: (<type 'float'>) 10.0 other: (<type 'int'>) 2"
},
"invocation": {
"module_args": {
"appflowlog": null,
"appfw_policybindings": null,
"authentication": null,
"authenticationhost": null,
"authn401": null,
"authnprofile": null,
"authnvsname": null,
"backuplbmethod": null,
"backuppersistencetimeout": null,
"bypassaaaa": null,
"cacheable": null,
"clttimeout": null,
"comment": null,
"connfailover": null,
"cookiename": null,
"datalength": null,
"dataoffset": null,
"dbprofilename": null,
"dbslb": null,
"disabled": false,
"disableprimaryondown": null,
"dns64": null,
"dnsprofilename": null,
"downstateflush": null,
"hashlength": null,
"healththreshold": null,
"httpprofilename": null,
"icmpvsrresponse": null,
"insertvserveripport": null,
"instance_ip": null,
"ipmask": null,
"ippattern": null,
"ipv46": "10.81.244.79",
"l2conn": null,
"lbmethod": null,
"listenpolicy": null,
"listenpriority": null,
"m": null,
"macmoderetainvlan": null,
"mas_proxy_call": false,
"maxautoscalemembers": null,
"minautoscalemembers": null,
"mssqlserverversion": null,
"mysqlcharacterset": null,
"mysqlprotocolversion": null,
"mysqlservercapabilities": null,
"mysqlserverversion": null,
"name": "gen-anstest",
"netmask": null,
"netprofile": null,
"newservicerequest": null,
"newservicerequestincrementinterval": null,
"newservicerequestunit": null,
"nitro_auth_token": null,
"nitro_pass": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"nitro_protocol": "https",
"nitro_timeout": 310.0,
"nitro_user": "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER",
"nsip": "10.81.228.156",
"oracleserverversion": null,
"persistencebackup": null,
"persistencetype": null,
"persistmask": null,
"port": 80,
"processlocal": null,
"push": null,
"pushlabel": null,
"pushmulticlients": null,
"pushvserver": null,
"range": null,
"recursionavailable": null,
"redirectportrewrite": null,
"redirurl": null,
"rhistate": null,
"rtspnat": null,
"save_config": true,
"servicebindings": null,
"servicegroupbindings": [
{
"servicegroupname": "test-service-group-1"
}
],
"servicetype": "HTTP",
"sessionless": null,
"skippersistency": null,
"sobackupaction": null,
"somethod": null,
"sopersistence": null,
"sopersistencetimeout": null,
"sothreshold": null,
"ssl_certkey": null,
"state": "present",
"tcpprofilename": null,
"timeout": 10.0,
"tosid": null,
"v6netmasklen": null,
"v6persistmasklen": null,
"validate_certs": false,
"vipheader": null
}
},
"loglines": [
"Applying actions for state present",
"Checking if lb vserver exists",
"Add lb vserver",
"service_bindings_identical",
"Getting configured service bindings",
"Getting actual service bindings",
"servicegroup_bindings_identical",
"Getting configured service group bindings",
"Getting actual service group bindings",
"len 1",
"sync_servicegroup_bindings",
"Getting actual service group bindings",
"Getting configured service group bindings",
"Adding servicegroup binding test-service-group-1",
"Checking policy bindings identical",
"Getting actual appfw policy bindings",
"Getting configured appfw policy bindings",
"Enabling lb server",
"Sanity checks for state present",
"Checking if lb vserver exists",
"Checking if configured lb vserver is identical"
],
"msg": "lb vserver is not configured correctly"
}
- name: Create load balancing vserver bound to servicegroup
delegate_to: localhost
citrix_adc_lb_vserver:
nsip: "{{ netscaler_host_ip }}"
nitro_protocol: "{{ netscaler_nitro_protocol }}"
nitro_user: "{{ netscaler_nitro_user }}"
nitro_pass: "{{ netscaler_nitro_password }}"
validate_certs: "{{ netscaler_validate_certs }}"
name: "{{ netscaler_virtual_server }}"
servicetype: HTTP
ipv46: "{{ vserver_ip }}"
port: 80
timeout: 10
servicegroupbindings:
- servicegroupname: "{{ netscaler_service_group }}"
environment:
no_proxy: "{{ netscaler_host_ip }}"
If I remove the timeout from the task, it completes successfully on the first run. Something strange is going on with that parameter, but the failure was repeatable, so I thought I would post it so it could get a look. Let me know if you need any additional information.