# Fluentd log handling
# + Elasticsearch + Kibana in Docker
# https://www.fluentd.org/guides/recipes/elasticsearch-and-s3
# This assumes docker has already been installed
# (by management-prometheus.yml)
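#
# Log flow, as configured by the plays below:
#   td-agent on each proxy/omero host
#     -> forward protocol (port 24224) -> fluentd aggregator container
#     -> elasticsearch container (port 9200) <- kibana container (port 5601)
#   with an elasticsearch-curator container expiring old indices.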
- hosts: "{{ idr_environment | default('idr') }}-management-hosts"
tasks:
- name: Create fluentd server configuration directory
become: yes
file:
path: /etc/fluentd
recurse: yes
state: directory
- name: Create data top level directory
become: yes
file:
path: /data
state: directory
owner: root
group: root
- name: Create fluentd aggregated logs directory
become: yes
file:
path: /data/fluentd
state: directory
owner: "{{ fluentd_uid }}"
group: "{{ fluentd_uid }}"
- name: Copy fluentd server configuration files
become: yes
template:
src: files/fluentd-server/fluent.conf.j2
dest: /etc/fluentd/fluent.conf
register: fluent_conf_status
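    # The rendered fluent.conf is not reproduced here; see
    # files/fluentd-server/fluent.conf.j2 for the actual template. As a
    # hypothetical sketch, an aggregator that accepts forwarded events and
    # writes them to elasticsearch typically looks something like:
    #   <source>
    #     @type forward
    #     port 24224
    #   </source>
    #   <match **>
    #     @type elasticsearch
    #     host elasticsearch
    #     port 9200
    #   </match>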
    - name: Create elasticsearch directory
      become: yes
      file:
        path: /data/elasticsearch
        state: directory
        # User id in elasticsearch Docker image
        owner: 1000
        group: 1000

    - name: Create docker network
      become: yes
      docker_network:
        name: fluent-es-kb
        state: present

    - name: Run docker elasticsearch
      become: yes
      docker_container:
        image: "{{ elasticsearch_docker_image }}"
        name: elasticsearch
        cleanup: True
        env:
          discovery.type: single-node
          ES_JAVA_OPTS: "-Xmx4096m"
        networks:
          - name: fluent-es-kb
        published_ports:
          - "9200:9200"
          #- "9300:9300"
        state: started
        restart_policy: always
        volumes:
          - /data/elasticsearch:/usr/share/elasticsearch/data
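    # Elasticsearch runs as a single node with data persisted under
    # /data/elasticsearch. If needed, its health can be checked from the
    # host with, for example:
    #   curl http://localhost:9200/_cluster/health?pretty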
    - name: Run docker fluentd
      become: yes
      docker_container:
        image: "{{ fluentd_docker_image }}"
        name: fluentd
        cleanup: True
        env:
          FLUENT_UID: "{{ fluentd_uid }}"
        networks:
          - name: fluent-es-kb
        published_ports:
          - "24224:24224/udp"
          - "24224:24224"
        restart: "{{ fluent_conf_status is changed }}"
        state: started
        restart_policy: always
        volumes:
          - /etc/fluentd/fluent.conf:/fluentd/etc/fluent.conf:ro
          - /data/fluentd:/data/fluentd
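    # Note the two distinct restart options above: `restart` forces a
    # one-off container restart whenever the templated fluent.conf has
    # changed, whereas `restart_policy: always` is the Docker daemon's
    # policy for restarting the container after failures or reboots.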
    - name: Run docker kibana
      become: yes
      docker_container:
        image: "{{ kibana_docker_image }}"
        name: kibana
        cleanup: True
        networks:
          - name: fluent-es-kb
        published_ports:
          - "5601:5601"
        state: started
        restart_policy: always
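    # Kibana is published on port 5601. The kibana-oss image points at
    # http://elasticsearch:9200 by default, which resolves via the shared
    # fluent-es-kb network, so no elasticsearch address is configured here.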
    - name: Run elasticsearch curator
      become: yes
      docker_container:
        image: "{{ elasticsearch_curator_docker_image }}"
        name: elasticsearch-curator
        cleanup: True
        env:
          OLDER_THAN_IN_DAYS: "{{ elasticsearch_expire_logs_days }}"
          INTERVAL_IN_HOURS: "24"
        networks:
          - name: fluent-es-kb
        state: started
        restart_policy: always
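    # As the variable names suggest, the curator container runs every
    # INTERVAL_IN_HOURS hours and deletes indices older than
    # elasticsearch_expire_logs_days (14 days by default), bounding disk
    # usage under /data/elasticsearch.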
  vars:
    fluentd_shared_key: "{{ idr_secret_fluentd_shared_key | default('fluentd') }}"
    fluentd_slack_token: "{{ idr_secret_management_slack_token | default(None) }}"
    fluentd_slack_channel: "{{ idr_logs_slack_channel | default('idr-logs') }}"
    fluentd_elasticsearch_host: elasticsearch
    fluentd_uid: "1010"
    elasticsearch_docker_image: "docker.elastic.co/elasticsearch/elasticsearch-oss:6.1.1"
    elasticsearch_curator_docker_image: "openmicroscopy/elasticsearch-curator:5.4.1"
    elasticsearch_expire_logs_days: "14"
    fluentd_docker_image: "openmicroscopy/fluentd:0.1.0"
    kibana_docker_image: "docker.elastic.co/kibana/kibana-oss:6.1.1"

# Load hostvars for management server
- hosts: >-
    {{ idr_environment | default('idr') }}-management-hosts
    {{ idr_parent_environment | default('idr') }}-management-hosts

- hosts: >
    {{ idr_environment | default('idr') }}-proxy-hosts
    {{ idr_environment | default('idr') }}-omero-hosts

  # TODO: Ideally we'd use the `validate:` option to check these config
  # files, but it's not possible to validate only a fragment.
  # Instead we need to create these files before applying the role.
  # However, we can only trigger the restart handler if the agent is
  # already installed.

  pre_tasks:

    - name: Check if td-agent already installed
      stat:
        path: /etc/init.d/td-agent
      register: _td_agent_st

    - name: Create fluentd conf.d
      become: yes
      file:
        path: /etc/td-agent/conf.d
        recurse: yes
        state: directory

    - name: Configure fluentd forwarding
      become: yes
      template:
        src: files/fluentd/forward-conf.j2
        dest: /etc/td-agent/conf.d/forward.conf
      notify:
        - restart fluentd if installed
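    # The forward-conf.j2 template is not reproduced here. As a
    # hypothetical sketch, a td-agent forwarding fragment typically looks
    # something like:
    #   <match **>
    #     @type forward
    #     <server>
    #       host {{ fluentd_server_address }}
    #       port 24224
    #     </server>
    #   </match>
    # with fluentd_shared_key presumably used to authenticate against the
    # aggregator.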
    - name: Copy fluentd configuration
      become: yes
      copy:
        src: "{{ item }}"
        dest: "/etc/td-agent/conf.d/{{ item | basename }}"
      with_items:
        # These are specific to each host-group
        - "{{ fluentd_source_configs }}"
      notify:
        - restart fluentd if installed
  handlers:

    - name: restart fluentd if installed
      become: yes
      systemd:
        daemon_reload: yes
        name: td-agent
        state: restarted
      when: _td_agent_st.stat.exists

  roles:
    - role: ome.fluentd

  vars:
    _monitoring_idr_environment: "{{ idr_parent_environment | default(idr_environment | default('idr')) + '-management-hosts' }}"
    fluentd_server_address: "{{ hostvars[groups[_monitoring_idr_environment][0]]['ansible_' + (idr_net_iface | default('eth0'))]['ipv4']['address'] }}"
    fluentd_shared_key: "{{ idr_secret_fluentd_shared_key | default('fluentd') }}"
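  # fluentd_server_address resolves the address that forwarders send logs
  # to: it takes the first host in the management host-group and reads the
  # IPv4 address of its idr_net_iface interface (eth0 by default) from the
  # hostvars gathered by the fact-gathering play above.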