Merge pull request 'Install Consul using the deb package' (#65) from use-deb-package-to-install-consul into master

Reviewed-on: #65
Kazuhiro MUSASHI 2020-11-28 16:23:27 +09:00
commit a82caa2ebd
21 changed files with 294 additions and 143 deletions

View File

@@ -18,11 +18,8 @@ dns = run_command(cmd).stdout.chomp
node.reverse_merge!({
'consul' => {
'base_binary_url' => 'https://releases.hashicorp.com/consul/',
'arch' => node['kernel']['machine'] =~ /x86_64/ ? 'amd64' : '386',
'tmp_path' => '/tmp/itamae_tmp/consul.zip',
'manager' => true,
'manager_hosts' => '["192.168.10.110", "192.168.10.101", "192.168.10.111", "192.168.10.115"]',
'manager' => false,
'manager_hosts' => '["192.168.10.101", "192.168.10.251", "192.168.10.252"]',
'ipaddr' => ipaddr,
'dns' => dns
}
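`node.reverse_merge!` layers these values underneath whatever the node already defines, so per-host attributes win and the hash above only fills in gaps. For flat hashes the semantics reduce to plain `Hash#merge`; a standalone Ruby illustration (hypothetical values, not from this repo):

    # Defaults from attributes.rb vs. a value set per host: the host value wins,
    # the default only fills in the missing key.
    defaults  = { 'manager' => false, 'dns' => '192.168.10.1' }
    per_host  = { 'manager' => true }                  # e.g. a Consul server node
    effective = defaults.merge(per_host)               # reverse merge, shallow form
    puts effective.inspect                             #=> {"manager"=>true, "dns"=>"192.168.10.1"}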

View File

@@ -1,7 +1,5 @@
include_recipe './attributes.rb'
include_recipe './prerequisites.rb'
include_recipe './install.rb'
include_recipe './setup.rb'

View File

@@ -1,3 +1,5 @@
package 'dnsmasq'
%w(dnsmasq resolvconf systemd-resolved).each do |s|
service s do
action :nothing

View File

@@ -0,0 +1,20 @@
[Unit]
Description="HashiCorp Consul - A service mesh solution"
Documentation=https://www.consul.io/
Requires=network-online.target
After=network-online.target
ConditionFileNotEmpty=/etc/consul.d/consul.hcl
[Service]
User=consul
Group=consul
ExecStartPre=/usr/bin/find /var/log/consul/ -type f -delete
ExecStart=/usr/bin/consul agent -config-dir=/etc/consul.d/
ExecReload=/bin/kill --signal HUP $MAINPID
KillMode=process
KillSignal=SIGTERM
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -1,53 +1,20 @@
# -------------------------------------------
# Calculating the latest `consul` version:
# -------------------------------------------
download_url = ''
tag_version = ''
# Install `Consul`:
KEYSRV = "https://apt.releases.hashicorp.com/gpg"
ID = "A3219F7B"
begin
require 'net/http'
uri = URI.parse('https://www.consul.io/downloads.html')
Timeout.timeout(3) do
response = Net::HTTP.get_response(uri)
if response.body =~ /consul_(\d+\.\d+\.\d+)/
tag_version = $1
download_url = \
"#{node['consul']['base_binary_url']}#{tag_version}/consul_#{tag_version}_linux_#{node['consul']['arch']}.zip"
end
end
rescue
# Abort the chef client process:
raise 'Cannot connect to https://www.consul.io/downloads.html'
execute "apt-key adv --keyserver #{KEYSRV} --recv-keys #{ID}" do
not_if 'apt-key list | grep HashiCorp'
end
# -------------------------------------------
# Main Part
# -------------------------------------------
# Retrieve the Ubuntu code:
DIST = run_command('lsb_release -cs').stdout.chomp
# Check the installed version to decide whether an update is needed
result = run_command("consul version | grep #{tag_version}", error: false)
if result.exit_status != 0
# Download:
execute "wget #{download_url} -O #{node['consul']['tmp_path']}"
# Unzip:
execute "unzip -qo #{node['consul']['tmp_path']}" do
cwd '/opt/consul/bin/'
end
file '/opt/consul/bin/consul' do
owner 'root'
group 'root'
mode '755'
end
# Create link:
link '/usr/local/bin/consul' do
user 'root'
to '/opt/consul/bin/consul'
end
# Deploy the `apt` sources:
template '/etc/apt/sources.list.d/consul.list' do
action :create
variables(distribution: DIST)
end
execute 'apt update'
package 'consul'
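With the deb-based install, `package 'consul'` tracks whatever the HashiCorp apt repository currently publishes, replacing the old download-and-unzip version check. If a fixed release is preferred, Itamae's package resource also takes a version attribute; a hedged sketch (the version string is illustrative, not part of this PR):

    # Hypothetical pin to one specific deb from the HashiCorp repository.
    package 'consul' do
      version '1.9.0'   # illustrative value; use the release you have validated
    end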

View File

@@ -1,14 +0,0 @@
# Ensure that `unzip` and `dnsmasq` are available:
%w( unzip dnsmasq ).each do |p|
package p do
action :install
end
end
%w(/etc/consul.d /var/opt/consul /opt/consul/bin).each do |d|
directory d do
owner 'root'
group 'root'
mode '755'
end
end

View File

@@ -1,12 +1,12 @@
remote_file '/etc/supervisor/conf.d/consul.conf' do
owner 'root'
group 'root'
mode '644'
if node['consul']['manager']
SRC = 'consul-server.hcl.erb'
else
SRC = 'consul-agent.hcl.erb'
end
template '/etc/consul.d/config.json' do
owner 'root'
group 'root'
template '/etc/consul.d/consul.hcl' do
owner 'consul'
group 'consul'
mode '644'
variables(manager: node['consul']['manager'],
@@ -14,19 +14,33 @@ template '/etc/consul.d/config.json' do
ipaddr: node['consul']['ipaddr'],
)
notifies :restart, 'service[supervisor]'
source "templates/etc/consul.d/#{SRC}"
notifies :restart, 'service[consul]'
end
directory '/var/log/consul/' do
owner 'consul'
group 'consul'
mode '0755'
end
remote_file '/etc/systemd/system/consul.service' do
owner 'root'
group 'root'
mode '0644'
notifies :restart, 'service[consul]'
end
remote_file '/etc/consul.d/service-consul.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
end
execute 'Reload supervisor' do
user 'root'
command '/usr/bin/supervisorctl update'
service 'consul' do
action [:enable, :start]
end
# iptables settings here:
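Since the rendered consul.hcl now restarts `service[consul]` directly, a template mistake would bounce Consul into a failing unit. A possible guard, not part of this PR, is to let Consul's own validator check the config directory during the run (hedged Itamae sketch using the stock `consul validate` subcommand):

    # Fail the Itamae run early if the rendered HCL in /etc/consul.d/ does not parse.
    execute 'validate consul configuration' do
      command 'consul validate /etc/consul.d/'
      user 'root'
    end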

View File

@@ -0,0 +1 @@
deb [arch=amd64] https://apt.releases.hashicorp.com <%= @distribution %> main
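Rendered with the `distribution` variable passed in from install.rb (for example `focal` on Ubuntu 20.04), this template yields the standard HashiCorp apt source line. A small Ruby/ERB sanity check (the distribution value is illustrative):

    # Render the one-line template above locally with an illustrative codename.
    require 'erb'
    @distribution = 'focal'
    line = 'deb [arch=amd64] https://apt.releases.hashicorp.com <%= @distribution %> main'
    puts ERB.new(line).result(binding)
    #=> deb [arch=amd64] https://apt.releases.hashicorp.com focal main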

View File

@@ -1,23 +0,0 @@
{
"datacenter": "aws",
<% if @manager then%>
"bootstrap_expect": 3,
"addresses": {
"http": "0.0.0.0"
},
"ui": true,
<% end %>
"bind_addr": "<%= @ipaddr %>",
"disable_remote_exec": false,
"disable_update_check": true,
"leave_on_terminate": true,
"retry_interval": "30s",
"data_dir": "/var/opt/consul",
"log_level": "INFO",
"enable_syslog": false,
"enable_script_checks": true,
"rejoin_after_leave": true,
"retry_join": <%= @manager_hosts %>,
"encrypt": "LPKrNBQZnJIc8tJpViI4ug==",
"server": <%= @manager %>
}

View File

@@ -0,0 +1,85 @@
# Full configuration options can be found at https://www.consul.io/docs/agent/options.html
# datacenter
# This flag controls the datacenter in which the agent is running. If not provided,
# it defaults to "dc1". Consul has first-class support for multiple datacenters, but
# it relies on proper configuration. Nodes in the same datacenter should be on a
# single LAN.
#datacenter = "aws"
# data_dir
# This flag provides a data directory for the agent to store state. This is required
# for all agents. The directory should be durable across reboots. This is especially
# critical for agents that are running in server mode as they must be able to persist
# cluster state. Additionally, the directory must support the use of filesystem
# locking, meaning some types of mounted folders (e.g. VirtualBox shared folders) may
# not be suitable.
data_dir = "/opt/consul"
# client_addr
# The address to which Consul will bind client interfaces, including the HTTP and DNS
# servers. By default, this is "127.0.0.1", allowing only loopback connections. In
# Consul 1.0 and later this can be set to a space-separated list of addresses to bind
# to, or a go-sockaddr template that can potentially resolve to multiple addresses.
client_addr = "0.0.0.0"
# ui
# Enables the built-in web UI server and the required HTTP routes. This eliminates
# the need to maintain the Consul web UI files separately from the binary.
ui = false
# server
# This flag is used to control if an agent is in server or client mode. When provided,
# an agent will act as a Consul server. Each Consul cluster must have at least one
# server and ideally no more than 5 per datacenter. All servers participate in the Raft
# consensus algorithm to ensure that transactions occur in a consistent, linearizable
# manner. Transactions modify cluster state, which is maintained on all server nodes to
# ensure availability in the case of node failure. Server nodes also participate in a
# WAN gossip pool with server nodes in other datacenters. Servers act as gateways to
# other datacenters and forward traffic as appropriate.
#server = true
# bootstrap_expect
# This flag provides the number of expected servers in the datacenter. Either this value
# should not be provided or the value must agree with other servers in the cluster. When
# provided, Consul waits until the specified number of servers are available and then
# bootstraps the cluster. This allows an initial leader to be elected automatically.
# This cannot be used in conjunction with the legacy -bootstrap flag. This flag requires
# -server mode.
#bootstrap_expect=3
# encrypt
# Specifies the secret key to use for encryption of Consul network traffic. This key must
# be 32-bytes that are Base64-encoded. The easiest way to create an encryption key is to
# use consul keygen. All nodes within a cluster must share the same encryption key to
# communicate. The provided key is automatically persisted to the data directory and loaded
# automatically whenever the agent is restarted. This means that to encrypt Consul's gossip
# protocol, this option only needs to be provided once on each agent's initial startup
# sequence. If it is provided after Consul has been initialized with an encryption key,
# then the provided key is ignored and a warning will be displayed.
encrypt = "LPKrNBQZnJIc8tJpViI4ug=="
# retry_join
# Similar to -join but allows retrying a join until it is successful. Once it joins
# successfully to a member in a list of members it will never attempt to join again.
# Agents will then solely maintain their membership via gossip. This is useful for
# cases where you know the address will eventually be available. This option can be
# specified multiple times to specify multiple agents to join. The value can contain
# IPv4, IPv6, or DNS addresses. In Consul 1.1.0 and later this can be set to a go-sockaddr
# template. If Consul is running on the non-default Serf LAN port, this must be specified
# as well. IPv6 must use the "bracketed" syntax. If multiple values are given, they are
# tried and retried in the order listed until the first succeeds. Here are some examples:
retry_join = <%= @manager_hosts %>
bind_addr = "<%= @ipaddr %>"
disable_remote_exec = false
disable_update_check = false
enable_local_script_checks = true
log_file = "/var/log/consul/"
log_rotate_max_files = -1
log_level = "INFO"
log_json = false
log_rotate_bytes = 1000000
rejoin_after_leave = true
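The `encrypt` value above is a shared gossip key committed into the template; as the comment notes, replacements come from `consul keygen`. A hedged one-liner for generating a new key out of band (plain Ruby shelling out to the consul CLI; rotating the key itself is outside this PR):

    # Print a fresh 32-byte, Base64-encoded gossip key for all cluster members.
    puts `consul keygen`.chomp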

View File

@@ -0,0 +1,85 @@
# Full configuration options can be found at https://www.consul.io/docs/agent/options.html
# datacenter
# This flag controls the datacenter in which the agent is running. If not provided,
# it defaults to "dc1". Consul has first-class support for multiple datacenters, but
# it relies on proper configuration. Nodes in the same datacenter should be on a
# single LAN.
#datacenter = "aws"
# data_dir
# This flag provides a data directory for the agent to store state. This is required
# for all agents. The directory should be durable across reboots. This is especially
# critical for agents that are running in server mode as they must be able to persist
# cluster state. Additionally, the directory must support the use of filesystem
# locking, meaning some types of mounted folders (e.g. VirtualBox shared folders) may
# not be suitable.
data_dir = "/opt/consul"
# client_addr
# The address to which Consul will bind client interfaces, including the HTTP and DNS
# servers. By default, this is "127.0.0.1", allowing only loopback connections. In
# Consul 1.0 and later this can be set to a space-separated list of addresses to bind
# to, or a go-sockaddr template that can potentially resolve to multiple addresses.
client_addr = "0.0.0.0"
# ui
# Enables the built-in web UI server and the required HTTP routes. This eliminates
# the need to maintain the Consul web UI files separately from the binary.
ui = true
# server
# This flag is used to control if an agent is in server or client mode. When provided,
# an agent will act as a Consul server. Each Consul cluster must have at least one
# server and ideally no more than 5 per datacenter. All servers participate in the Raft
# consensus algorithm to ensure that transactions occur in a consistent, linearizable
# manner. Transactions modify cluster state, which is maintained on all server nodes to
# ensure availability in the case of node failure. Server nodes also participate in a
# WAN gossip pool with server nodes in other datacenters. Servers act as gateways to
# other datacenters and forward traffic as appropriate.
server = true
# bootstrap_expect
# This flag provides the number of expected servers in the datacenter. Either this value
# should not be provided or the value must agree with other servers in the cluster. When
# provided, Consul waits until the specified number of servers are available and then
# bootstraps the cluster. This allows an initial leader to be elected automatically.
# This cannot be used in conjunction with the legacy -bootstrap flag. This flag requires
# -server mode.
bootstrap_expect=3
# encrypt
# Specifies the secret key to use for encryption of Consul network traffic. This key must
# be 32-bytes that are Base64-encoded. The easiest way to create an encryption key is to
# use consul keygen. All nodes within a cluster must share the same encryption key to
# communicate. The provided key is automatically persisted to the data directory and loaded
# automatically whenever the agent is restarted. This means that to encrypt Consul's gossip
# protocol, this option only needs to be provided once on each agent's initial startup
# sequence. If it is provided after Consul has been initialized with an encryption key,
# then the provided key is ignored and a warning will be displayed.
encrypt = "LPKrNBQZnJIc8tJpViI4ug=="
# retry_join
# Similar to -join but allows retrying a join until it is successful. Once it joins
# successfully to a member in a list of members it will never attempt to join again.
# Agents will then solely maintain their membership via gossip. This is useful for
# cases where you know the address will eventually be available. This option can be
# specified multiple times to specify multiple agents to join. The value can contain
# IPv4, IPv6, or DNS addresses. In Consul 1.1.0 and later this can be set to a go-sockaddr
# template. If Consul is running on the non-default Serf LAN port, this must be specified
# as well. IPv6 must use the "bracketed" syntax. If multiple values are given, they are
# tried and retried in the order listed until the first succeeds. Here are some examples:
retry_join = <%= @manager_hosts %>
bind_addr = "<%= @ipaddr %>"
disable_remote_exec = false
disable_update_check = false
enable_local_script_checks = true
log_file = "/var/log/consul/"
log_rotate_max_files = -1
log_level = "INFO"
log_json = false
log_rotate_bytes = 1000000
rejoin_after_leave = true
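With `bootstrap_expect = 3`, leader election waits until the three servers from `manager_hosts` have joined. Once they have, membership and Raft peers can be inspected from any node with the consul binary; a hedged sketch shelling out from Ruby (stock consul subcommands):

    # Show gossip members and the current Raft peer set after convergence.
    puts `consul members`
    puts `consul operator raft list-peers`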

View File

@@ -13,11 +13,15 @@ end
# Deploy `consul` service configuration for `go-mmproxy`:
remote_file '/etc/consul.d/service-go-mmproxy.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'consul' do
action :nothing
end
# Firewall settings here:

View File

@@ -70,11 +70,15 @@ end
# Deploy `consul` service configuration for `gitea`:
remote_file '/etc/consul.d/service-gitea.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'consul' do
action :nothing
end
# Deploy `promtail` configuration for `gitea`:

View File

@@ -42,11 +42,15 @@ end
# Deploy `consul` config for `grafana`:
remote_file '/etc/consul.d/service-grafana.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'consul' do
action :nothing
end
# Firewall settings here:

View File

@@ -32,13 +32,17 @@ end
# Deploy `consul` service configuration for `loki`:
template '/etc/consul.d/service-loki.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
variables(ipaddr: node['consul']['ipaddr'])
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'consul' do
action :nothing
end
# Deploy `promtail` configuration for `loki`:

View File

@@ -18,14 +18,13 @@ end
# Deploy `consul` config for `filestat_exporter`:
remote_file '/etc/consul.d/service-filestat_exporter.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'supervisor' do
service 'consul' do
action :nothing
end

View File

@@ -9,14 +9,14 @@ end
# Deploy `consul` config for `node_exporter`:
remote_file '/etc/consul.d/service-node_exporter.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'supervisor' do
service 'consul' do
action :nothing
end

View File

@@ -25,15 +25,15 @@ end
# Deploy `consul` service configuration for `prometheus`:
remote_file '/etc/consul.d/service-prometheus.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
# Restart the `supervisor`:
service 'supervisor' do
# Reload the `consul` service:
service 'consul' do
action :nothing
end

View File

@@ -14,15 +14,15 @@ end
# Deploy `consul` config:
remote_file '/etc/consul.d/service-snmp_exporter.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
# Restart the `supervisor`:
service 'supervisor' do
# Reload the `consul` service:
service 'consul' do
action :nothing
end

View File

@@ -262,7 +262,7 @@ scrape_configs:
pipeline_stages:
- match:
selector: '{job="init"} |~ "(apt|Message of the Day|motd-news|Temporary Directories|man-db|fwupd|Firmware update daemon|systemd-tmpfiles-clean.service|Rotate log files|logrotate.service|[Pp]ackage[Kk]it|/run/dbus/system_bus_socket|[Ss]nap|lxd|Reloading|Mount unit|ext4 Metadata|e2scrub_all.service)"'
selector: '{job="init"} |~ "(apt|Message of the Day|motd-news|Temporary Directories|man-db|fwupd|Firmware update daemon|systemd-tmpfiles-clean.service|Rotate log files|logrotate.service|[Pp]ackage[Kk]it|/run/dbus/system_bus_socket|[Ss]nap|lxd|Reloading|Mount unit|ext4 Metadata|e2scrub_all.service|docker)"'
stages:
- template:
source: level
@@ -272,7 +272,7 @@ scrape_configs:
level:
- match:
selector: '{job="init"} !~ "(apt|Message of the Day|motd-news|Temporary Directories|man-db|fwupd|Firmware update daemon|systemd-tmpfiles-clean.service|Rotate log files|logrotate.service|[Pp]ackage[Kk]it|/run/dbus/system_bus_socket|[Ss]nap|lxd|Reloading|Mount unit|ext4 Metadata|e2scrub_all.service)"'
selector: '{job="init"} !~ "(apt|Message of the Day|motd-news|Temporary Directories|man-db|fwupd|Firmware update daemon|systemd-tmpfiles-clean.service|Rotate log files|logrotate.service|[Pp]ackage[Kk]it|/run/dbus/system_bus_socket|[Ss]nap|lxd|Reloading|Mount unit|ext4 Metadata|e2scrub_all.service|docker)"'
stages:
- template:
source: level
@@ -356,14 +356,14 @@ scrape_configs:
job: consul
hostname: <%= @HOSTNAME %>
level: info
__path__: /var/log/supervisor/consul.log
__path__: /var/log/consul/consul-*.log
pipeline_stages:
- match:
selector: '{job="consul"}'
stages:
- regex:
expression: '^ +(?P<timestamp>\d+-\d+-[^T]+T\d+:\d+:\d+\.\d+\+\d+) \[(?P<level>[^\]]+)\] *(?P<message>.+)$'
expression: '^(?P<timestamp>\d+-\d+-[^T]+T\d+:\d+:\d+\.\d+\+\d+) \[(?P<level>[^\]]+)\] *(?P<message>.+)$'
- timestamp:
source: timestamp
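The anchored regex changed along with the log path, dropping the leading whitespace match. A quick Ruby approximation for sanity-checking the new expression (the sample log line is illustrative):

    # Ruby port of the promtail expression above, run against an illustrative line.
    line = '2020-11-28T16:23:27.123+0900 [INFO]  agent: Consul agent running!'
    re   = /^(?<timestamp>\d+-\d+-[^T]+T\d+:\d+:\d+\.\d+\+\d+) \[(?<level>[^\]]+)\] *(?<message>.+)$/
    m    = re.match(line)
    puts m[:level]      #=> INFO
    puts m[:message]    #=> agent: Consul agent running!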

View File

@@ -50,13 +50,17 @@ end
# Deploy `consul` service configuration for `vector-syslog`:
template '/etc/consul.d/service-vector-syslog.json' do
owner 'root'
group 'root'
owner 'consul'
group 'consul'
mode '644'
variables(ipaddr: node['vector']['ipaddr'])
notifies :restart, 'service[supervisor]'
notifies :reload, 'service[consul]'
end
service 'consul' do
action :nothing
end
template '/etc/promtail/syslog.yaml' do