diff --git a/cookbooks/base/default.rb b/cookbooks/base/default.rb index a20739c..c2824b7 100644 --- a/cookbooks/base/default.rb +++ b/cookbooks/base/default.rb @@ -66,17 +66,9 @@ include_recipe './lsyncd.rb' # Install starship command: include_recipe './starship.rb' -# recipes for Ubuntu 16.04 -if node['platform_version'].to_f == 16.04 - # ntp configurations - include_recipe './ntp.rb' - - # misc recipe - include_recipe './unnecessary.rb' -end - -# recipes for Ubuntu 20.04 -if node['platform_version'].to_f == 20.04 +# recipes for Ubuntu 20.04 and later +case node['platform_version'] +when "20.04", "22.04" remote_file '/etc/multipath.conf' do owner 'root' group 'root' @@ -88,6 +80,33 @@ if node['platform_version'].to_f == 20.04 service 'multipath-tools' do action :nothing end + + package 'systemd-timesyncd' + + service 'systemd-timesyncd' do + action :enable + end +end + +case node['platform_version'] +when "20.04" + remote_file '/etc/systemd/timesyncd.conf' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[systemd-timesyncd]' + end +when "22.04" + remote_file '/etc/systemd/timesyncd.conf' do + owner 'root' + group 'root' + mode '0644' + + source 'files/etc/systemd/timesyncd.2204.conf' + + notifies :restart, 'service[systemd-timesyncd]' + end end # AWS EC2 Swap Setting: diff --git a/cookbooks/base/files/etc/apt/apt.conf.d/50unattended-upgrades b/cookbooks/base/files/etc/apt/apt.conf.d/50unattended-upgrades index d31a4d2..307996d 100644 --- a/cookbooks/base/files/etc/apt/apt.conf.d/50unattended-upgrades +++ b/cookbooks/base/files/etc/apt/apt.conf.d/50unattended-upgrades @@ -129,3 +129,15 @@ Unattended-Upgrade::Automatic-Reboot "false"; // Allow package downgrade if Pin-Priority exceeds 1000 // Unattended-Upgrade::Allow-downgrade "false"; + +// When APT fails to mark a package to be upgraded or installed try adjusting +// candidates of related packages to help APT's resolver in finding a solution +// where the package can be upgraded or 
installed. +// This is a workaround until APT's resolver is fixed to always find a +// solution if it exists. (See Debian bug #711128.) +// The fallback is enabled by default, except on Debian's sid release because +// uninstallable packages are frequent there. +// Disabling the fallback speeds up unattended-upgrades when there are +// uninstallable packages at the expense of rarely keeping back packages which +// could be upgraded or installed. +// Unattended-Upgrade::Allow-APT-Mark-Fallback "true"; diff --git a/cookbooks/base/files/etc/ntp.conf b/cookbooks/base/files/etc/ntp.conf deleted file mode 100644 index cd75b19..0000000 --- a/cookbooks/base/files/etc/ntp.conf +++ /dev/null @@ -1,66 +0,0 @@ -# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help - -driftfile /var/lib/ntp/ntp.drift - -# Enable this if you want statistics to be logged. -#statsdir /var/log/ntpstats/ - -statistics loopstats peerstats clockstats -filegen loopstats file loopstats type day enable -filegen peerstats file peerstats type day enable -filegen clockstats file clockstats type day enable - -# Specify one or more NTP servers. - -# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board -# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for -# more information. -pool 0.ubuntu.pool.ntp.org iburst -pool 1.ubuntu.pool.ntp.org iburst -pool 2.ubuntu.pool.ntp.org iburst -pool 3.ubuntu.pool.ntp.org iburst - -# Use Ubuntu's ntp server as a fallback. -pool ntp.ubuntu.com - -# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for -# details. The web page -# might also be helpful. -# -# Note that "restrict" applies to both servers and clients, so a configuration -# that might be intended to block requests from certain clients could also end -# up blocking replies from your own upstream servers. - -# By default, exchange time with everybody, but don't allow configuration. 
-restrict -4 default kod notrap nomodify nopeer noquery limited -restrict -6 default kod notrap nomodify nopeer noquery limited - -# Local users may interrogate the ntp server more closely. -restrict 127.0.0.1 -restrict ::1 - -# Needed for adding pool entries -restrict source notrap nomodify noquery - -# Clients from this (example!) subnet have unlimited access, but only if -# cryptographically authenticated. -#restrict 192.168.123.0 mask 255.255.255.0 notrust - - -# If you want to provide time to your local subnet, change the next line. -# (Again, the address is an example only.) -#broadcast 192.168.123.255 - -# If you want to listen to time broadcasts on your local subnet, de-comment the -# next lines. Please do this only if you trust everybody on the network! -#disable auth -#broadcastclient - -#Changes recquired to use pps synchonisation as explained in documentation: -#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918 - -#server 127.127.8.1 mode 135 prefer # Meinberg GPS167 with PPS -#fudge 127.127.8.1 time1 0.0042 # relative to PPS for my hardware - -#server 127.127.22.1 # ATOM(PPS) -#fudge 127.127.22.1 flag3 1 # enable PPS API diff --git a/cookbooks/base/files/etc/ssh/sshd_config.2204 b/cookbooks/base/files/etc/ssh/sshd_config.2204 new file mode 100644 index 0000000..bf29049 --- /dev/null +++ b/cookbooks/base/files/etc/ssh/sshd_config.2204 @@ -0,0 +1,123 @@ + +# This is the sshd server system-wide configuration file. See +# sshd_config(5) for more information. + +# This sshd was compiled with PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games + +# The strategy used for options in the default sshd_config shipped with +# OpenSSH is to specify options with their default value where +# possible, but leave them commented. Uncommented options override the +# default value. 
+ +Include /etc/ssh/sshd_config.d/*.conf + +Port 10022 +#AddressFamily any +#ListenAddress 0.0.0.0 +#ListenAddress :: + +#HostKey /etc/ssh/ssh_host_rsa_key +#HostKey /etc/ssh/ssh_host_ecdsa_key +#HostKey /etc/ssh/ssh_host_ed25519_key + +# Ciphers and keying +#RekeyLimit default none + +# Logging +#SyslogFacility AUTH +#LogLevel INFO + +# Authentication: + +#LoginGraceTime 2m +PermitRootLogin no +#StrictModes yes +#MaxAuthTries 6 +#MaxSessions 10 + +#PubkeyAuthentication yes + +# Expect .ssh/authorized_keys2 to be disregarded by default in future. +#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2 + +#AuthorizedPrincipalsFile none + +#AuthorizedKeysCommand none +#AuthorizedKeysCommandUser nobody + +# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts +#HostbasedAuthentication no +# Change to yes if you don't trust ~/.ssh/known_hosts for +# HostbasedAuthentication +#IgnoreUserKnownHosts no +# Don't read the user's ~/.rhosts and ~/.shosts files +#IgnoreRhosts yes + +# To disable tunneled clear text passwords, change to no here! +PasswordAuthentication no +#PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +KbdInteractiveAuthentication no + +# Kerberos options +#KerberosAuthentication no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes +#KerberosGetAFSToken no + +# GSSAPI options +#GSSAPIAuthentication no +#GSSAPICleanupCredentials yes +#GSSAPIStrictAcceptorCheck yes +#GSSAPIKeyExchange no + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the KbdInteractiveAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via KbdInteractiveAuthentication may bypass +# the setting of "PermitRootLogin without-password". 
+# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and KbdInteractiveAuthentication to 'no'. +UsePAM yes + +#AllowAgentForwarding yes +#AllowTcpForwarding yes +#GatewayPorts no +X11Forwarding yes +#X11DisplayOffset 10 +#X11UseLocalhost yes +#PermitTTY yes +PrintMotd no +#PrintLastLog yes +#TCPKeepAlive yes +#PermitUserEnvironment no +#Compression delayed +#ClientAliveInterval 0 +#ClientAliveCountMax 3 +#UseDNS no +#PidFile /run/sshd.pid +#MaxStartups 10:30:100 +#PermitTunnel no +#ChrootDirectory none +#VersionAddendum none + +# no default banner path +#Banner none + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +# override default of no subsystems +Subsystem sftp /usr/lib/openssh/sftp-server + +# Example of overriding settings on a per-user basis +#Match User anoncvs +# X11Forwarding no +# AllowTcpForwarding no +# PermitTTY no +# ForceCommand cvs server +#PasswordAuthentication yes diff --git a/cookbooks/base/files/etc/systemd/timesyncd.2204.conf b/cookbooks/base/files/etc/systemd/timesyncd.2204.conf new file mode 100644 index 0000000..0357e4c --- /dev/null +++ b/cookbooks/base/files/etc/systemd/timesyncd.2204.conf @@ -0,0 +1,20 @@ +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the timesyncd.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# See timesyncd.conf(5) for details. 
+ +[Time] +NTP=192.168.10.1 +#FallbackNTP=ntp.ubuntu.com +#RootDistanceMaxSec=5 +#PollIntervalMinSec=32 +#PollIntervalMaxSec=2048 diff --git a/cookbooks/base/files/etc/systemd/timesyncd.conf b/cookbooks/base/files/etc/systemd/timesyncd.conf new file mode 100644 index 0000000..d399529 --- /dev/null +++ b/cookbooks/base/files/etc/systemd/timesyncd.conf @@ -0,0 +1,19 @@ +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Entries in this file show the compile time defaults. +# You can change settings by editing this file. +# Defaults can be restored by simply deleting this file. +# +# See timesyncd.conf(5) for details. + +[Time] +NTP=192.168.10.1 +#FallbackNTP=ntp.ubuntu.com +#RootDistanceMaxSec=5 +#PollIntervalMinSec=32 +#PollIntervalMaxSec=2048 diff --git a/cookbooks/base/ntp.rb b/cookbooks/base/ntp.rb deleted file mode 100644 index be36310..0000000 --- a/cookbooks/base/ntp.rb +++ /dev/null @@ -1,13 +0,0 @@ -package 'ntp' - -remote_file '/etc/ntp.conf' do - owner 'root' - group 'root' - mode '644' - - notifies :restart, 'service[ntp]' -end - -service 'ntp' do - action :nothing -end diff --git a/cookbooks/base/ssh.rb b/cookbooks/base/ssh.rb index 947c023..60c7c4a 100644 --- a/cookbooks/base/ssh.rb +++ b/cookbooks/base/ssh.rb @@ -8,7 +8,17 @@ execute 'ufw allow 10022' do end # Deploy the `sshd` configuration file: -case run_command('grep VERSION_ID /etc/os-release | awk -F\" \'{print $2}\'').stdout.chomp +case node['platform_version'] +when "22.04" + remote_file '/etc/ssh/sshd_config' do + user 'root' + owner 'root' + group 'root' + mode '644' + + source 'files/etc/ssh/sshd_config.2204' + end + when "20.04" remote_file '/etc/ssh/sshd_config' do user 'root' @@ -28,6 +38,7 @@ when "18.04" source 'files/etc/ssh/sshd_config.1804' 
end + else remote_file '/etc/ssh/sshd_config' do user 'root' diff --git a/cookbooks/base/timezone.rb b/cookbooks/base/timezone.rb index 2d7f899..548851b 100644 --- a/cookbooks/base/timezone.rb +++ b/cookbooks/base/timezone.rb @@ -1,5 +1,5 @@ -case run_command('grep VERSION_ID /etc/os-release | awk -F\" \'{print $2}\'').stdout.chomp -when "18.04", "20.04" +case node['platform_version'] +when "18.04", "20.04", "22.04" execute 'timedatectl set-timezone Asia/Tokyo' do not_if 'timedatectl | grep Tokyo' end diff --git a/cookbooks/base/unattended-upgrade.rb b/cookbooks/base/unattended-upgrade.rb index 4ab6b59..07adc5f 100644 --- a/cookbooks/base/unattended-upgrade.rb +++ b/cookbooks/base/unattended-upgrade.rb @@ -45,7 +45,7 @@ when "18.04" not_if 'test -e /var/log/cron-apt/log' end -when '20.04' +when '20.04', '22.04' %w(20auto-upgrades 50unattended-upgrades).each do |conf| remote_file "/etc/apt/apt.conf.d/#{conf}" do owner 'root' diff --git a/cookbooks/base/unnecessary.rb b/cookbooks/base/unnecessary.rb deleted file mode 100644 index 2132304..0000000 --- a/cookbooks/base/unnecessary.rb +++ /dev/null @@ -1,5 +0,0 @@ -%w( apparmor iscsid lxc lxcfs lxd-containers lxd open-iscsi ).each do |s| - service s do - action :disable - end -end diff --git a/cookbooks/blog/files/etc/nginx/sites-available/blog b/cookbooks/blog/files/etc/nginx/sites-available/blog deleted file mode 100644 index 549d9b5..0000000 --- a/cookbooks/blog/files/etc/nginx/sites-available/blog +++ /dev/null @@ -1,90 +0,0 @@ -server { - # allow access from localhost - listen 80 reuseport backlog=1024; - listen 443 ssl http2 backlog=1024; - server_name blog.kazu634.com; - - ssl_certificate /etc/letsencrypt/live/blog.kazu634.com/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/blog.kazu634.com/privkey.pem; - ssl_dhparam /etc/letsencrypt/live/blog.kazu634.com/dhparams_4096.pem; - - ssl_session_cache shared:SSL:3m; - ssl_buffer_size 4k; - ssl_session_timeout 10m; - - ssl_session_tickets on; - 
ssl_session_ticket_key /etc/letsencrypt/live/blog.kazu634.com/ticket.key; - - ssl_protocols TLSv1.2 TLSv1.1 TLSv1; - ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DES-CBC3-SHA; - ssl_prefer_server_ciphers on; - - ssl_stapling on; - ssl_stapling_verify on; - resolver 8.8.4.4 8.8.8.8 valid=300s; - resolver_timeout 10s; - - # Enable HSTS (HTTP Strict Transport Security) - add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload"; - - root /var/www/blog; - index index.html index.htm; - - access_log /var/log/nginx/blog.access.log ltsv; - error_log /var/log/nginx/blog.error.log; - - location / { - gzip on; - gunzip on; - gzip_vary on; - - # http2 server push: - http2_push_preload on; - - http2_push /css/sanitize.css; - http2_push /css/responsive.css; - http2_push /css/highlight_monokai.css; - http2_push /css/theme.css; - http2_push /css/custom.css; - http2_push /images/profile.png; - http2_push /js/highlight.pack.js; - - if (-e "/tmp/maintenance") { - return 503; - } - - location /feed { - return 301 http://blog.kazu634.com/index.xml; - } - - location /wp-content { - return 404; - } - - location ~* \.css { - gzip_static always; - - expires max; - } - - location ~* \.js { - gzip_static always; - - expires max; - } - - location /images { - gzip_static always; - - expires max; - } - - location = /favicon.ico { - access_log off; - empty_gif; - expires max; - } - - try_files $uri $uri/ /index.html; - } -} diff --git a/cookbooks/consul/attributes.rb b/cookbooks/consul/attributes.rb index 26c1599..9520618 100644 --- a/cookbooks/consul/attributes.rb +++ b/cookbooks/consul/attributes.rb @@ -13,7 +13,7 @@ 
else end ipaddr = run_command(cmd).stdout.chomp -cmd = 'grep nameserver /run/systemd/resolve/resolv.conf | grep -v 8.8.8.8 | grep -v 127.0.0.1 | perl -pe "s/nameserver //g" | perl -pe "s/\n/ /g"' +cmd = 'grep nameserver /run/systemd/resolve/resolv.conf | grep -v 8.8.8.8 | grep -v 127.0.0.1 | perl -pe "s/nameserver //g" | sort | uniq | perl -pe "s/\n/ /g"' dns = run_command(cmd).stdout.chomp node.reverse_merge!({ @@ -23,6 +23,6 @@ node.reverse_merge!({ 'ipaddr' => ipaddr, 'dns' => dns, 'encrypt' => 's2T3XUTb9MjHYOw8I820O5YkN2G6eJrjLjJRTnEAKoM=', - 'token' => 'acb7096c-dcda-775a-b52c-b47c96b38d0e' + 'token' => '63de6edb-0cb0-de95-d5f1-7facf616c26d' } }) diff --git a/cookbooks/consul/dnsmasq.rb b/cookbooks/consul/dnsmasq.rb index 3e08fb1..a048bb2 100644 --- a/cookbooks/consul/dnsmasq.rb +++ b/cookbooks/consul/dnsmasq.rb @@ -7,7 +7,27 @@ package 'dnsmasq' end case run_command('grep VERSION_ID /etc/os-release | awk -F\" \'{print $2}\'').stdout.chomp -when "20.04", "22.04" +when "22.04" + template '/etc/systemd/resolved.conf' do + owner 'root' + group 'root' + mode '644' + + source 'templates/etc/systemd/resolved.conf.2204.erb' + variables(dns: node['consul']['dns']) + + notifies :restart, 'service[systemd-resolved]', :immediately + end + + remote_file '/etc/dnsmasq.conf' do + owner 'root' + group 'root' + mode '644' + + notifies :restart, 'service[dnsmasq]', :immediately + end + +when "20.04" template '/etc/systemd/resolved.conf' do owner 'root' group 'root' diff --git a/cookbooks/consul/files/etc/dnsmasq.conf b/cookbooks/consul/files/etc/dnsmasq.conf index b1e342a..ee53ded 100644 --- a/cookbooks/consul/files/etc/dnsmasq.conf +++ b/cookbooks/consul/files/etc/dnsmasq.conf @@ -63,7 +63,6 @@ strict-order # Add other name servers here, with domain specs if they are for # non-public domains. 
-#server=/localnet/192.168.0.1 server=/consul/127.0.0.1#8600 # Example of routing PTR queries to nameservers: this will send all @@ -91,7 +90,7 @@ server=/consul/127.0.0.1#8600 # server=10.1.2.3@eth1 # and this sets the source (ie local) address used to talk to -# 10.1.2.3 to 192.168.1.1 port 55 (there must be a interface with that +# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that # IP on the machine, obviously). # server=10.1.2.3@192.168.1.1#55 @@ -190,7 +189,7 @@ server=/consul/127.0.0.1#8600 # add names to the DNS for the IPv6 address of SLAAC-configured dual-stack # hosts. Use the DHCPv4 lease to derive the name, network segment and # MAC address and assume that the host will also have an -# IPv6 address calculated using the SLAAC alogrithm. +# IPv6 address calculated using the SLAAC algorithm. #dhcp-range=1234::, ra-names # Do Router Advertisements, BUT NOT DHCP for this subnet. @@ -211,7 +210,7 @@ server=/consul/127.0.0.1#8600 #dhcp-range=1234::, ra-stateless, ra-names # Do router advertisements for all subnets where we're doing DHCPv6 -# Unless overriden by ra-stateless, ra-names, et al, the router +# Unless overridden by ra-stateless, ra-names, et al, the router # advertisements will have the M and O bits set, so that the clients # get addresses and configuration from DHCPv6, and the A bit reset, so the # clients don't use SLAAC addresses. @@ -252,7 +251,7 @@ server=/consul/127.0.0.1#8600 # the IP address 192.168.0.60 #dhcp-host=id:01:02:02:04,192.168.0.60 -# Always give the Infiniband interface with hardware address +# Always give the InfiniBand interface with hardware address # 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the # ip address 192.168.0.61. 
The client id is derived from the prefix # ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of @@ -289,7 +288,7 @@ server=/consul/127.0.0.1#8600 # Give a fixed IPv6 address and name to client with # DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2 # Note the MAC addresses CANNOT be used to identify DHCPv6 clients. -# Note also the they [] around the IPv6 address are obilgatory. +# Note also that the [] around the IPv6 address are obligatory. #dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5] # Ignore any clients which are not specified in dhcp-host lines @@ -355,11 +354,11 @@ server=/consul/127.0.0.1#8600 # Set option 58 client renewal time (T1). Defaults to half of the # lease time if not specified. (RFC2132) -#dhcp-option=option:T1:1m +#dhcp-option=option:T1,1m # Set option 59 rebinding time (T2). Defaults to 7/8 of the # lease time if not specified. (RFC2132) -#dhcp-option=option:T2:2m +#dhcp-option=option:T2,2m # Set the NTP time server address to be the same machine as # is running dnsmasq @@ -437,22 +436,22 @@ server=/consul/127.0.0.1#8600 #dhcp-option-force=211,30i # Set the boot filename for netboot/PXE. You will only need -# this is you want to boot machines over the network and you will need -# a TFTP server; either dnsmasq's built in TFTP server or an +# this if you want to boot machines over the network and you will need +# a TFTP server; either dnsmasq's built-in TFTP server or an # external one. (See below for how to enable the TFTP server.) #dhcp-boot=pxelinux.0 # The same as above, but use custom tftp-server instead machine running dnsmasq #dhcp-boot=pxelinux,server.name,192.168.1.100 -# Boot for Etherboot gPXE. The idea is to send two different -# filenames, the first loads gPXE, and the second tells gPXE what to -# load. The dhcp-match sets the gpxe tag for requests from gPXE. -#dhcp-match=set:gpxe,175 # gPXE sends a 175 option. -#dhcp-boot=tag:!gpxe,undionly.kpxe -#dhcp-boot=mybootimage +# Boot for iPXE. 
The idea is to send two different +# filenames, the first loads iPXE, and the second tells iPXE what to +# load. The dhcp-match sets the ipxe tag for requests from iPXE. +#dhcp-boot=undionly.kpxe +#dhcp-match=set:ipxe,175 # iPXE sends a 175 option. +#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php -# Encapsulated options for Etherboot gPXE. All the options are +# Encapsulated options for iPXE. All the options are # encapsulated within option 175 #dhcp-option=encap:175, 1, 5b # priority code #dhcp-option=encap:175, 176, 1b # no-proxydhcp @@ -526,7 +525,7 @@ server=/consul/127.0.0.1#8600 # (using /etc/hosts) then that name can be specified as the # tftp_servername (the third option to dhcp-boot) and in that # case dnsmasq resolves this name and returns the resultant IP -# addresses in round robin fasion. This facility can be used to +# addresses in round robin fashion. This facility can be used to # load balance the tftp load among a set of servers. #dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name @@ -548,6 +547,14 @@ server=/consul/127.0.0.1#8600 # http://www.isc.org/files/auth.html #dhcp-authoritative +# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039. +# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit +# option with a DHCPACK including a Rapid Commit option and fully committed address +# and configuration information. This must only be enabled if either the server is +# the only server for the subnet, or multiple servers are present and they each +# commit a binding for all clients. +#dhcp-rapid-commit + # Run an executable when a DHCP lease is created or destroyed. # The arguments sent to the script are "add" or "del", # then the MAC address, the IP address and finally the hostname @@ -665,3 +672,8 @@ server=/consul/127.0.0.1#8600 # Include all files in a directory which end in .conf #conf-dir=/etc/dnsmasq.d/,*.conf + +# If a DHCP client claims that its name is "wpad", ignore that. 
+# This fixes a security hole. see CERT Vulnerability VU#598349 +#dhcp-name-match=set:wpad-ignore,wpad +#dhcp-ignore-names=tag:wpad-ignore diff --git a/cookbooks/consul/files/etc/supervisor/conf.d/consul.conf b/cookbooks/consul/files/etc/supervisor/conf.d/consul.conf deleted file mode 100644 index 8e9d360..0000000 --- a/cookbooks/consul/files/etc/supervisor/conf.d/consul.conf +++ /dev/null @@ -1,9 +0,0 @@ -[program:consul] -command=/usr/local/bin/consul agent -pid-file /var/run/consul.pid -config-dir=/etc/consul.d -stdout_logfile=/var/log/supervisor/consul.log -environment=GOMAXPROC="2" -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=TERM diff --git a/cookbooks/consul/templates/etc/systemd/resolved.conf.2204.erb b/cookbooks/consul/templates/etc/systemd/resolved.conf.2204.erb new file mode 100644 index 0000000..7679a22 --- /dev/null +++ b/cookbooks/consul/templates/etc/systemd/resolved.conf.2204.erb @@ -0,0 +1,34 @@ +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Entries in this file show the compile time defaults. Local configuration +# should be created by either modifying this file, or by creating "drop-ins" in +# the resolved.conf.d/ subdirectory. The latter is generally recommended. +# Defaults can be restored by simply deleting this file and all drop-ins. +# +# Use 'systemd-analyze cat-config systemd/resolved.conf' to display the full config. +# +# See resolved.conf(5) for details. 
+ +[Resolve] +# Some examples of DNS servers which may be used for DNS= and FallbackDNS=: +# Cloudflare: 1.1.1.1#cloudflare-dns.com 1.0.0.1#cloudflare-dns.com 2606:4700:4700::1111#cloudflare-dns.com 2606:4700:4700::1001#cloudflare-dns.com +# Google: 8.8.8.8#dns.google 8.8.4.4#dns.google 2001:4860:4860::8888#dns.google 2001:4860:4860::8844#dns.google +# Quad9: 9.9.9.9#dns.quad9.net 149.112.112.112#dns.quad9.net 2620:fe::fe#dns.quad9.net 2620:fe::9#dns.quad9.net +DNS=127.0.0.1 <%= @dns %> 8.8.8.8 +#FallbackDNS= +#Domains= +#DNSSEC=no +#DNSOverTLS=no +#MulticastDNS=no +#LLMNR=no +#Cache=no-negative +#CacheFromLocalhost=no +DNSStubListener=no +#DNSStubListenerExtra= +#ReadEtcHosts=yes +#ResolveUnicastSingleLabel=no diff --git a/cookbooks/promtail/files/etc/logrotate.d/promtail b/cookbooks/digdag/files/etc/logrotate.d/digdag similarity index 90% rename from cookbooks/promtail/files/etc/logrotate.d/promtail rename to cookbooks/digdag/files/etc/logrotate.d/digdag index 4ed9451..dc078ee 100644 --- a/cookbooks/promtail/files/etc/logrotate.d/promtail +++ b/cookbooks/digdag/files/etc/logrotate.d/digdag @@ -1,4 +1,4 @@ -/var/log/promtail.log +/var/log/digdag.log { rotate 4 weekly diff --git a/cookbooks/promtail/files/etc/rsyslog.d/30-promtail.conf b/cookbooks/digdag/files/etc/rsyslog.d/30-digdag.conf similarity index 68% rename from cookbooks/promtail/files/etc/rsyslog.d/30-promtail.conf rename to cookbooks/digdag/files/etc/rsyslog.d/30-digdag.conf index 9f6d43f..0a19250 100644 --- a/cookbooks/promtail/files/etc/rsyslog.d/30-promtail.conf +++ b/cookbooks/digdag/files/etc/rsyslog.d/30-digdag.conf @@ -1,5 +1,5 @@ -# Log kernel generated promtail log messages to file -:syslogtag,contains,"promtail" /var/log/promtail.log +# Log kernel generated digdag log messages to file +:syslogtag,contains,"digdag.sh" /var/log/digdag.log # Uncomment the following to stop logging anything that matches the last rule. 
# Doing this will stop logging kernel generated UFW log messages to the file diff --git a/cookbooks/digdag/files/etc/supervisor/conf.d/digdag.conf b/cookbooks/digdag/files/etc/supervisor/conf.d/digdag.conf deleted file mode 100644 index eaf346d..0000000 --- a/cookbooks/digdag/files/etc/supervisor/conf.d/digdag.conf +++ /dev/null @@ -1,7 +0,0 @@ -[program:digdag] -command=/etc/digdag/digdag.sh -stdout_logfile=/var/log/supervisor/digdag.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true diff --git a/cookbooks/digdag/files/etc/vector/digdag.toml b/cookbooks/digdag/files/etc/vector/digdag.toml new file mode 100644 index 0000000..42bb544 --- /dev/null +++ b/cookbooks/digdag/files/etc/vector/digdag.toml @@ -0,0 +1,57 @@ +data_dir = "/var/lib/vector/" + +[sources.digdag] + type = "file" + include = [ "/var/log/digdag.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.digdag_transform] + type = "remap" + inputs = ["digdag"] + source = ''' + . |= parse_syslog!(.message) + + .message = replace(.message, r'^\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2} \+\d{4} ', "") + + if starts_with(.message, "[") { + l = parse_regex!(.message, r'\[(?P[^\]]+)\]') + . 
= merge(., l) + + .level = downcase(.level) + } else { + .level = "debug" + } + ''' + +[sinks.digdag_output] +type = "file" +inputs = [ "digdag_transform" ] +compression = "none" +path = "/tmp/digdag-%Y-%m-%d.log" + + [sinks.digdag_output.encoding] + codec = "json" + + [sinks.digdag_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.digdag_loki] +type = "loki" +inputs = [ "digdag_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.digdag_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "digdag" + filename = "/var/log/digdag.log" + + [sinks.digdag_loki.encoding] + codec = "json" + + [sinks.digdag_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/digdag/files/lib/systemd/system/digdag.service b/cookbooks/digdag/files/lib/systemd/system/digdag.service new file mode 100644 index 0000000..dd0de72 --- /dev/null +++ b/cookbooks/digdag/files/lib/systemd/system/digdag.service @@ -0,0 +1,15 @@ +[Unit] +Description=digdag +Requires=network-online.target +After=network-online.target + +[Service] +Type=simple +EnvironmentFile=-/etc/default/digdag +Restart=on-failure +ExecStart=/etc/digdag/digdag.sh +KillSignal=process + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/digdag/files/lib/systemd/system/vector-digdag.service b/cookbooks/digdag/files/lib/systemd/system/vector-digdag.service new file mode 100644 index 0000000..ff7bea5 --- /dev/null +++ b/cookbooks/digdag/files/lib/systemd/system/vector-digdag.service @@ -0,0 +1,16 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/digdag.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=syslog +StandardError=syslog +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/digdag/setup.rb b/cookbooks/digdag/setup.rb index 
2f5e920..c42f582 100644 --- a/cookbooks/digdag/setup.rb +++ b/cookbooks/digdag/setup.rb @@ -36,38 +36,53 @@ execute 'ufw reload-or-enable' do action :nothing end -# Deploy the config file for `supervisor`: -remote_file '/etc/supervisor/conf.d/digdag.conf' do +# Deploy the config file for `systemd`: +remote_file '/lib/systemd/system/digdag.service' do + owner 'root' + group 'root' + mode '644' +end + +service 'digdag' do + action [ :enable, :restart ] +end + +# Deploy `rsyslog` config file for `digdag`: +remote_file '/etc/rsyslog.d/30-digdag.conf' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[rsyslog]', :immediately end -service 'supervisor' do - action :nothing -end - -# Deploy /etc/hosts file: -HOSTNAME = run_command('uname -n').stdout.chomp - -template '/etc/promtail/digdag.yaml' do +# Deploy `logrotate` config for `digdag`: +remote_file '/etc/logrotate.d/digdag' do owner 'root' - group 'root' - mode '644' + group 'root' + mode '644' +end - variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint']) - end + +# Deploy the config file for `vector`: +remote_file '/etc/vector/digdag.toml' do + owner 'root' + group 'root' + mode '644' +end # Deploy the `systemd` configuration: -remote_file '/lib/systemd/system/promtail-digdag.service' do +remote_file '/lib/systemd/system/vector-digdag.service' do owner 'root' group 'root' mode '644' end # Service setting: -service 'promtail-digdag' do +service 'vector-digdag' do action [ :enable, :restart ] end + +service 'rsyslog' do + action [ :nothing ] +end diff --git a/cookbooks/digdag/templates/etc/promtail/digdag.yaml b/cookbooks/digdag/templates/etc/promtail/digdag.yaml deleted file mode 100644 index dcdb06c..0000000 --- a/cookbooks/digdag/templates/etc/promtail/digdag.yaml +++ /dev/null @@ -1,61 +0,0 @@ -server: - disable: true - -positions: - filename: /var/opt/promtail/promtail_digdag_position.yaml - - -clients: - - url: http://<%= 
@LOKIENDPOINT %>/loki/api/v1/push - -scrape_configs: - - job_name: digdag - static_configs: - - targets: - - localhost - labels: - job: digdag - hostname: <%= @HOSTNAME %> - __path__: /var/log/supervisor/digdag.log - - pipeline_stages: - - match: - selector: '{job="digdag"} !~ "^[0-9]{4}-[0-9]{2}-[0-9]{2}"' - action: drop - - - match: - selector: '{job="digdag"} |~ "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} ERROR"' - action: drop - - - match: - selector: '{job="digdag"} !~ "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} ERROR"' - - stages: - - regex: - expression: '^(?P\d+\-\d+\-\d+ \d+:\d+:\d+)([\.\d]+)? (?P[\+\d]+) \[(?P[^\]]+)\] (?P.+)$' - - - template: - source: timestamp - template: '{{ .datetime }} {{ .timezone }}' - - - timestamp: - source: timestamp - format: 2006-01-02 15:04:05 -0700 - - - template: - source: level - template: '{{ if .level }}{{ .level }}{{ else }}notice{{ end }}' - - - template: - source: level - template: '{{ ToLower .level }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "warn" .Value "warning" }}' - - - labels: - level: - - - output: - source: message diff --git a/cookbooks/docker/install.rb b/cookbooks/docker/install.rb index 5839d2e..dddac9a 100644 --- a/cookbooks/docker/install.rb +++ b/cookbooks/docker/install.rb @@ -19,7 +19,7 @@ execute 'apt-get update' do not_if 'which docker' end -%w(docker-ce docker-ce-cli containerd.io).each do |p| +%w(docker-ce docker-ce-cli containerd.io docker-compose-plugin).each do |p| package p end diff --git a/cookbooks/docker/setup.rb b/cookbooks/docker/setup.rb index 422708a..79c6d63 100644 --- a/cookbooks/docker/setup.rb +++ b/cookbooks/docker/setup.rb @@ -1,4 +1,10 @@ # Ignore the certificate +directory '/etc/docker/' do + owner 'root' + group 'root' + mode '0755' +end + remote_file '/etc/docker/daemon.json' do owner 'root' group 'root' diff --git a/cookbooks/everun/attributes.rb b/cookbooks/everun/attributes.rb new file mode 100644 index 
0000000..994debb --- /dev/null +++ b/cookbooks/everun/attributes.rb @@ -0,0 +1,9 @@ +# ------------------------------------------- +# Specifying the default settings: +# ------------------------------------------- +node.reverse_merge!({ + 'everun' => { + 'FQDN' => 'everun.club', + 'production' => true + } +}) diff --git a/cookbooks/everun/default.rb b/cookbooks/everun/default.rb new file mode 100644 index 0000000..dd3cbd2 --- /dev/null +++ b/cookbooks/everun/default.rb @@ -0,0 +1,5 @@ +include_recipe './attributes.rb' + +if node['everun']['production'] + include_recipe './nginx.rb' +end diff --git a/cookbooks/everun/files/etc/cron.d/everun-blog b/cookbooks/everun/files/etc/cron.d/everun-blog new file mode 100644 index 0000000..e62f6cb --- /dev/null +++ b/cookbooks/everun/files/etc/cron.d/everun-blog @@ -0,0 +1 @@ +@reboot webadm cp -pr /home/webadm/works/everun/* /var/www/everun/ diff --git a/cookbooks/everun/nginx.rb b/cookbooks/everun/nginx.rb new file mode 100644 index 0000000..8e64d95 --- /dev/null +++ b/cookbooks/everun/nginx.rb @@ -0,0 +1,38 @@ +# Create the nginx directory: +%w( everun test-everun ).each do |d| + directory "/var/www/#{d}" do + owner 'www-data' + group 'webadm' + mode '770' + end +end + +# Add the fstab entry: +file '/etc/fstab' do + action :edit + + block do |content| + content << "tmpfs /var/www/everun tmpfs size=250m,noatime 0 0\n" + end + + not_if 'grep /var/www/everun /etc/fstab' + + notifies :run, 'execute[mount -a]' +end + +execute 'mount -a' do + action :nothing +end + +remote_file '/etc/cron.d/everun-blog' do + owner 'root' + group 'root' + mode '644' +end + +# Create storage directory for blog data +directory '/home/webadm/works/everun' do + owner 'webadm' + group 'webadm' + mode '775' +end diff --git a/cookbooks/gitea/attributes.rb b/cookbooks/gitea/attributes.rb deleted file mode 100644 index 618852a..0000000 --- a/cookbooks/gitea/attributes.rb +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------- -# 
Specifying the default settings: -# ------------------------------------------- -node.reverse_merge!({ - 'gitea' => { - 'url' => 'https://github.com/go-gitea/gitea/releases/download/', - 'prefix' => 'gitea-', - 'postfix' => '-linux-amd64', - 'storage' => '/opt/gitea/', - 'location' => '/usr/local/bin/' - }, - 'go-mmproxy' => { - 'url' => 'https://github.com/path-network/go-mmproxy/releases/', - 'bin_url' => 'https://github.com/path-network/go-mmproxy/releases/download/2.0/go-mmproxy-2.0-centos8-x86_64', - 'storage' => '/opt/go-mmproxy/', - 'location' => '/usr/local/bin/' - }, -}) diff --git a/cookbooks/gitea/default.rb b/cookbooks/gitea/default.rb deleted file mode 100644 index c7465cf..0000000 --- a/cookbooks/gitea/default.rb +++ /dev/null @@ -1,10 +0,0 @@ -# Loading the attributes: -include_recipe './attributes.rb' - -# Install: -include_recipe './install.rb' -include_recipe './install-go-mmproxy.rb' - -# Setup: -include_recipe './setup.rb' -include_recipe './setup-go-mmproxy.rb' diff --git a/cookbooks/gitea/files/etc/consul.d/service-go-mmproxy.json b/cookbooks/gitea/files/etc/consul.d/service-go-mmproxy.json deleted file mode 100644 index f45b065..0000000 --- a/cookbooks/gitea/files/etc/consul.d/service-go-mmproxy.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "service": { - "name": "go-mmproxy", - "port": 50021, - "check":{ - "tcp": "localhost:50021", - "interval": "60s", - "timeout": "1s", - "success_before_passing": 3 - } - } -} diff --git a/cookbooks/gitea/files/etc/gitea/app.ini b/cookbooks/gitea/files/etc/gitea/app.ini deleted file mode 100644 index b5f7ba0..0000000 --- a/cookbooks/gitea/files/etc/gitea/app.ini +++ /dev/null @@ -1,78 +0,0 @@ -APP_NAME = Gitea: Git with a cup of tea -RUN_USER = git -RUN_MODE = prod - -[oauth2] -JWT_SECRET = Cyb3GmSaoJpkaHhA5X6wiNCK7KsngKEr6w_v37WZ1a4 - -[security] -INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE1NjMzNzYxNzR9.Z8_xg9eBZt8fSMTQLQB2xxGUx7GB5M3_v_Tsc441LOg -INSTALL_LOCK = true -SECRET_KEY = 
Br3eWgKaVIvM2TiHgvRnDbeZTSvBeVxSOS2VbjsPiyZ8Egigqre4dq0ZqaIKoxlB - -[database] -DB_TYPE = mysql -HOST = 192.168.10.200:3307 -NAME = gitea -USER = root -PASSWD = Holiday88 -SSL_MODE = disable -PATH = /var/lib/gitea/data/gitea.db - -[repository] -ROOT = /var/lib/git - -[server] -SSH_DOMAIN = gitea.kazu634.com -DOMAIN = gitea.kazu634.com -HTTP_PORT = 3000 -ROOT_URL = https://gitea.kazu634.com/ -DISABLE_SSH = false -SSH_PORT = 50022 -LFS_START_SERVER = true -LFS_CONTENT_PATH = /var/lib/gitea/data/lfs -LFS_JWT_SECRET = hcxZi2iadhyYTdRtAOJXXWPckR-lK2rFHPCbA1isvV0 -OFFLINE_MODE = false - -[mailer] -ENABLED = false - -[service] -REGISTER_EMAIL_CONFIRM = false -ENABLE_NOTIFY_MAIL = false -DISABLE_REGISTRATION = true -ALLOW_ONLY_EXTERNAL_REGISTRATION = false -ENABLE_CAPTCHA = true -REQUIRE_SIGNIN_VIEW = false -DEFAULT_KEEP_EMAIL_PRIVATE = false -DEFAULT_ALLOW_CREATE_ORGANIZATION = true -DEFAULT_ENABLE_TIMETRACKING = true -NO_REPLY_ADDRESS = noreply.example.org - -[picture] -DISABLE_GRAVATAR = false -ENABLE_FEDERATED_AVATAR = true - -[openid] -ENABLE_OPENID_SIGNIN = false -ENABLE_OPENID_SIGNUP = false - -[session] -PROVIDER = file - -[log] -MODE = file -LEVEL = Info -ROOT_PATH = /var/lib/gitea/log - -[other] -SHOW_FOOTER_VERSION = false - -[attachment] -ENABLED = true -ALLOWED_TYPES = */* -MAX_SIZE = 1024 -MAX_FILES = 25 - -[metrics] -ENABLED = true diff --git a/cookbooks/gitea/files/etc/lsyncd/lsyncd.conf.lua b/cookbooks/gitea/files/etc/lsyncd/lsyncd.conf.lua deleted file mode 100644 index fea517a..0000000 --- a/cookbooks/gitea/files/etc/lsyncd/lsyncd.conf.lua +++ /dev/null @@ -1,26 +0,0 @@ -settings { - logfile = "/var/log/lsyncd/lsyncd.log", - statusFile = "/var/log/lsyncd/lsyncd.status", - statusInterval = 20, - nodaemon = false -} - -sync { - default.rsync, - source = "/var/lib/git/", - target = "admin@192.168.10.200:/volume1/Shared/AppData/gitea/git/", - rsync = { - archive = true, - compress = true - } -} - -sync { - default.rsync, - source = "/var/lib/gitea/", - 
target = "admin@192.168.10.200:/volume1/Shared/AppData/gitea/gitea-data/", - rsync = { - archive = true, - compress = true - } -} diff --git a/cookbooks/gitea/files/etc/supervisor/conf.d/gitea.conf b/cookbooks/gitea/files/etc/supervisor/conf.d/gitea.conf deleted file mode 100644 index d251545..0000000 --- a/cookbooks/gitea/files/etc/supervisor/conf.d/gitea.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:gitea] -command=/usr/local/bin/gitea web -c /etc/gitea/app.ini -user=git -stdout_logfile=/var/log/supervisor/gitea.log -environment=GITEA_WORK_DIR="/var/lib/gitea/", HOME="/home/git", USER="git" -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=TERM diff --git a/cookbooks/gitea/files/etc/systemd/system/go-mmproxy.service b/cookbooks/gitea/files/etc/systemd/system/go-mmproxy.service deleted file mode 100644 index cc14caf..0000000 --- a/cookbooks/gitea/files/etc/systemd/system/go-mmproxy.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=go-mmproxy -After=network.target - -[Service] -Type=simple -LimitNOFILE=65535 -ExecStartPost=/sbin/ip rule add from 127.0.0.1/8 iif lo table 123 -ExecStartPost=/sbin/ip route add local 0.0.0.0/0 dev lo table 123 -ExecStart=/usr/local/bin/go-mmproxy -l 0.0.0.0:50021 -4 127.0.0.1:10022 -v 2 -ExecStopPost=/sbin/ip rule del from 127.0.0.1/8 iif lo table 123 -ExecStopPost=/sbin/ip route del local 0.0.0.0/0 dev lo table 123 -Restart=on-failure -RestartSec=10s - -[Install] -WantedBy=multi-user.target diff --git a/cookbooks/gitea/files/etc/systemd/system/promtail-gitea.service b/cookbooks/gitea/files/etc/systemd/system/promtail-gitea.service deleted file mode 100644 index e1380bd..0000000 --- a/cookbooks/gitea/files/etc/systemd/system/promtail-gitea.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Grafana Promtail -Documentation=https://github.com/grafana/loki -After=network-online.target - -[Service] -User=root -Restart=always -ExecStart=/usr/local/bin/promtail 
--config.file=/etc/promtail/gitea.yaml - -[Install] -WantedBy=multi-user.target diff --git a/cookbooks/gitea/files/home/git/.ssh/authorized_keys b/cookbooks/gitea/files/home/git/.ssh/authorized_keys deleted file mode 100644 index c7d6cab..0000000 --- a/cookbooks/gitea/files/home/git/.ssh/authorized_keys +++ /dev/null @@ -1,6 +0,0 @@ -# gitea public key -command="/usr/local/bin/gitea --config=\"/etc/gitea/app.ini\" serv key-4",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKaziOfXcs96+p5WU67S/v3RD2HvuRN9iqROba8REj3fZygCrVHnboF6I3O5fmO7FXh2Nj8iLW/aQT0LxondM2hch67g6D4sM4qcshriYYRfMHTc+w7jVE6bhzpl78kCUM/Scy/IwCXqMNwWDoji8Yt2MMIBsAoUPhP1DdseHsBpxXDtKVcaHy35SM+uEsl34yvcXiobitYtrclxI8D7AiRHQ77VoHzlv8m93WFKBYlJ4JbtaQpVPncpJzcqhs1gD0eIHCHHF8xg8VsrDyiWVBoh+4ixnr+HYUbhRRBalvDuGdgFdccDt1RIWWrlZNelRecR1LNgyvWL5x9H/4YMh9 WorkingCopy@KazuhirosiPad-24032019 -# gitea public key -command="/usr/local/bin/gitea --config=\"/etc/gitea/app.ini\" serv key-5",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxKUdftBP05WHbz2wIbYqhgYpmvR/tcIrnVngM2vH3hvbFfS6Es2TBswqTml5+gRzyZrjaii3rJaNfQxcXEfW8lPHzp3weMDBgNrcuVby5Nix5N7EeEoPZyzPk1BvpzoIudE/zIO++ttpTIS3uMBLcqCny4M/mY8IHiLs/c1osP7nQ1QA96xBHTk3xxr9vVbVyCI68uQ79aumJbhP/nKO068HmBJ5M+4kRLNQ6US6dvd8/zbf2tyi0SqCJcLrUvF2AINlIc9T3oApftYdrcZpNeexQdb4HYkH4lwQg4oWbCMH/iDgc8KLJR21nXLZZrVkbSxcDvwcYsMeGwZrVOpuR Chef -# gitea public key -command="/usr/local/bin/gitea --config=\"/etc/gitea/app.ini\" serv key-9",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuib90/h5aKtL411mOOTe7Ja5udeSTfF9mmTVuVsw5TEbOZPEI0O+PWuCCg6DKHVtAb0POoBjg+I8V4NS2VGIAur0mUyKIY7Zadk/3Y/jdbEtS0IGMwdJOgfTEBFvNNXhx+di3cUPTOvSBlnXpIi3vMetzOmqXvb285RUTcPlmLQsdpnJGcShnWIwUXKqWwQB5QZ8MREPgdGedON4yyWsOIrCVJJjBRCwyWCbLQTNE6TKoWKauabPtNgdqFFcBmp6NYfR8Ob2qp0RVq2vi8FFxoEaFFbJUHlJIbiInVypPf3zwpXx8Gdw+Rr7Hs8YAGCjEqE8J8ZI0iXDhaE4HcrQPQ== 
kazu634@macbookpro.local diff --git a/cookbooks/gitea/install-go-mmproxy.rb b/cookbooks/gitea/install-go-mmproxy.rb deleted file mode 100644 index 5ccf64c..0000000 --- a/cookbooks/gitea/install-go-mmproxy.rb +++ /dev/null @@ -1,29 +0,0 @@ -# Download: -TMP = "/tmp/go-mmproxy" - -execute "wget #{node['go-mmproxy']['bin_url']} -O #{TMP}" do - not_if "test -e #{node['go-mmproxy']['storage']}/go-mmproxy" -end - -# Install: -directory node['go-mmproxy']['storage'] do - owner 'root' - group 'root' - mode '755' -end - -execute "mv #{TMP} #{node['go-mmproxy']['storage']}/go-mmproxy" do - not_if "test -e #{node['go-mmproxy']['storage']}/go-mmproxy" -end - -# Change Owner and Permissions: -file "#{node['go-mmproxy']['storage']}/go-mmproxy" do - owner 'root' - group 'root' - mode '755' -end - -# Create Link -link "#{node['go-mmproxy']['location']}/go-mmproxy" do - to "#{node['go-mmproxy']['storage']}/go-mmproxy" -end diff --git a/cookbooks/gitea/install.rb b/cookbooks/gitea/install.rb deleted file mode 100644 index 433914e..0000000 --- a/cookbooks/gitea/install.rb +++ /dev/null @@ -1,55 +0,0 @@ -gitea_url = '' -gitea_bin = '' - -vtag = '' -tag = '' - -# Calculate the Download URL: -begin - require 'net/http' - - uri = URI.parse('https://github.com/go-gitea/gitea/releases/latest') - - Timeout.timeout(3) do - response = Net::HTTP.get_response(uri) - - vtag = $1 if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)} - tag = vtag.sub(/^v/, '') - - gitea_bin = "#{node['gitea']['prefix']}#{tag}#{node['gitea']['postfix']}" - gitea_url = "#{node['gitea']['url']}/#{vtag}/#{gitea_bin}" - end -rescue - # Abort the chef client process: - raise 'Cannot connect to http://github.com.' 
-end - -# バージョン確認して、アップデート必要かどうか確認 -result = run_command("gitea --version 2>&1 | grep #{tag}", error: false) -if result.exit_status != 0 - # Download: - TMP = "/tmp/#{gitea_bin}" - - execute "wget #{gitea_url} -O #{TMP}" - - # Install: - directory node['gitea']['storage'] do - owner 'root' - group 'root' - mode '755' - end - - execute "mv #{TMP} #{node['gitea']['storage']}/gitea" - - # Change Owner and Permissions: - file "#{node['gitea']['storage']}/gitea" do - owner 'root' - group 'root' - mode '755' - end - - # Create Link - link "#{node['gitea']['location']}/gitea" do - to "#{node['gitea']['storage']}/gitea" - end -end diff --git a/cookbooks/gitea/setup-go-mmproxy.rb b/cookbooks/gitea/setup-go-mmproxy.rb deleted file mode 100644 index 2006378..0000000 --- a/cookbooks/gitea/setup-go-mmproxy.rb +++ /dev/null @@ -1,43 +0,0 @@ -# Deploy `supervisord` config`: -remote_file '/etc/systemd/system/go-mmproxy.service' do - owner 'root' - group 'root' - mode '644' - - notifies :restart, 'service[go-mmproxy]' -end - -service 'go-mmproxy' do - action [ :enable, :restart ] -end - -# Depoy `consul` service configuration for `gitea`: -remote_file '/etc/consul.d/service-go-mmproxy.json' do - owner 'consul' - group 'consul' - mode '644' - - notifies :reload, 'service[consul]' -end - -service 'consul' do - action :nothing -end - -# Firewall settings here: -%w( 50021/tcp ).each do |p| - execute "ufw allow #{p}" do - user 'root' - - not_if "LANG=c ufw status | grep #{p}" - - notifies :run, 'execute[ufw reload-or-enable]' - end -end - -execute 'ufw reload-or-enable' do - user 'root' - command 'LANG=C ufw reload | grep skipping && ufw --force enable || exit 0' - - action :nothing -end diff --git a/cookbooks/gitea/setup.rb b/cookbooks/gitea/setup.rb deleted file mode 100644 index 2bf1cbe..0000000 --- a/cookbooks/gitea/setup.rb +++ /dev/null @@ -1,135 +0,0 @@ -# Create `git` user: -user 'git' do - create_home true - home '/home/git/' - - system_user true - - shell '/bin/bash' -end - 
-directory '/home/git/.ssh/' do - owner 'git' - group 'git' - mode '0700' -end - -remote_file '/home/git/.ssh/authorized_keys' do - owner 'git' - group 'git' - mode '0600' -end - -# Create `/etc/gitea/`: -%w(/etc/gitea).each do |d| - directory d do - owner 'root' - group 'root' - mode '0755' - end -end - -%w(/var/lib/git /var/lib/gitea).each do |d| - directory d do - owner 'git' - group 'git' - mode '0755' - end -end - -execute 'rsync -vrz --delete admin@192.168.10.200:/volume1/Shared/AppData/gitea/gitea-data/ /var/lib/gitea/' do - not_if 'test -e /var/lib/gitea/log' -end - -execute 'rsync -vrz --delete admin@192.168.10.200:/volume1/Shared/AppData/gitea/git/ /var/lib/git/' do - not_if 'test -e /var/lib/git/kazu634/' -end - -execute 'chown -R git:git /var/lib/gitea/' -execute 'chown -R git:git /var/lib/git/' - -# Deploy `app.ini`: -remote_file '/etc/gitea/app.ini' do - owner 'git' - group 'git' - mode '644' -end - -# Deploy `supervisord` config`: -remote_file '/etc/supervisor/conf.d/gitea.conf' do - owner 'root' - group 'root' - mode '644' - - notifies :restart, 'service[supervisor]' -end - -service 'supervisor' do - action :nothing -end - -# Depoy `consul` service configuration for `gitea`: -remote_file '/etc/consul.d/service-gitea.json' do - owner 'consul' - group 'consul' - mode '644' - - notifies :reload, 'service[consul]' -end - -service 'consul' do - action :nothing -end - -# Depoy `promtail` configuration for `gitea`: -template '/etc/promtail/gitea.yaml' do - owner 'root' - group 'root' - mode '644' - - variables(HOSTNAME: node[:hostname], LOKIENDPOINT: node['promtail']['lokiendpoint']) - - notifies :restart, 'service[promtail-gitea]' -end - -# Deploy `systemd` configuration for `promtail-gitea`: -remote_file '/etc/systemd/system/promtail-gitea.service' do - owner 'root' - group 'root' - mode '644' -end - -# Service setting: -service 'promtail-gitea' do - action [ :enable, :restart ] -end - -# Deploy `systemd` configuration for `promtail-gitea`: -remote_file 
'/etc/lsyncd/lsyncd.conf.lua' do - owner 'root' - group 'root' - mode '644' -end - -# Service setting: -service 'lsyncd' do - action [ :enable, :restart ] -end - -# Firewall settings here: -%w( 3000/tcp ).each do |p| - execute "ufw allow #{p}" do - user 'root' - - not_if "LANG=c ufw status | grep #{p}" - - notifies :run, 'execute[ufw reload-or-enable]' - end -end - -execute 'ufw reload-or-enable' do - user 'root' - command 'LANG=C ufw reload | grep skipping && ufw --force enable || exit 0' - - action :nothing -end diff --git a/cookbooks/gitea/templates/etc/promtail/gitea.yaml b/cookbooks/gitea/templates/etc/promtail/gitea.yaml deleted file mode 100644 index 48d7dd0..0000000 --- a/cookbooks/gitea/templates/etc/promtail/gitea.yaml +++ /dev/null @@ -1,61 +0,0 @@ -server: - disable: true - -positions: - filename: /var/opt/promtail/promtail_gitea_position.yaml - -clients: - - url: http://<%= @LOKIENDPOINT %>/loki/api/v1/push - -scrape_configs: - - job_name: gitea - static_configs: - - targets: - - localhost - labels: - job: gitea - hostname: <%= @HOSTNAME %> - vhost: gitea.kazu634.com - __path__: /var/log/supervisor/gitea.log - - pipeline_stages: - - match: - selector: '{job="gitea"}' - - stages: - - drop: - expression: "(Static|robots.txt|sitemap.xml)" - - - regex: - expression: '^\[Macaron\] (?P[0-9]+\-[0-9]+\-[0-9]+ +[0-9]+:[0-9]+:[0-9]+): (?P[^\/]+) (?P\/[^ ]*) (?P[^ ]+) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02 15:04:05 - location: Asia/Tokyo - - - template: - source: message - template: '{{ .message1 }} {{ .uri }} ({{ .message2 }})' - - - template: - source: level - template: '{{ .response }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "(2[0-9]+|3[0-9]+|for)" .Value "info" }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "4[0-9]+" .Value "warning" }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "5[0-9]+" .Value "error" }}' - - - labels: - level: - - - output: 
- source: message diff --git a/cookbooks/grafana/files/etc/grafana/grafana.ini b/cookbooks/grafana/files/etc/grafana/grafana.ini index 78656d4..efa4350 100644 --- a/cookbooks/grafana/files/etc/grafana/grafana.ini +++ b/cookbooks/grafana/files/etc/grafana/grafana.ini @@ -9,6 +9,9 @@ # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty ;instance_name = ${HOSTNAME} +# force migration will run migrations that might cause dataloss +;force_migration = false + #################################### Paths #################################### [paths] # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) @@ -64,9 +67,25 @@ ;cert_file = ;cert_key = +# Unix socket gid +# Changing the gid of a file without privileges requires that the target group is in the group of the process and that the process is the file owner +# It is recommended to set the gid as http server user gid +# Not set when the value is -1 +;socket_gid = + +# Unix socket mode +;socket_mode = + # Unix socket path ;socket = +# CDN Url +;cdn_url = + +# Sets the maximum time using a duration format (5s/5m/5ms) before timing out read of an incoming request and closing idle connections. +# `0` means there is no timeout for reading the request. +;read_timeout = 0 + #################################### Database #################################### [database] # You can configure the database connection by specifying type, host, name, user and password @@ -84,9 +103,16 @@ password = 123qwe$%&RTY # Example: mysql://user:secret@host:port/database ;url = -# For "postgres" only, either "disable", "require" or "verify-full" +# For "postgres", use either "disable", "require" or "verify-full" +# For "mysql", use either "true", "false", or "skip-verify". ;ssl_mode = disable +# Database drivers may support different transaction isolation levels. +# Currently, only "mysql" driver supports isolation levels. 
+# If the value is empty - driver's default isolation level is applied. +# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE". +;isolation_level = + ;ca_cert_path = ;client_key_path = ;client_cert_path = @@ -110,6 +136,20 @@ password = 123qwe$%&RTY # For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared) ;cache_mode = private +# For "mysql" only if migrationLocking feature toggle is set. How many seconds to wait before failing to lock the database for the migrations, default is 0. +;locking_attempt_timeout_sec = 0 + +# For "sqlite" only. How many times to retry query in case of database is locked failures. Default is 0 (disabled). +;query_retries = 0 + +# For "sqlite" only. How many times to retry transaction in case of database is locked failures. Default is 5. +;transaction_retries = 5 + +################################### Data sources ######################### +[datasources] +# Upper limit of data sources that Grafana will return. This limit is a temporary configuration and it will be deprecated when pagination will be introduced on the list data sources API. +;datasource_limit = 5000 + #################################### Cache server ############################# [remote_cache] # Either "redis", "memcached" or "database" default is "database" @@ -127,10 +167,13 @@ password = 123qwe$%&RTY # This enables data proxy logging, default is false ;logging = false -# How long the data proxy waits before timing out, default is 30 seconds. +# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds. # This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set. ;timeout = 30 +# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds. +;dialTimeout = 10 + # How many seconds the data proxy waits before sending a keepalive probe request. 
;keep_alive_seconds = 30 @@ -143,6 +186,11 @@ password = 123qwe$%&RTY # waiting for the server to approve. ;expect_continue_timeout_seconds = 1 +# Optionally limits the total number of connections per host, including connections in the dialing, +# active, and idle states. On limit violation, dials will block. +# A value of zero (0) means no limit. +;max_conns_per_host = 0 + # The maximum number of idle connections that Grafana will keep alive. ;max_idle_connections = 100 @@ -152,6 +200,12 @@ password = 123qwe$%&RTY # If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false. ;send_user_header = false +# Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests. +;response_limit = 0 + +# Limits the number of rows that Grafana will process from SQL data sources. +;row_limit = 1000000 + #################################### Analytics #################################### [analytics] # Server reporting, sends usage counters to stats.grafana.org every 24 hours. @@ -160,19 +214,50 @@ password = 123qwe$%&RTY # Change this option to false to disable reporting. ;reporting_enabled = true -# Set to false to disable all checks to https://grafana.net -# for new versions (grafana itself and plugins), check is used -# in some UI views to notify that grafana or plugin update exists +# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs +;reporting_distributor = grafana-labs + +# Set to false to disable all checks to https://grafana.com +# for new versions of grafana. The check is used +# in some UI views to notify that a grafana update exists. # This option does not cause any auto updates, nor send any information -# only a GET request to http://grafana.com to get latest versions +# only a GET request to https://raw.githubusercontent.com/grafana/grafana/main/latest.json to get the latest version. 
;check_for_updates = true +# Set to false to disable all checks to https://grafana.com +# for new versions of plugins. The check is used +# in some UI views to notify that a plugin update exists. +# This option does not cause any auto updates, nor send any information +# only a GET request to https://grafana.com to get the latest versions. +;check_for_plugin_updates = true + # Google Analytics universal tracking code, only enabled if you specify an id here ;google_analytics_ua_id = +# Google Analytics 4 tracking code, only enabled if you specify an id here +;google_analytics_4_id = + +# When Google Analytics 4 Enhanced event measurement is enabled, we will try to avoid sending duplicate events and let Google Analytics 4 detect navigation changes, etc. +;google_analytics_4_send_manual_page_views = false + # Google Tag Manager ID, only enabled if you specify an id here ;google_tag_manager_id = +# Rudderstack write key, enabled only if rudderstack_data_plane_url is also set +;rudderstack_write_key = + +# Rudderstack data plane url, enabled only if rudderstack_write_key is also set +;rudderstack_data_plane_url = + +# Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set +;rudderstack_sdk_url = + +# Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config +;rudderstack_config_url = + +# Controls if the UI contains any links to user feedback forms +;feedback_links_enabled = true + #################################### Security #################################### [security] # disable creation of admin user on first start of grafana @@ -184,9 +269,18 @@ password = 123qwe$%&RTY # default admin password, can be changed before first start of grafana, or in profile settings ;admin_password = admin +# default admin email, created on startup +;admin_email = admin@localhost + # used for signing ;secret_key = SW2YcwTIb9zpOOhoPsMm +# current key provider used for envelope encryption, default to static 
value specified by secret_key +;encryption_provider = secretKey.v1 + +# list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1 +;available_encryption_providers = + # disable gravatar profile images ;disable_gravatar = false @@ -206,7 +300,6 @@ password = 123qwe$%&RTY ;allow_embedding = false # Set to true if you want to enable http strict transport security (HSTS) response header. -# This is only sent when HTTPS is enabled in this configuration. # HSTS tells browsers that the site should only be accessed using HTTPS. ;strict_transport_security = false @@ -228,12 +321,39 @@ password = 123qwe$%&RTY # when they detect reflected cross-site scripting (XSS) attacks. ;x_xss_protection = true +# Enable adding the Content-Security-Policy header to your requests. +# CSP allows to control resources the user agent is allowed to load and helps prevent XSS attacks. +;content_security_policy = false + +# Set Content Security Policy template used when adding the Content-Security-Policy header to your requests. +# $NONCE in the template includes a random nonce. +# $ROOT_PATH is server.root_url without the protocol. +;content_security_policy_template = """script-src 'self' 'unsafe-eval' 'unsafe-inline' 'strict-dynamic' $NONCE;object-src 'none';font-src 'self';style-src 'self' 'unsafe-inline' blob:;img-src * data:;base-uri 'self';connect-src 'self' grafana.com ws://$ROOT_PATH wss://$ROOT_PATH;manifest-src 'self';media-src 'none';form-action 'self';""" + +# Controls if old angular plugins are supported or not. This will be disabled by default in future release +;angular_support_enabled = true + +# List of additional allowed URLs to pass by the CSRF check, separated by spaces. Suggested when authentication comes from an IdP. +;csrf_trusted_origins = example.com + +# List of allowed headers to be set by the user, separated by spaces. Suggested to use for if authentication lives behind reverse proxies. 
+;csrf_additional_headers = + +[security.encryption] +# Defines the time-to-live (TTL) for decrypted data encryption keys stored in memory (cache). +# Please note that small values may cause performance issues due to a high frequency decryption operations. +;data_keys_cache_ttl = 15m + +# Defines the frequency of data encryption keys cache cleanup interval. +# On every interval, decrypted data encryption keys that reached the TTL are removed from the cache. +;data_keys_cache_cleanup_interval = 1m + #################################### Snapshots ########################### [snapshots] # snapshot sharing options ;external_enabled = true -;external_snapshot_url = https://snapshots-origin.raintank.io -;external_snapshot_name = Publish to snapshot.raintank.io +;external_snapshot_url = https://snapshots.raintank.io +;external_snapshot_name = Publish to snapshots.raintank.io # Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for # creating and deleting snapshots. @@ -281,6 +401,12 @@ password = 123qwe$%&RTY # Default UI theme ("dark" or "light") ;default_theme = dark +# Default locale (supported IETF language tag, such as en-US) +;default_locale = en-US + +# Path to a custom home page. Users are only redirected to this if the default home dashboard is used. It should match a frontend route and contain a leading slash. +;home_page = + # External user management, these options affect the organization users view ;external_manage_link_url = ;external_manage_link_name = @@ -295,10 +421,16 @@ password = 123qwe$%&RTY # The duration in time a user invitation remains valid before expiring. This setting should be expressed as a duration. Examples: 6h (hours), 2d (days), 1w (week). Default is 24h (24 hours). The minimum supported duration is 15m (15 minutes). ;user_invite_max_lifetime_duration = 24h +# Enter a comma-separated list of users login to hide them in the Grafana UI. 
These users are shown to Grafana admins and themselves. +; hidden_users = + [auth] # Login cookie name ;login_cookie_name = grafana_session +# Disable usage of Grafana build-in login solution. +;disable_login = false + # The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation. ;login_maximum_inactive_lifetime_duration = @@ -311,7 +443,7 @@ password = 123qwe$%&RTY # Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false ;disable_login_form = false -# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false +# Set to true to disable the sign out link in the side menu. Useful if you use auth.proxy or auth.jwt, defaults to false ;disable_signout_menu = false # URL to redirect the user to after sign out @@ -324,12 +456,21 @@ password = 123qwe$%&RTY # OAuth state max age cookie duration in seconds. Defaults to 600 seconds. ;oauth_state_cookie_max_age = 600 +# Skip forced assignment of OrgID 1 or 'auto_assign_org_id' for social logins +;oauth_skip_org_role_update_sync = false + # limit of api_key seconds to live before expiration ;api_key_max_seconds_to_live = -1 # Set to true to enable SigV4 authentication option for HTTP-based datasources. ;sigv4_auth_enabled = false +# Set to true to enable verbose logging of SigV4 request signing +;sigv4_verbose_logging = false + +# Set to true to enable Azure authentication option for HTTP-based datasources. 
+;azure_auth_enabled = false + #################################### Anonymous Auth ###################### [auth.anonymous] # enable anonymous access @@ -357,6 +498,9 @@ password = 123qwe$%&RTY ;allowed_domains = ;team_ids = ;allowed_organizations = +;role_attribute_path = +;role_attribute_strict = false +;allow_assign_grafana_admin = false #################################### GitLab Auth ######################### [auth.gitlab] @@ -370,6 +514,9 @@ password = 123qwe$%&RTY ;api_url = https://gitlab.com/api/v4 ;allowed_domains = ;allowed_groups = +;role_attribute_path = +;role_attribute_strict = false +;allow_assign_grafana_admin = false #################################### Google Auth ########################## [auth.google] @@ -405,6 +552,8 @@ password = 123qwe$%&RTY ;token_url = https://login.microsoftonline.com//oauth2/v2.0/token ;allowed_domains = ;allowed_groups = +;role_attribute_strict = false +;allow_assign_grafana_admin = false #################################### Okta OAuth ####################### [auth.okta] @@ -420,6 +569,8 @@ password = 123qwe$%&RTY ;allowed_domains = ;allowed_groups = ;role_attribute_path = +;role_attribute_strict = false +;allow_assign_grafana_admin = false #################################### Generic OAuth ########################## [auth.generic_oauth] @@ -429,21 +580,30 @@ password = 123qwe$%&RTY ;client_id = some_id ;client_secret = some_secret ;scopes = user:email,read:org +;empty_scopes = false ;email_attribute_name = email:primary ;email_attribute_path = ;login_attribute_path = +;name_attribute_path = ;id_token_attribute_name = ;auth_url = https://foo.bar/login/oauth/authorize ;token_url = https://foo.bar/login/oauth/access_token ;api_url = https://foo.bar/user +;teams_url = ;allowed_domains = ;team_ids = ;allowed_organizations = ;role_attribute_path = +;role_attribute_strict = false +;groups_attribute_path = +;team_ids_attribute_path = ;tls_skip_verify_insecure = false ;tls_client_cert = ;tls_client_key = ;tls_client_ca = 
+;use_pkce = false +;auth_style = +;allow_assign_grafana_admin = false #################################### Basic Auth ########################## [auth.basic] @@ -458,20 +618,70 @@ password = 123qwe$%&RTY ;sync_ttl = 60 ;whitelist = 192.168.1.1, 192.168.2.1 ;headers = Email:X-User-Email, Name:X-User-Name +# Non-ASCII strings in header values are encoded using quoted-printable encoding +;headers_encoded = false # Read the auth proxy docs for details on what the setting below enables ;enable_login_token = false +#################################### Auth JWT ########################## +[auth.jwt] +;enabled = true +;header_name = X-JWT-Assertion +;email_claim = sub +;username_claim = sub +;jwk_set_url = https://foo.bar/.well-known/jwks.json +;jwk_set_file = /path/to/jwks.json +;cache_ttl = 60m +;expect_claims = {"aud": ["foo", "bar"]} +;key_file = /path/to/key/file +;role_attribute_path = +;role_attribute_strict = false +;auto_sign_up = false +;url_login = false +;allow_assign_grafana_admin = false + #################################### Auth LDAP ########################## [auth.ldap] ;enabled = false ;config_file = /etc/grafana/ldap.toml ;allow_sign_up = true +# prevent synchronizing ldap users organization roles +;skip_org_role_sync = false -# LDAP backround sync (Enterprise only) +# LDAP background sync (Enterprise only) # At 1 am every day -;sync_cron = "0 0 1 * * *" +;sync_cron = "0 1 * * *" ;active_sync_enabled = true +#################################### AWS ########################### +[aws] +# Enter a comma-separated list of allowed AWS authentication providers. +# Options are: default (AWS SDK Default), keys (Access && secret key), credentials (Credentials field), ec2_iam_role (EC2 IAM Role) +; allowed_auth_providers = default,keys,credentials + +# Allow AWS users to assume a role using temporary security credentials. 
+# If true, assume role will be enabled for all AWS authentication providers that are specified in aws_auth_providers +; assume_role_enabled = true + +#################################### Azure ############################### +[azure] +# Azure cloud environment where Grafana is hosted +# Possible values are AzureCloud, AzureChinaCloud, AzureUSGovernment and AzureGermanCloud +# Default value is AzureCloud (i.e. public cloud) +;cloud = AzureCloud + +# Specifies whether Grafana hosted in Azure service with Managed Identity configured (e.g. Azure Virtual Machines instance) +# If enabled, the managed identity can be used for authentication of Grafana in Azure services +# Disabled by default, needs to be explicitly enabled +;managed_identity_enabled = false + +# Client ID to use for user-assigned managed identity +# Should be set for user-assigned identity and should be empty for system-assigned identity +;managed_identity_client_id = + +#################################### Role-based Access Control ########### +[rbac] +;permission_cache = true #################################### SMTP / Emailing ########################## [smtp] ;enabled = false @@ -491,7 +701,8 @@ password = 123qwe$%&RTY [emails] ;welcome_email_on_sign_up = false -;templates_pattern = emails/*.html +;templates_pattern = emails/*.html, emails/*.txt +;content_types = text/html #################################### Logging ########################## [log] @@ -550,6 +761,40 @@ password = 123qwe$%&RTY # Syslog tag. By default, the process' argv[0] is used. ;tag = +[log.frontend] +# Should Sentry javascript agent be initialized +;enabled = false + +# Defines which provider to use, default is Sentry +;provider = sentry + +# Sentry DSN if you want to send events to Sentry. +;sentry_dsn = + +# Custom HTTP endpoint to send events captured by the Sentry agent to. Default will log the events to stdout. 
+;custom_endpoint = /log + +# Rate of events to be reported between 0 (none) and 1 (all), float +;sample_rate = 1.0 + +# Requests per second limit enforced over an extended period, for Grafana backend log ingestion endpoint (/log). +;log_endpoint_requests_per_second_limit = 3 + +# Max requests accepted per short interval of time for Grafana backend log ingestion endpoint (/log). +;log_endpoint_burst_limit = 15 + +# Should error instrumentation be enabled, only affects Grafana Javascript Agent +;instrumentations_errors_enabled = true + +# Should console instrumentation be enabled, only affects Grafana Javascript Agent +;instrumentations_console_enabled = false + +# Should webvitals instrumentation be enabled, only affects Grafana Javascript Agent +;instrumentations_webvitals_enabled = false + +# Api Key, only applies to Grafana Javascript Agent provider +;api_key = testApiKey + #################################### Usage Quotas ######################## [quota] ; enabled = false @@ -567,6 +812,9 @@ password = 123qwe$%&RTY # limit number of api_keys per Org. ; org_api_key = 10 +# limit number of alerts per Org. +;org_alert_rule = 100 + # limit number of orgs a user can create. ; user_org = 10 @@ -585,11 +833,75 @@ password = 123qwe$%&RTY # global limit on number of logged in users. ; global_session = -1 +# global limit of alerts +;global_alert_rule = -1 + +#################################### Unified Alerting #################### [unified_alerting] +# Enable the Unified Alerting sub-system and interface. When enabled we'll migrate all of your alert rules and notification channels to the new system. New alert rules will be created and your notification channels will be converted into an Alertmanager configuration. Previous data is preserved to enable backwards compatibility but new data is removed. +;enabled = true + +# Comma-separated list of organization IDs for which to disable unified alerting. Only supported if unified alerting is enabled. 
+;disabled_orgs = + +# Specify the frequency of polling for admin config changes. +# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;admin_config_poll_interval = 60s + +# Specify the frequency of polling for Alertmanager config changes. +# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;alertmanager_config_poll_interval = 60s + +# Listen address/hostname and port to receive unified alerting messages for other Grafana instances. The port is used for both TCP and UDP. It is assumed other Grafana instances are also running on the same port. The default value is `0.0.0.0:9094`. +;ha_listen_address = "0.0.0.0:9094" + +# Explicit address/hostname and port to advertise to other Grafana instances for unified alerting messages. The port is used for both TCP and UDP. If not specified, the listen address is advertised. +;ha_advertise_address = "" + +# Comma-separated list of initial instances (in a format of host:port) that will form the HA cluster. Configuring this setting will enable High Availability mode for alerting. +;ha_peers = "" + +# Time to wait for an instance to send a notification via the Alertmanager. In HA, each Grafana instance will +# be assigned a position (e.g. 0, 1). We then multiply this position with the timeout to indicate how long should +# each instance wait before sending the notification to take into account replication lag. +# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;ha_peer_timeout = "15s" + +# The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated +# across cluster more quickly at the expense of increased bandwidth usage. 
+# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;ha_gossip_interval = "200ms" + +# The interval between gossip full state syncs. Setting this interval lower (more frequent) will increase convergence speeds +# across larger clusters at the expense of increased bandwidth usage. +# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;ha_push_pull_interval = "60s" + +# Enable or disable alerting rule execution. The alerting UI remains visible. This option has a legacy version in the `[alerting]` section that takes precedence. +;execute_alerts = true + +# Alert evaluation timeout when fetching data from the datasource. This option has a legacy version in the `[alerting]` section that takes precedence. +# The timeout string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;evaluation_timeout = 30s + +# Number of times we'll attempt to evaluate an alert rule before giving up on that evaluation. This option has a legacy version in the `[alerting]` section that takes precedence. +;max_attempts = 3 + +# Minimum interval to enforce between rule evaluations. Rules will be adjusted if they are less than this value or if they are not multiple of the scheduler interval (10s). Higher values can help with resource management as we'll schedule fewer evaluations over time. This option has a legacy version in the `[alerting]` section that takes precedence. +# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m. +;min_interval = 10s + +[unified_alerting.reserved_labels] +# Comma-separated list of reserved labels added by the Grafana Alerting engine that should be disabled. 
+# For example: `disabled_labels=grafana_folder` +;disabled_labels = + #################################### Alerting ############################ [alerting] -# Disable alerting engine & UI features -;enabled = true -# Makes it possible to turn off alert rule execution but alerting UI is visible +# Disable legacy alerting engine & UI features +;enabled = false + +# Makes it possible to turn off alert execution but alerting UI is visible ;execute_alerts = true # Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state) @@ -602,7 +914,6 @@ password = 123qwe$%&RTY # This limit will protect the server from render overloading and make sure notifications are sent out quickly ;concurrent_render_limit = 5 - # Default setting for alert calculation timeout. Default value is 30 ;evaluation_timeout_seconds = 30 @@ -623,6 +934,13 @@ password = 123qwe$%&RTY ;max_annotations_to_keep = #################################### Annotations ######################### +[annotations] +# Configures the batch size for the annotation clean-up job. This setting is used for dashboard, API, and alert annotations. +;cleanupjob_batchsize = 100 + +# Enforces the maximum allowed length of the tags for any newly introduced annotations. It can be between 500 and 4096 inclusive (which is the respective's column length). Default value is 500. +# Setting it to a higher value would impact performance therefore is not recommended. +;tags_length = 500 [annotations.dashboard] # Dashboard annotations means that annotations are associated with the dashboard they are created on. 
@@ -650,8 +968,23 @@ password = 123qwe$%&RTY # Enable the Explore section ;enabled = true +#################################### Help ############################# +[help] +# Enable the Help section +;enabled = true + +#################################### Profile ############################# +[profile] +# Enable the Profile section +;enabled = true + +#################################### Query History ############################# +[query_history] +# Enable the Query history +;enabled = true + #################################### Internal Grafana Metrics ########################## -# Metrics available at HTTP API Url /metrics +# Metrics available at HTTP URL /metrics and /metrics/plugins/:pluginId [metrics] # Disable / Enable internal metrics ;enabled = true @@ -660,7 +993,7 @@ password = 123qwe$%&RTY # Disable total stats (stat_totals_*) metrics to be generated ;disable_total_stats = false -#If both are set, basic auth will be required for the metrics endpoint. +#If both are set, basic auth will be required for the metrics endpoints. ; basic_auth_username = ; basic_auth_password = @@ -682,6 +1015,7 @@ password = 123qwe$%&RTY ;url = https://grafana.com #################################### Distributed tracing ############ +# Opentracing is deprecated use opentelemetry instead [tracing.jaeger] # Enable by setting the address sending traces to jaeger (ex localhost:6831) ;address = localhost:6831 @@ -705,6 +1039,23 @@ password = 123qwe$%&RTY # Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure. ;disable_shared_zipkin_spans = false +[tracing.opentelemetry] +# attributes that will always be included in when creating new spans. 
ex (key1:value1,key2:value2) +;custom_attributes = key1:value1,key2:value2 + +[tracing.opentelemetry.jaeger] +# jaeger destination (ex http://localhost:14268/api/traces) +; address = http://localhost:14268/api/traces +# Propagation specifies the text map propagation format: w3c, jaeger +; propagation = jaeger + +# This is a configuration for OTLP exporter with GRPC protocol +[tracing.opentelemetry.otlp] +# otlp destination (ex localhost:4317) +; address = localhost:4317 +# Propagation specifies the text map propagation format: w3c, jaeger +; propagation = w3c + #################################### External image storage ########################## [external_image_storage] # Used for uploading images to public servers so they can be included in slack/email messages. @@ -735,6 +1086,7 @@ password = 123qwe$%&RTY ;account_name = ;account_key = ;container_name = +;sas_token_expiration_days = [external_image_storage.local] # does not require any configuration @@ -745,9 +1097,16 @@ password = 123qwe$%&RTY ;server_url = # If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/. ;callback_url = +# An auth token that will be sent to and verified by the renderer. The renderer will deny any request without an auth token matching the one configured on the renderer side. +;renderer_token = - # Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server, # which this setting can help protect against by only allowing a certain amount of concurrent requests. ;concurrent_render_request_limit = 30 +# Determines the lifetime of the render key used by the image renderer to access and render Grafana. +# This setting should be expressed as a duration. Examples: 10s (seconds), 5m (minutes), 2h (hours). +# Default is 5m. This should be more than enough for most deployments. 
+# Change the value only if image rendering is failing and you see `Failed to get the render key from cache` in Grafana logs. +;render_key_lifetime = 5m [panels] # If set to true Grafana will allow script tags in text panels. Not recommended as it enable XSS vulnerabilities. @@ -756,9 +1115,35 @@ password = 123qwe$%&RTY [plugins] ;enable_alpha = false ;app_tls_skip_verify_insecure = false -# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature. +# Enter a comma-separated list of plugin identifiers to identify plugins to load even if they are unsigned. Plugins with modified signatures are never loaded. ;allow_loading_unsigned_plugins = -;marketplace_url = https://grafana.com/grafana/plugins/ +# Enable or disable installing / uninstalling / updating plugins directly from within Grafana. +;plugin_admin_enabled = false +;plugin_admin_external_manage_enabled = false +;plugin_catalog_url = https://grafana.com/grafana/plugins/ +# Enter a comma-separated list of plugin identifiers to hide in the plugin catalog. +;plugin_catalog_hidden_plugins = + +#################################### Grafana Live ########################################## +[live] +# max_connections to Grafana Live WebSocket endpoint per Grafana server instance. See Grafana Live docs +# if you are planning to make it higher than default 100 since this can require some OS and infrastructure +# tuning. 0 disables Live, -1 means unlimited connections. +;max_connections = 100 + +# allowed_origins is a comma-separated list of origins that can establish connection with Grafana Live. +# If not set then origin will be matched over root_url. Supports wildcard symbol "*". +;allowed_origins = + +# engine defines an HA (high availability) engine to use for Grafana Live. By default no engine used - in +# this case Live features work only on a single Grafana server. Available options: "redis". 
+# Setting ha_engine is an EXPERIMENTAL feature. +;ha_engine = + +# ha_engine_address sets a connection address for Live HA engine. Depending on engine type address format can differ. +# For now we only support Redis connection address in "host:port" format. +# This option is EXPERIMENTAL. +;ha_engine_address = "127.0.0.1:6379" #################################### Grafana Image Renderer Plugin ########################## [plugin.grafana-image-renderer] @@ -803,12 +1188,14 @@ password = 123qwe$%&RTY # Mode 'reusable' will have one browser instance and will create a new incognito page on each request. ;rendering_mode = -# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser' +# When rendering_mode = clustered, you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser' # and will cluster using browser instances. # Mode 'context' will cluster using incognito pages. ;rendering_clustering_mode = -# When rendering_mode = clustered you can define maximum number of browser instances/incognito pages that can execute concurrently.. +# When rendering_mode = clustered, you can define the maximum number of browser instances/incognito pages that can execute concurrently. Default is '5'. ;rendering_clustering_max_concurrency = +# When rendering_mode = clustered, you can specify the duration a rendering request can take before it will time out. Default is `30` seconds. +;rendering_clustering_timeout = # Limit the maximum viewport width, height and device scale factor that can be requested. ;rendering_viewport_max_width = @@ -825,8 +1212,15 @@ password = 123qwe$%&RTY ;license_path = [feature_toggles] -# enable features, separated by spaces -;enable = +# there are currently two ways to enable feature toggles in the `grafana.ini`. 
+# you can either pass an array of features you want to enable to the `enable` field or +# configure each toggle by setting the name of the toggle to true/false. Toggles set to true/false +# will take precedence over toggles in the `enable` list. + +;enable = feature1,feature2 + +;feature1 = true +;feature2 = false [date_formats] # For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/ @@ -847,3 +1241,31 @@ password = 123qwe$%&RTY # Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc. ;default_timezone = browser + +[expressions] +# Enable or disable the expressions functionality. +;enabled = true + +[geomap] +# Set the JSON configuration for the default basemap +;default_baselayer_config = `{ +; "type": "xyz", +; "config": { +; "attribution": "Open street map", +; "url": "https://tile.openstreetmap.org/{z}/{x}/{y}.png" +; } +;}` + +# Enable or disable loading other base map layers +;enable_custom_baselayers = true + +# Move an app plugin referenced by its id (including all its pages) to a specific navigation section +# Dependencies: needs the `topnav` feature to be enabled [navigation.app_sections] # The following will move an app plugin with the id of `my-app-id` under the `starred` section +# my-app-id = admin + +# Move a specific app plugin page (referenced by its `path` field) to a specific navigation section [navigation.app_standalone_pages] # The following will move the page with the path "/a/my-app-id/starred-content" from `my-app-id` to the `starred` section +# /a/my-app-id/starred-content = starred diff --git a/cookbooks/loki/install.rb b/cookbooks/loki/install.rb index 289fcee..3f5abbb 100644 --- a/cookbooks/loki/install.rb +++ b/cookbooks/loki/install.rb @@ -13,7 +13,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - vtag = $1 if response.body =~ 
%r{tag\/(v\d+\.\d+\.\d+)} + vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} tag = vtag.sub(/^v/, '') loki_bin = "#{node['loki']['zip']}" diff --git a/cookbooks/loki/setup.rb b/cookbooks/loki/setup.rb index 368abf4..36eaff3 100644 --- a/cookbooks/loki/setup.rb +++ b/cookbooks/loki/setup.rb @@ -89,11 +89,6 @@ remote_file '/etc/logrotate.d/loki' do mode '644' end -# Restart the `supervisor`: -service 'supervisor' do - action :nothing -end - # Firewall settings here: %w( 3100/tcp ).each do |p| execute "ufw allow #{p}" do diff --git a/cookbooks/nginx/attributes.rb b/cookbooks/nginx/attributes.rb index a18c75b..c12428c 100644 --- a/cookbooks/nginx/attributes.rb +++ b/cookbooks/nginx/attributes.rb @@ -3,8 +3,8 @@ # ------------------------------------------- node.reverse_merge!({ 'nginx' => { - 'version' => '1.21.3', - 'skip_lego' => 'false', - 'skip_webadm' => 'false' + 'version' => '1.25.0', + 'skip_lego' => 'true', + 'skip_webadm' => 'true' } }) diff --git a/cookbooks/nginx/build.rb b/cookbooks/nginx/build.rb index 0851101..d316ccb 100644 --- a/cookbooks/nginx/build.rb +++ b/cookbooks/nginx/build.rb @@ -38,7 +38,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)} + if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} vtag = $1 tag_version = vtag.sub('v', '') @@ -78,7 +78,7 @@ directory MODULEDIR do end # Build starts here: -execute "#{NGINXBUILD} -d working -v #{version} -c configure.sh -zlib -pcre -openssl -opensslversion=1.1.1d" do +execute "#{NGINXBUILD} -d working -v #{version} -c configure.sh -zlib -pcre -libressl -libresslversion 3.8.0" do cwd WORKDIR user USER diff --git a/cookbooks/nginx/default.rb b/cookbooks/nginx/default.rb index e777b2e..eb36bd5 100644 --- a/cookbooks/nginx/default.rb +++ b/cookbooks/nginx/default.rb @@ -33,12 +33,14 @@ end end # Prerequisites for Building nginx: -if node['nginx']['skip_webadm'] +if !node['nginx']['skip_webadm'] include_recipe './webadm.rb' + 
include_recipe '../blog/default.rb' + include_recipe '../everun/default.rb' end # Install Let's Encrypt: -if node['nginx']['skip_lego'] +if !node['nginx']['skip_lego'] include_recipe './lego.rb' end diff --git a/cookbooks/nginx/deploy.rb b/cookbooks/nginx/deploy.rb deleted file mode 100644 index 12637fc..0000000 --- a/cookbooks/nginx/deploy.rb +++ /dev/null @@ -1,84 +0,0 @@ -##################################### -# LEGO Settings -##################################### -execute "#{LEGO_STORAGE}/lego_run.sh" do - user 'root' - cwd LEGO_STORAGE - not_if "test -d #{LEGO_STORAGE}/.lego" -end - -encrypted_remote_file '/etc/cron.d/lego' do - owner 'root' - group 'root' - mode '644' - source 'files/etc/cron.d/lego' - password ENV['ITAMAE_PASSWORD'] -end - -remote_file "/etc/lego/dhparams_4096.pem" do - owner 'root' - group 'root' - mode '444' -end - -execute "openssl rand 48 > /etc/lego/ticket.key" - - -##################################### -# Deploy nginx Settings -##################################### - -# Deploy the `sudoers` file: -remote_file '/etc/sudoers.d/webadm' do - owner 'root' - group 'root' - mode '440' -end - -# Create directories: -%w(/home/webadm/.ssh /home/webadm/repo).each do |d| - directory d do - owner 'webadm' - group 'webadm' - mode '700' - end -end - -# Deploy `~/.ssh/.ssh/authorized_keys`: -encrypted_remote_file '/home/webadm/.ssh/authorized_keys' do - owner 'webadm' - group 'webadm' - mode '600' - source 'files/home/webadm/.ssh/authorized_keys' - password ENV['ITAMAE_PASSWORD'] -end - -# Deploy secret keys -%w( id_rsa.github id_rsa.chef ).each do |conf| - encrypted_remote_file "/home/webadm/.ssh/#{conf}" do - owner 'webadm' - group 'webadm' - mode '600' - source "files/home/webadm/.ssh/#{conf}" - password ENV['ITAMAE_PASSWORD'] - end -end - -# Create `repo` directory: -git '/home/webadm/repo/nginx-config' do - user 'webadm' - repository 'https://gitea.kazu634.com/kazu634/nginx-config.git' -end - -execute '/home/webadm/repo/nginx-config/deploy.sh' do 
- user 'root' - cwd '/home/webadm/repo/nginx-config/' -end - -service 'consul-template' do - action :restart -end - -service 'nginx' do - action :restart -end diff --git a/cookbooks/nginx/files/etc/cron.d/everun b/cookbooks/nginx/files/etc/cron.d/everun new file mode 100644 index 0000000..56a9206 --- /dev/null +++ b/cookbooks/nginx/files/etc/cron.d/everun @@ -0,0 +1,10 @@ +md5:e2c4b92cac6937e5c2e14bcb166748cf:salt:35-2-158-147-217-138-24-188:aes-256-cfb:m5WUGUv4kMl3U4EpsDCZbTmqfDQEp3CGzBk84671Dhxt0rRtETnCY2ECGD7W ++O9MMKk0jCDCUxz7EZoggsHQL40dwvcCKs5qgcFFmZYOMygfxBVJ+cqBZ//0 +Zdav0tp1Qc3ejX2x3kmZBgAn4WCRVCmIZYtPYj0w4nrAohXSITJOo6MKNfsB +ASvoywRNHTYJAxT/UrYrJudR3Yq2a0gIcVgGZAYBKOUb2syMTixo245x128p +pX2QtcHjE87g9uGeUVWLkIM9m5uvBGULgdKknO03PXF0jWHxQvv/RRN+aG0H +To70zhqrlJWibKlO9PgPyVhoQSgxBG9i2f18hw2Kcnr0xSYvfC3yfkvem5C2 +Zgpj+xRIfbB6tw7k/ePdguBJ5e94Y5nDtavMr58Wxgtnleyc3/k/iRgK1wpD +BUrf83ZWMt3QPwDL4J5npo+4YDCObrsvO3BD14XMUHpSpCvVdKCnMnngQdRt +7TERfhMMRCPcHbUD9gFh6HcsT+GzU6a9iwyJ03nYweWB/nXGGfwnTfrklwfJ +CuTSFnA= \ No newline at end of file diff --git a/cookbooks/nginx/files/etc/cron.d/kazu634 b/cookbooks/nginx/files/etc/cron.d/kazu634 new file mode 100644 index 0000000..7cd897f --- /dev/null +++ b/cookbooks/nginx/files/etc/cron.d/kazu634 @@ -0,0 +1,10 @@ +md5:3429dd1d1b7fae6ff356c639afaeaa7c:salt:114-48-239-183-69-3-57-50:aes-256-cfb:/mKhySMGT7hiRIYO45LOqBxEmwI6wCQKvrwdK+sOJq5p5xbn7wDiYwUWnhGT +feCcW0iiVS0Qq5Wpnf01KTBaQWPditaR/CBYxCToV0EZ+7lA6HUTaX7qELGP +nPTkPn6CmTgW7I/kI9XfkeeSbT0Ti+2xo3XSpce1kftGp67aBcxM6XLSCKiS +IUMFoQIBHbUlJxJ5y6vj3uA/2v/r99y/dHymoKS695abnFPfq6rDqnJC7PKe +wEeLoObLSauqgnTF4CZUgZxaSSVUCNRjkV3WTHiu3UIEsHjiwJFBqJfWzVr9 +dvgzZAFt5YUwwGHEhZjtO66/Tp8Po4SZzRRDbftCBSS8nIZQ66qYwmKHGK98 +eYOFtpbQYnVMJKWd+orSDse61CcaT2tPgTZ4fjln/a4Ru4V5Kr4/HRyRmn6J +bIzBVuBVuh8T0oh36GSefSjfL7KyProS4waFlX53qwrMPHBmP873cJ2ZO1GN +HYk2QEUPP1BWEWiv9kfNu6mZPKVHIL7CEkvOAxlDWaKjgll5eNbbfzDw1hh3 +Hn3RPGs= \ No newline at end of file diff --git a/cookbooks/nginx/files/etc/cron.d/lego 
b/cookbooks/nginx/files/etc/cron.d/lego deleted file mode 100644 index 24ce6b7..0000000 --- a/cookbooks/nginx/files/etc/cron.d/lego +++ /dev/null @@ -1,10 +0,0 @@ -md5:c198ab6d9a81886d9cdf034ced32690d:salt:60-164-230-14-22-35-114-134:aes-256-cfb:atMw5FrhPmOlYIuYK/874SqBpeIBEjWtsDIyOFJzrmWywTHKczcGsev1hngX -zKyL249Q78t+aoFmNIBxZq/SxToTPbeP5hsBs5ELn2IVHwR0/uBfJD4e0irN -cGfeOMDo8n7CLwYruoAzHeTlFxJUVzZg49h5ZdlIlWjnOsgXffdi8SRvD2v8 -hBFppx5ynNaI7Cue4YBf0DtjejuWiimXZZz0GkDjHHHK5ie2/BdHUGrLLPEQ -o8ZvVUxUhxQ3Kk0flqiwlXVKZs16qB589lG4nCJ5NV0KbkAdJ1GPHK0+yBTz -D21Cz0ilals+WhrVhFXZjZyUM73auhCXJC98vffdrhDfoQZyZTUD3NGWpgnP -Jy1T4nEi5HhXKgvvBEob2M85BfBlP1x1ll4ear/c+18Uf98k08/Rsya4xYqf -W6Uq4RHeVEgK4QsLbWxENhstjpc/RIW0yds6WEdJzwayu5MSVeQ4A24HALke -Kxv5MUs81lzlMjwBWmv+AAgdn842A1OpkHPoqRUr6incxQwkqRvSDQw8R02Y -MJovgrblcRVpPo/HYsdGkqAlEv4FcCK3XRd8yiczghUlkhNokB8= \ No newline at end of file diff --git a/cookbooks/nginx/files/etc/lego/everun_run.sh b/cookbooks/nginx/files/etc/lego/everun_run.sh new file mode 100644 index 0000000..a957647 --- /dev/null +++ b/cookbooks/nginx/files/etc/lego/everun_run.sh @@ -0,0 +1,8 @@ +md5:c97addd9484611e9038f4d21490f95ef:salt:46-243-167-154-98-197-19-76:aes-256-cfb:MulsiIrRht0HvexrIXKc6q6pW9B4LnSaNB3FQyOghiAmaQKafmjvaPycv3nl +O/2FKcYHZ9g4sRysBo9t/Yttd7Q+ytGKz5MWG0w7vddvVsijaBjcqltS5Zvh +r6gTozBur13iBqsk7AYlU/wjyH62Zdgmo0rJBHp70Zqx4Bk81bDrqHbypzcK +XcM1Qg1jU1Y0bJgUyCLkpTYOjtNBug0sRYQ/Slv0/UbzgEA5WtTO7sRAEPuj +Y0qvUJVDz+0zYRinOwCOA+IGARqB5GsDtQ4YgGR9kKSmoUPPRSjIg7xSKB0S +rn1CUSjKEbmPIHeOMWSg7CXmOzzVPMTNqM6MLjGHmOyWGSDPvwRiPI5AacNu +AmOsFNY2EiWUJolrz5RpZZXjkGFmcwnxn+7ZtoWO7nD8JhaCrPpxC6C/rnav +ZGg= \ No newline at end of file diff --git a/cookbooks/nginx/files/etc/lego/kazu634_run.sh b/cookbooks/nginx/files/etc/lego/kazu634_run.sh new file mode 100644 index 0000000..ea6ddf5 --- /dev/null +++ b/cookbooks/nginx/files/etc/lego/kazu634_run.sh @@ -0,0 +1,8 @@ 
+md5:032af53422a767d4edf60d5d2f8ec84e:salt:231-40-60-67-6-253-79-25:aes-256-cfb:PiAZ+U6IHA4GvL3gDsLzeV48MvnaAaEbAqWqYLq4TrsrbRj8J2QT6ANUjZoC +IxHgZ8yn/jNmpGrqj1ZPvF3V2qGG9RomI5txRf3oEWaiM1EGoHrcgj5GSEeF +7izz9sPV+DGA/aY0VTZOSIIdogZ7yY8KGRJ5w30KTmJtvZ6zzYUFzBtzqLup +Ax3I5OzDJUuIOWr0wcE+SPAuBq4VWzfY2gTUUeepy+VMDilN2dltRAlPL+6R +t8wy4JjIuQ8y/fYVYkSVACWgL9cXWWQWgyk8yr+KJFV3ejL0UxwCGtpy54cj +kVtt1b3i/VhntaSFKMzY6BtRKrSbtd1nvuMT8gSrY9Kq6MFUNorjlAkAznkK +R4Jw6aWF8aMor3JhCp0aqc109K9pvmvkCRvCkYKH/Fs9DLGD1AsEDPFrdndi +AWs= \ No newline at end of file diff --git a/cookbooks/nginx/files/etc/lego/lego_run.sh b/cookbooks/nginx/files/etc/lego/lego_run.sh deleted file mode 100644 index afceee3..0000000 --- a/cookbooks/nginx/files/etc/lego/lego_run.sh +++ /dev/null @@ -1,8 +0,0 @@ -md5:c3ff40a35a072ebcdf4b00de0c62eede:salt:220-201-162-125-99-148-31-141:aes-256-cfb:P5sXyTi2l8dAegj6vwcxQlxAoXCz9ynBa/f2BATSr+ViTEQmlgqiMi6N7Zud -URbZGWBf94Wr0QqN3JMDqKX3d/ajr1C6tSoG25NL7r293PjR6icNaGklP4S+ -WjNZWnEslsIfarfZZoSDw557BPo52r8nkEwSPfgdsZQiZgIUvSYAwZbVCp99 -Frwyg9fc9riQ3zxOcYxygCVKZGyEKj0R+W4BBTeoMXzfzVu+kQUR+ZS1HVco -pEHAufUq4zI7P1EHFhZBM6A/E9c048Xr6ClshStsQA51qLwbnjhrBMZzQbJt -IJ9fcoTpHQq4NTD6XItiB7vFVbe6DDlQUPP4JQ0e3rxeX0Pwontjipqk2ucM -L5aN8Q+4H3JdH3x9Z2H0YlDJZ6i1XbIp2vp7ijtMlJR/pEc9ryEvBkbGH2yW -4DuvEQHOeQcb \ No newline at end of file diff --git a/cookbooks/nginx/files/etc/nginx/nginx.conf b/cookbooks/nginx/files/etc/nginx/nginx.conf index c9c5eae..62c9bd0 100644 --- a/cookbooks/nginx/files/etc/nginx/nginx.conf +++ b/cookbooks/nginx/files/etc/nginx/nginx.conf @@ -96,16 +96,27 @@ http { # Logging Settings ## - log_format ltsv "time:$time_local\thost:$remote_addr" - "\tforwardedfor:$http_x_forwarded_for\t" - "method:$request_method\tpath:$request_uri\tprotocol:$server_protocol" - "\tstatus:$status\tsize:$body_bytes_sent\treferer:$http_referer" - "\tua:$http_user_agent\ttaken_sec:$request_time" - "\tbackend:$upstream_addr\tbackend_status:$upstream_status" - 
"\tcache:$upstream_http_x_cache\tbackend_runtime:$upstream_response_time" - "\tvhost:$host"; + log_format json escape=json + '{' + '"time":"$time_local",' + '"host":"$remote_addr",' + '"forwardedfor":"$http_x_forwarded_for",' + '"method":"$request_method",' + '"path":"$request_uri",' + '"protocol":"$server_protocol",' + '"status":"$status",' + '"size":"$body_bytes_sent",' + '"referer":"$http_referer",' + '"ua":"$http_user_agent",' + '"taken_sec":"$request_time",' + '"backend":"$upstream_addr",' + '"backend_status":"$upstream_status",' + '"cache":"$upstream_http_x_cache",' + '"backend_runtime":"$upstream_response_time",' + '"vhost":"$host"' + '}'; - access_log /var/log/nginx/access.log ltsv; + access_log /var/log/nginx/access.log json; error_log /var/log/nginx/error.log; ## diff --git a/cookbooks/nginx/files/etc/systemd/system/vector-nginx-access.service b/cookbooks/nginx/files/etc/systemd/system/vector-nginx-access.service new file mode 100644 index 0000000..6ec76bc --- /dev/null +++ b/cookbooks/nginx/files/etc/systemd/system/vector-nginx-access.service @@ -0,0 +1,17 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/nginx-access.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/nginx/files/etc/systemd/system/vector-nginx-error.service b/cookbooks/nginx/files/etc/systemd/system/vector-nginx-error.service new file mode 100644 index 0000000..2debe77 --- /dev/null +++ b/cookbooks/nginx/files/etc/systemd/system/vector-nginx-error.service @@ -0,0 +1,17 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/nginx-error.toml +ExecReload=/bin/kill -HUP $MAINPID 
+Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/nginx/files/etc/vector/nginx-access.toml b/cookbooks/nginx/files/etc/vector/nginx-access.toml new file mode 100644 index 0000000..9326888 --- /dev/null +++ b/cookbooks/nginx/files/etc/vector/nginx-access.toml @@ -0,0 +1,65 @@ +data_dir = "/var/lib/vector/" + +[sources.nginx] + type = "file" + include = [ "/var/log/nginx/*access.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.nginx_transform] + type = "remap" + inputs = ["nginx"] + source = ''' + .hostname = .host + + l = parse_json!(.message) + . = merge!(., l) + + del(.message) + del(.host) + + .status = string!(.status) + if match(.status, r'^[23]') { + .level = "info" + } else if match(.status, r'^[4]') { + .level = "warn" + } else { + .level = "error" + } + + .timestamp = parse_timestamp!(.time, format: "%d/%b/%Y:%T %z") + del(.time) + ''' + +[sinks.nginx_output] +type = "file" +inputs = [ "nginx_transform" ] +compression = "none" +path = "/tmp/nginx-access-%Y-%m-%d.log" + + [sinks.nginx_output.encoding] + codec = "json" + + [sinks.nginx_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.nginx_loki] +type = "loki" +inputs = [ "nginx_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.nginx_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "nginx" + vhost = "{{ vhost }}" + + [sinks.nginx_loki.encoding] + codec = "json" + + [sinks.nginx_loki.buffer] + max_size = 268435488 + type = "disk" + diff --git a/cookbooks/nginx/files/etc/vector/nginx-error.toml b/cookbooks/nginx/files/etc/vector/nginx-error.toml new file mode 100644 index 0000000..d4b2f2b --- /dev/null +++ b/cookbooks/nginx/files/etc/vector/nginx-error.toml @@ -0,0 +1,56 @@ +data_dir = "/var/lib/vector/" + +[sources.nginx-error] + type = "file" + include = [ "/var/log/nginx/*error.log" ] + 
ignore_older_secs = 600 + read_from = "beginning" + +[transforms.nginx-error_transform] + type = "remap" + inputs = ["nginx-error"] + source = ''' + .hostname = .host + del(.host) + + el, err = parse_regex(.message, r'^(?P[^ ]+ [^ ]+) (?P[^ ]+) (?P.*)$') + . = merge(., el) + + tmp, err = replace(.level, "[", "") + .level = replace(tmp, "]", "") + + .timestamp = parse_timestamp!(.timestamp, "%Y/%m/%d %T") + ''' + +[sinks.nginx-error_output] +type = "file" +inputs = [ "nginx-error_transform" ] +compression = "none" +path = "/tmp/nginx-error-%Y-%m-%d.log" + + [sinks.nginx-error_output.encoding] + codec = "json" + + [sinks.nginx-error_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.nginx-error_loki] +type = "loki" +inputs = [ "nginx-error_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.nginx-error_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + vhost = "{{ vhost }}" + job = "nginx" + + [sinks.nginx-error_loki.encoding] + codec = "json" + + [sinks.nginx-error_loki.buffer] + max_size = 268435488 + type = "disk" + diff --git a/cookbooks/nginx/files/home/webadm/nginx-build/configure.sh b/cookbooks/nginx/files/home/webadm/nginx-build/configure.sh old mode 100644 new mode 100755 index 6930690..71fb9f3 --- a/cookbooks/nginx/files/home/webadm/nginx-build/configure.sh +++ b/cookbooks/nginx/files/home/webadm/nginx-build/configure.sh @@ -8,4 +8,4 @@ --http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit --with-ipv6 --with-http_ssl_module \ --with-http_v2_module --with-http_stub_status_module --with-http_realip_module --with-http_auth_request_module \ --with-http_addition_module --with-http_geoip_module --with-http_gunzip_module --with-http_gzip_static_module \ - --with-http_sub_module --with-stream --with-stream_ssl_module + --with-http_sub_module --with-stream --with-stream_ssl_module --with-http_v3_module diff --git a/cookbooks/nginx/lego.rb b/cookbooks/nginx/lego.rb index 
9edb3ed..dbaf358 100644 --- a/cookbooks/nginx/lego.rb +++ b/cookbooks/nginx/lego.rb @@ -24,7 +24,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)} + if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} vtag = $1 tag_version = vtag.sub('v', '') @@ -78,26 +78,27 @@ directory "#{LEGO_STORAGE}" do mode '755' end -encrypted_remote_file "#{LEGO_STORAGE}/lego_run.sh" do - owner 'root' - group 'root' - mode '500' - source "files/#{LEGO_STORAGE}/lego_run.sh" - password ENV['ITAMAE_PASSWORD'] -end +%w( kazu634 everun ).each do |domain| + encrypted_remote_file "#{LEGO_STORAGE}/#{domain}_run.sh" do + owner 'root' + group 'root' + mode '500' + source "files/#{LEGO_STORAGE}/#{domain}_run.sh" + password ENV['ITAMAE_PASSWORD'] + end -execute "#{LEGO_STORAGE}/lego_run.sh" do - user 'root' - cwd LEGO_STORAGE - not_if "test -d #{LEGO_STORAGE}/.lego" -end + execute "#{LEGO_STORAGE}/#{domain}_run.sh" do + user 'root' + cwd LEGO_STORAGE + end -encrypted_remote_file '/etc/cron.d/lego' do - owner 'root' - group 'root' - mode '644' - source 'files/etc/cron.d/lego' - password ENV['ITAMAE_PASSWORD'] + encrypted_remote_file "/etc/cron.d/#{domain}" do + owner 'root' + group 'root' + mode '644' + source "files/etc/cron.d/#{domain}" + password ENV['ITAMAE_PASSWORD'] + end end remote_file "/etc/lego/dhparams_4096.pem" do diff --git a/cookbooks/nginx/setup.rb b/cookbooks/nginx/setup.rb index 0ef7337..1433459 100644 --- a/cookbooks/nginx/setup.rb +++ b/cookbooks/nginx/setup.rb @@ -13,7 +13,7 @@ remote_file '/lib/systemd/system/nginx.service' do end # Firewall Setting: -%w( 80/tcp 443/tcp ).each do |port| +%w( 80/tcp 443/tcp 443/udp ).each do |port| execute "ufw allow #{port}" do user 'root' @@ -35,25 +35,36 @@ service 'nginx' do action [ :enable, :start ] end -# Deploy `promtail` config file: -HOSTNAME = run_command('uname -n').stdout.chomp - -template '/etc/promtail/nginx.yaml' do - owner 'root' - group 'root' - mode '644' - - 
variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint']) -end - -# Deploy the `systemd` configuration: -remote_file '/lib/systemd/system/promtail-nginx.service' do +# Deploy `vector` config: +remote_file '/etc/vector/nginx-access.toml' do owner 'root' group 'root' mode '644' end -# Service setting: -service 'promtail-nginx' do - action [ :enable, :restart ] +remote_file '/etc/systemd/system/vector-nginx-access.service' do + owner 'root' + group 'root' + mode '644' end + +service 'vector-nginx-access' do + action [ :enable, :start ] +end + +remote_file '/etc/vector/nginx-error.toml' do + owner 'root' + group 'root' + mode '644' +end + +remote_file '/etc/systemd/system/vector-nginx-error.service' do + owner 'root' + group 'root' + mode '644' +end + +service 'vector-nginx-error' do + action [ :enable, :start ] +end + diff --git a/cookbooks/nginx/webadm.rb b/cookbooks/nginx/webadm.rb index 7b16ccd..e8336ca 100644 --- a/cookbooks/nginx/webadm.rb +++ b/cookbooks/nginx/webadm.rb @@ -6,3 +6,58 @@ user 'webadm' do create_home true end +##################################### +# Deploy nginx Settings +##################################### + +# Deploy the `sudoers` file: +remote_file '/etc/sudoers.d/webadm' do + owner 'root' + group 'root' + mode '440' +end + +# Create directories: +%w(/home/webadm/.ssh /home/webadm/repo).each do |d| + directory d do + owner 'webadm' + group 'webadm' + mode '700' + end +end + +# Deploy `~/.ssh/.ssh/authorized_keys`: +encrypted_remote_file '/home/webadm/.ssh/authorized_keys' do + owner 'webadm' + group 'webadm' + mode '600' + source 'files/home/webadm/.ssh/authorized_keys' + password ENV['ITAMAE_PASSWORD'] +end + +# Deploy secret keys +%w( id_rsa.github id_rsa.chef ).each do |conf| + encrypted_remote_file "/home/webadm/.ssh/#{conf}" do + owner 'webadm' + group 'webadm' + mode '600' + source "files/home/webadm/.ssh/#{conf}" + password ENV['ITAMAE_PASSWORD'] + end +end + +# Create `repo` directory: +git 
'/home/webadm/repo/nginx-config' do + user 'webadm' + repository 'https://github.com/kazu634/nginx-config.git' +end + +execute '/home/webadm/repo/nginx-config/deploy.sh' do + user 'root' + cwd '/home/webadm/repo/nginx-config/' +end + +service 'consul-template' do + action :restart +end + diff --git a/cookbooks/prometheus-exporters/exporter_proxy.rb b/cookbooks/prometheus-exporters/exporter_proxy.rb index 55ffca2..9893c30 100644 --- a/cookbooks/prometheus-exporters/exporter_proxy.rb +++ b/cookbooks/prometheus-exporters/exporter_proxy.rb @@ -1,4 +1,4 @@ -URL = 'https://github.com/rrreeeyyy/exporter_proxy/releases/download/v0.1.0/exporter_proxy_linux_amd64' +URL = 'https://github.com/rrreeeyyy/exporter_proxy/releases/download/v0.4.1/exporter_proxy_linux_amd64' BIN = '/usr/local/bin/exporter_proxy' CONFDIR = '/etc/prometheus_exporters.d/exporter_proxy/' CONF = 'config.yml' @@ -28,16 +28,27 @@ remote_file "#{CONFDIR}#{CONF}" do mode '644' end -remote_file '/etc/supervisor/conf.d/exporter_proxy.conf' do +remote_file '/etc/systemd/system/exporter_proxy.service' do + user 'root' + group 'root' + + mode '644' +end + +service 'exporter_proxy' do + action [:enable, :start] +end + +remote_file '/etc/consul.d/service-exporter_proxy.json' do user 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[consul]' end -service 'supervisor' do +service 'consul' do action :nothing end diff --git a/cookbooks/gitea/files/etc/consul.d/service-gitea.json b/cookbooks/prometheus-exporters/files/etc/consul.d/service-exporter_proxy.json similarity index 53% rename from cookbooks/gitea/files/etc/consul.d/service-gitea.json rename to cookbooks/prometheus-exporters/files/etc/consul.d/service-exporter_proxy.json index add1be2..5b906d3 100644 --- a/cookbooks/gitea/files/etc/consul.d/service-gitea.json +++ b/cookbooks/prometheus-exporters/files/etc/consul.d/service-exporter_proxy.json @@ -1,11 +1,12 @@ { "service": { - "name": "gitea", - "port": 3000, 
+ "name": "exporter-proxy", + "port": 60000, "check":{ - "tcp": "localhost:3000", + "tcp": "localhost:60000", "interval": "60s", "timeout": "1s", + "status": "passing", "success_before_passing": 3 } } diff --git a/cookbooks/prometheus-exporters/files/etc/default/node_exporter b/cookbooks/prometheus-exporters/files/etc/default/node_exporter new file mode 100644 index 0000000..abb1a6b --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/default/node_exporter @@ -0,0 +1 @@ +OPTIONS=' --web.listen-address="127.0.0.1:9100" --collector.systemd' diff --git a/cookbooks/prometheus-exporters/files/etc/logrotate.d/filestat_exporter b/cookbooks/prometheus-exporters/files/etc/logrotate.d/filestat_exporter new file mode 100644 index 0000000..0cb21ec --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/logrotate.d/filestat_exporter @@ -0,0 +1,13 @@ +/var/log/filestat_exporter.log +{ + rotate 4 + weekly + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + /usr/lib/rsyslog/rsyslog-rotate + endscript +} diff --git a/cookbooks/prometheus-exporters/files/etc/logrotate.d/node_exporter b/cookbooks/prometheus-exporters/files/etc/logrotate.d/node_exporter new file mode 100644 index 0000000..78838a9 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/logrotate.d/node_exporter @@ -0,0 +1,13 @@ +/var/log/node_exporter.log +{ + rotate 4 + weekly + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + /usr/lib/rsyslog/rsyslog-rotate + endscript +} diff --git a/cookbooks/prometheus-exporters/files/etc/prometheus_exporters.d/exporter_proxy/config.yml b/cookbooks/prometheus-exporters/files/etc/prometheus_exporters.d/exporter_proxy/config.yml index a6d0d27..f970a4a 100644 --- a/cookbooks/prometheus-exporters/files/etc/prometheus_exporters.d/exporter_proxy/config.yml +++ b/cookbooks/prometheus-exporters/files/etc/prometheus_exporters.d/exporter_proxy/config.yml @@ -3,13 +3,13 @@ listen: "0.0.0.0:60000" # access_log 
(optional) access_log: - path: "/dev/stdout" + path: "/var/log/exporter_proxy_access.log" format: "ltsv" fields: ['time', 'time_nsec', 'status', 'size', 'reqtime_nsec', 'backend', 'path', 'query', 'method'] # error_log (required) error_log: - path: "/dev/stderr" + path: "/var/log/exporter_proxy_access.log" # exporters: The path of exporter_proxy and the URL of the destination exporter exporters: @@ -19,3 +19,4 @@ exporters: filestat_exporter: path: "/filestat_exporter/metrics" url: "http://127.0.0.1:9943/metrics" + diff --git a/cookbooks/prometheus-exporters/files/etc/rsyslog.d/30-filestat_exporter.conf b/cookbooks/prometheus-exporters/files/etc/rsyslog.d/30-filestat_exporter.conf new file mode 100644 index 0000000..d0167fd --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/rsyslog.d/30-filestat_exporter.conf @@ -0,0 +1,7 @@ +# Log kernel generated digdag log messages to file +:syslogtag,contains,"filestat_exporter" /var/log/filestat_exporter.log + +# Uncomment the following to stop logging anything that matches the last rule. +# Doing this will stop logging kernel generated UFW log messages to the file +# normally containing kern.* messages (eg, /var/log/kern.log) +& stop diff --git a/cookbooks/prometheus-exporters/files/etc/rsyslog.d/30-node_exporter.conf b/cookbooks/prometheus-exporters/files/etc/rsyslog.d/30-node_exporter.conf new file mode 100644 index 0000000..0027a17 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/rsyslog.d/30-node_exporter.conf @@ -0,0 +1,7 @@ +# Log kernel generated digdag log messages to file +:syslogtag,contains,"node_exporter" /var/log/node_exporter.log + +# Uncomment the following to stop logging anything that matches the last rule. 
+# Doing this will stop logging kernel generated UFW log messages to the file +# normally containing kern.* messages (eg, /var/log/kern.log) +& stop diff --git a/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/exporter_proxy.conf b/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/exporter_proxy.conf deleted file mode 100644 index 60b6944..0000000 --- a/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/exporter_proxy.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:exporter_proxy] -command=/usr/local/bin/exporter_proxy -config /etc/prometheus_exporters.d/exporter_proxy/config.yml -stdout_logfile=/var/log/supervisor/exporter_proxy.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=HUP diff --git a/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/filestat_exporter.conf b/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/filestat_exporter.conf deleted file mode 100644 index c61c76c..0000000 --- a/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/filestat_exporter.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:filestat_exporter] -command=/usr/local/bin/filestat_exporter --config.file=/etc/prometheus_exporters.d/filestat.yml -stdout_logfile=/var/log/supervisor/filestat_exporter.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=HUP diff --git a/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/node_exporter.conf b/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/node_exporter.conf deleted file mode 100644 index b5e5c04..0000000 --- a/cookbooks/prometheus-exporters/files/etc/supervisor/conf.d/node_exporter.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:node_exporter] -command=/usr/local/bin/node_exporter --web.listen-address="127.0.0.1:9100" -stdout_logfile=/var/log/supervisor/node_exporter.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 
-autorestart=true -stopsignal=HUP diff --git a/cookbooks/prometheus-exporters/files/etc/systemd/system/exporter_proxy.service b/cookbooks/prometheus-exporters/files/etc/systemd/system/exporter_proxy.service new file mode 100644 index 0000000..1fc3c77 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/systemd/system/exporter_proxy.service @@ -0,0 +1,11 @@ +[Unit] +Description=Exporter Proxy + +[Service] +User=root +Group=root +ExecStart=/usr/local/bin/exporter_proxy -config /etc/prometheus_exporters.d/exporter_proxy/config.yml + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/prometheus-exporters/files/etc/systemd/system/filestat_exporter.service b/cookbooks/prometheus-exporters/files/etc/systemd/system/filestat_exporter.service new file mode 100644 index 0000000..6136353 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/systemd/system/filestat_exporter.service @@ -0,0 +1,11 @@ +[Unit] +Description=Filestat Exporter + +[Service] +User=root +Group=root +ExecStart=/usr/local/bin/filestat_exporter --config.file=/etc/prometheus_exporters.d/filestat.yml + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/prometheus-exporters/files/etc/systemd/system/node_exporter.service b/cookbooks/prometheus-exporters/files/etc/systemd/system/node_exporter.service new file mode 100644 index 0000000..f3b7aa6 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/systemd/system/node_exporter.service @@ -0,0 +1,11 @@ +[Unit] +Description=Node Exporter + +[Service] +User=root +Group=root +EnvironmentFile=-/etc/default/node_exporter +ExecStart=/usr/local/bin/node_exporter $OPTIONS + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus-exporters/files/etc/systemd/system/vector-filestat_exporter.service b/cookbooks/prometheus-exporters/files/etc/systemd/system/vector-filestat_exporter.service new file mode 100644 index 0000000..f060904 --- /dev/null +++ 
b/cookbooks/prometheus-exporters/files/etc/systemd/system/vector-filestat_exporter.service @@ -0,0 +1,16 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/filestat_exporter.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus-exporters/files/etc/systemd/system/vector-node_exporter.service b/cookbooks/prometheus-exporters/files/etc/systemd/system/vector-node_exporter.service new file mode 100644 index 0000000..d5679db --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/systemd/system/vector-node_exporter.service @@ -0,0 +1,17 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/node_exporter.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/prometheus-exporters/files/etc/vector/filestat_exporter.toml b/cookbooks/prometheus-exporters/files/etc/vector/filestat_exporter.toml new file mode 100644 index 0000000..42f47d8 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/vector/filestat_exporter.toml @@ -0,0 +1,55 @@ +data_dir = "/var/lib/vector/" + +[sources.filestat_exporter] + type = "file" + include = [ "/var/log/filestat_exporter.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.filestat_exporter_transform] + type = "remap" + inputs = ["filestat_exporter"] + source = ''' + . |= parse_syslog!(.message) + + . 
|= parse_key_value!(.message) + + del(.message) + del(.host) + + .message = .msg + del(.msg) + ''' + +[sinks.filestat_exporter_output] +type = "file" +inputs = [ "filestat_exporter_transform" ] +compression = "none" +path = "/tmp/filestat_exporter-%Y-%m-%d.log" + + [sinks.filestat_exporter_output.encoding] + codec = "json" + + [sinks.filestat_exporter_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.filestat_exporter_loki] +type = "loki" +inputs = [ "filestat_exporter_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.filestat_exporter_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "filestat_exporter" + filename = "/var/log/filestat_exporter.log" + + [sinks.filestat_exporter_loki.encoding] + codec = "json" + + [sinks.filestat_exporter_loki.buffer] + max_size = 268435488 + type = "disk" + diff --git a/cookbooks/prometheus-exporters/files/etc/vector/node_exporter.toml b/cookbooks/prometheus-exporters/files/etc/vector/node_exporter.toml new file mode 100644 index 0000000..539dd46 --- /dev/null +++ b/cookbooks/prometheus-exporters/files/etc/vector/node_exporter.toml @@ -0,0 +1,54 @@ +data_dir = "/var/lib/vector/" + +[sources.node_exporter] + type = "file" + include = [ "/var/log/node_exporter.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.node_exporter_transform] + type = "remap" + inputs = ["node_exporter"] + source = ''' + . |= parse_syslog!(.message) + + . 
|= parse_key_value!(.message) + + del(.message) + del(.host) + + .message = .msg + del(.msg) + ''' + +[sinks.node_exporter_output] +type = "file" +inputs = [ "node_exporter_transform" ] +compression = "none" +path = "/tmp/node_exporter-%Y-%m-%d.log" + + [sinks.node_exporter_output.encoding] + codec = "json" + + [sinks.node_exporter_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.node_exporter_loki] +type = "loki" +inputs = [ "node_exporter_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.node_exporter_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "node_exporter" + filename = "/var/log/node_exporter.log" + + [sinks.node_exporter_loki.encoding] + codec = "json" + + [sinks.node_exporter_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/prometheus-exporters/filestat_exporter_setup.rb b/cookbooks/prometheus-exporters/filestat_exporter_setup.rb index 71b2a9f..dd35c9f 100644 --- a/cookbooks/prometheus-exporters/filestat_exporter_setup.rb +++ b/cookbooks/prometheus-exporters/filestat_exporter_setup.rb @@ -1,22 +1,61 @@ -# Deploy the `supervisord` configuration: +# Deploy the configuration: remote_file '/etc/prometheus_exporters.d/filestat.yml' do owner 'root' group 'root' mode '644' - - notifies :restart, 'service[supervisor]' end -# Deploy the `supervisord` configuration: -remote_file '/etc/supervisor/conf.d/filestat_exporter.conf' do +# Deploy the `systemd` configuration: +remote_file '/etc/systemd/system/filestat_exporter.service' do + owner 'root' + group 'root' + mode '644' +end + +service 'filestat_exporter' do + action [:enable, :start] +end + +# Deploy `rsyslog` config: +remote_file '/etc/rsyslog.d/30-filestat_exporter.conf' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[rsyslog]' end -# Deploy `consul` config for `node_exporter`: +service 'rsyslog' do + action :nothing +end + +# Deploy 
`logrotate` config: +remote_file '/etc/logrotate.d/filestat_exporter' do + owner 'root' + group 'root' + mode '644' +end + +# Deploy `vector` config: +remote_file '/etc/vector/filestat_exporter.toml' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-filestat_exporter]' +end + +remote_file '/etc/systemd/system/vector-filestat_exporter.service' do + owner 'root' + group 'root' + mode '0644' +end + +service 'vector-filestat_exporter' do + action [:enable, :start] +end + +# Deploy `consul` config for `filestat_exporter`: remote_file '/etc/consul.d/service-filestat_exporter.json' do owner 'consul' group 'consul' diff --git a/cookbooks/prometheus-exporters/node_exporter_setup.rb b/cookbooks/prometheus-exporters/node_exporter_setup.rb index c907a3f..fa575f4 100644 --- a/cookbooks/prometheus-exporters/node_exporter_setup.rb +++ b/cookbooks/prometheus-exporters/node_exporter_setup.rb @@ -1,10 +1,55 @@ -# Deploy the `supervisord` configuration: -remote_file '/etc/supervisor/conf.d/node_exporter.conf' do +# Deploy the `systemd` configuration: +remote_file '/etc/systemd/system/node_exporter.service' do + owner 'root' + group 'root' + mode '644' +end + +remote_file '/etc/default/node_exporter' do + owner 'root' + group 'root' + mode '644' +end + +service 'node_exporter' do + action [ :enable, :start] +end + +# Deploy `rsyslog` config for `node_exporter`: +remote_file '/etc/rsyslog.d/30-node_exporter.conf' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[rsyslog]' +end + +service 'rsyslog' do + action :nothing +end + +# Deploy `logrotate` config for `node_exporter`: +remote_file '/etc/logrotate.d/node_exporter' do + owner 'root' + group 'root' + mode '0644' +end + +# Deploy the `systemd` config for `vector`: +remote_file '/etc/vector/node_exporter.toml' do + owner 'root' + group 'root' + mode '644' +end + +remote_file '/etc/systemd/system/vector-node_exporter.service' do + owner 
'root' + group 'root' + mode '644' +end + +service 'vector-node_exporter' do + action [ :enable, :start] end # Deploy `consul` config for `node_exporter`: diff --git a/cookbooks/prometheus/alertmanager_install.rb b/cookbooks/prometheus/alertmanager_install.rb index b182f28..4848e02 100644 --- a/cookbooks/prometheus/alertmanager_install.rb +++ b/cookbooks/prometheus/alertmanager_install.rb @@ -13,7 +13,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - vtag = $1 if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)} + vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} tag = vtag.sub(/^v/, '') alertmanager_bin = "#{node['alertmanager']['prefix']}#{tag}#{node['alertmanager']['postfix']}" diff --git a/cookbooks/prometheus/alertmanager_setup.rb b/cookbooks/prometheus/alertmanager_setup.rb index 6968d62..c40ad33 100644 --- a/cookbooks/prometheus/alertmanager_setup.rb +++ b/cookbooks/prometheus/alertmanager_setup.rb @@ -8,39 +8,80 @@ end # Deploy `alertmanager` file: -remote_file '/etc/prometheus.d/alertmanager.yml' do - owner 'root' - group 'root' - mode '644' +encrypted_remote_file '/etc/prometheus.d/alertmanager.yml' do + owner 'root' + group 'root' + mode '644' - notifies :restart, 'service[supervisor]' + source 'files/etc/prometheus.d/alertmanager.yml/' + password ENV['ITAMAE_PASSWORD'] + + notifies :restart, 'service[alertmanager]' end # Deploy alert setting file: -%w(node_exporter prometheus filestat).each do |conf| +%w(node_exporter prometheus filestat services snmp).each do |conf| remote_file "/etc/prometheus.d/alerts/#{conf}.yml" do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[prometheus]' end end -# Deploy `supervisord` config: -remote_file '/etc/supervisor/conf.d/alertmanager.conf' do +# Deploy `systemd` config for `alertmanager`: +remote_file '/etc/systemd/system/alertmanager.service' do + owner 'root' + group 'root' + mode '644' +end + +service 'alertmanager' do + 
action [:enable, :start] +end + +# Deploy `rsyslog` config for `alertmanager`: +remote_file '/etc/rsyslog.d/30-alertmanager.conf' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[rsyslog]' end -# Restart the `supervisor`: -service 'supervisor' do +service 'rsyslog' do action :nothing end +# Deploy `logroted` config for `alertmanager`: +remote_file '/etc/logrotate.d/alertmanager' do + owner 'root' + group 'root' + mode '644' +end + +# Deploy `vector` config for `alertmanager`: +remote_file '/etc/vector/alertmanager.toml' do + owner 'root' + group 'root' + mode '644' + + notifies :restart, 'service[vector-alertmanager]' +end + +remote_file '/etc/systemd/system/vector-alertmanager.service' do + owner 'root' + group 'root' + mode '644' + + notifies :restart, 'service[vector-alertmanager]' +end + +service 'vector-alertmanager' do + action [:enable, :start] +end + # Firewall settings here: %w( 9093/tcp ).each do |p| execute "ufw allow #{p}" do diff --git a/cookbooks/prometheus/alertmanager_webhook_install.rb b/cookbooks/prometheus/alertmanager_webhook_install.rb index 0420e26..060aeb3 100644 --- a/cookbooks/prometheus/alertmanager_webhook_install.rb +++ b/cookbooks/prometheus/alertmanager_webhook_install.rb @@ -12,7 +12,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - tag = $1 if response.body =~ %r{tag\/(\d+\.\d+)} + tag = $1 if response['location'] =~ %r{tag\/(\d+\.\d+)} alertmanager_webhook_bin = "#{node['alertmanager_webhook']['prefix']}#{tag}#{node['alertmanager_webhook']['postfix']}" diff --git a/cookbooks/prometheus/alertmanager_webhook_setup.rb b/cookbooks/prometheus/alertmanager_webhook_setup.rb index 8dc8042..e820406 100644 --- a/cookbooks/prometheus/alertmanager_webhook_setup.rb +++ b/cookbooks/prometheus/alertmanager_webhook_setup.rb @@ -1,14 +1,32 @@ -# Deploy `supervisor` config for `Alert Manager Webhook Logger` -remote_file 
'/etc/supervisor/conf.d/alertmanager_webhook_logger.conf' do +# Deploy `systemd` config for `Alert Manager Webhook Logger` +remote_file '/etc/systemd/system/webhook.service' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[webhook]' end -# Restart the `supervisor`: -service 'supervisor' do +service 'webhook' do + action [:enable, :start] +end + +# Deploy `rsyslog` config for `Alert Manager Webhook Logger`: +remote_file '/etc/rsyslog.d/30-webhook.conf' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[rsyslog]' +end + +service 'rsyslog' do action :nothing end +# Deploy `logrotate` config for `Alert Manager Webhook Logger`: +remote_file '/etc/logrotate.d/webhook' do + owner 'root' + group 'root' + mode '0644' +end diff --git a/cookbooks/prometheus/default.rb b/cookbooks/prometheus/default.rb index 3ee9c9d..bb000c9 100644 --- a/cookbooks/prometheus/default.rb +++ b/cookbooks/prometheus/default.rb @@ -12,28 +12,3 @@ include_recipe './alertmanager_webhook_setup.rb' include_recipe './snmp_exporter_install.rb' include_recipe './snmp_exporter_setup.rb' - -# Deploy /etc/hosts file: -HOSTNAME = run_command('uname -n').stdout.chomp - -template '/etc/promtail/prometheus.yaml' do - owner 'root' - group 'root' - mode '644' - - variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint']) - - notifies :restart, 'service[promtail-prometheus]' -end - -# Deploy the `systemd` configuration: -remote_file '/lib/systemd/system/promtail-prometheus.service' do - owner 'root' - group 'root' - mode '644' -end - -# Service setting: -service 'promtail-prometheus' do - action [ :enable, :restart ] -end diff --git a/cookbooks/prometheus/files/etc/logrotate.d/alertmanager b/cookbooks/prometheus/files/etc/logrotate.d/alertmanager new file mode 100644 index 0000000..31c9dcf --- /dev/null +++ b/cookbooks/prometheus/files/etc/logrotate.d/alertmanager @@ -0,0 +1,13 @@ 
+/var/log/alertmanager.log +{ + rotate 4 + weekly + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + /usr/lib/rsyslog/rsyslog-rotate + endscript +} diff --git a/cookbooks/prometheus/files/etc/logrotate.d/prometheus b/cookbooks/prometheus/files/etc/logrotate.d/prometheus new file mode 100644 index 0000000..d9fd673 --- /dev/null +++ b/cookbooks/prometheus/files/etc/logrotate.d/prometheus @@ -0,0 +1,13 @@ +/var/log/prometheus.log +{ + rotate 4 + weekly + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + /usr/lib/rsyslog/rsyslog-rotate + endscript +} diff --git a/cookbooks/prometheus/files/etc/logrotate.d/snmp_exporter b/cookbooks/prometheus/files/etc/logrotate.d/snmp_exporter new file mode 100644 index 0000000..1366177 --- /dev/null +++ b/cookbooks/prometheus/files/etc/logrotate.d/snmp_exporter @@ -0,0 +1,13 @@ +/var/log/snmp_exporter.log +{ + rotate 4 + weekly + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + /usr/lib/rsyslog/rsyslog-rotate + endscript +} diff --git a/cookbooks/prometheus/files/etc/logrotate.d/webhook b/cookbooks/prometheus/files/etc/logrotate.d/webhook new file mode 100644 index 0000000..c1173b0 --- /dev/null +++ b/cookbooks/prometheus/files/etc/logrotate.d/webhook @@ -0,0 +1,13 @@ +/var/log/alertmanager-webhook-logger.log +{ + rotate 4 + weekly + missingok + notifempty + compress + delaycompress + sharedscripts + postrotate + /usr/lib/rsyslog/rsyslog-rotate + endscript +} diff --git a/cookbooks/prometheus/files/etc/prometheus.d/alertmanager.yml b/cookbooks/prometheus/files/etc/prometheus.d/alertmanager.yml index c3e0395..b41d2e7 100644 --- a/cookbooks/prometheus/files/etc/prometheus.d/alertmanager.yml +++ b/cookbooks/prometheus/files/etc/prometheus.d/alertmanager.yml @@ -1,21 +1,13 @@ -global: - slack_api_url: 'https://hooks.slack.com/services/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' - -route: - receiver: 'test-route' - group_by: [alertname] - group_wait:
10s - group_interval: 1m - repeat_interval: 6h - -receivers: -- name: 'test-route' - slack_configs: - - channel: '#ops' - title: "{{ range .Alerts }}{{ .Annotations.summary }}\n{{ end }}" - text: "{{ range .Alerts }}{{ .Annotations.description }}\n{{ end }}" - send_resolved: true - - webhook_configs: - - send_resolved: true - url: 'http://localhost:6725' +md5:28ec9f4b96884f37cbd904fb91f5ee7d:salt:161-52-232-3-248-143-138-217:aes-256-cfb:Zl1SiauJIVlZ5Nl/QwFyZN4DzwSfaWC12a3AEioxgNPUdXHKPeRCb7u2o4Bw +JrjA7SKRxeDjMBqYyQUnDG9AZ88l1xWkXMIm2cIjWdjj+5aRYpOls7QjCXfQ +iN7d9tpw7tuqg3kMplKvEAHPGSDLK+1kBxCD37gYpQMa/VjL4Rnf9PD8Duae +te52AbngvCRk7RfIIINIv6fiECYvC1dgnGs7lzwio4vC6ssO6O1uelt5IYvz +1OQdywvSEYhh7KWSBRGwLhBAFmzDfdRcuJJD0qT6Sv0VkSKp4VP/MQsjuit6 +qESm5AAiiP9qyoRmWZgBuN3yyjAudmkNE6d+vJ3/5WXg0u8c54G+yQ0/eREe +tKfIsn9hYna87GfM5Cwtwn5Iw1DYxWFTImsGV/aM3XsCLiA6Z9pMXdJR8YWY +6pyODiTlpL8F/SRV6r+tABgJVq3Dc+C05xBoulYOx8LhHbBHo6oo9nupvHak +YTe9RKctdZ8Qpf3QtsVQmBmOPhYMuBB9yMQ3EYZUErsm4aKcXpYOjjViO0uP +DIY9CwmQbcPymFOS5nMUR85T6Qnuu0huGMVB0dIKh7vPECVIMd/0IxEkNacQ +GaHbC9Cuav5vB0gyqTdg5xHWA89dC7jbz5anqPMBmpStPajGoGoH3vXmMrSf +zylLMtkRb+EVXVSrdo9emjomJfzPxrmKk3hxfjnP4P/KxhIS8H/kjVEiDOpO +IjO7 \ No newline at end of file diff --git a/cookbooks/prometheus/files/etc/prometheus.d/alerts/services.yml b/cookbooks/prometheus/files/etc/prometheus.d/alerts/services.yml new file mode 100644 index 0000000..12bca52 --- /dev/null +++ b/cookbooks/prometheus/files/etc/prometheus.d/alerts/services.yml @@ -0,0 +1,101 @@ +groups: +- name: services + rules: + - alert: Digdag + expr: node_systemd_unit_state{name="digdag.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "Digdag is not running: {{ $labels.instance }}." + description: "Digdag is not running: {{ $labels.instance }}." 
+ + - alert: node_exporter + expr: node_systemd_unit_state{name="node_exporter.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "node_exporter is not running: {{ $labels.instance }}." + description: "node_exporter is not running: {{ $labels.instance }}." + + - alert: vector-node_exporter + expr: node_systemd_unit_state{name="vector-node_exporter.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "vector-node_exporter is not running: {{ $labels.instance }}." + description: "vector-node_exporter is not running: {{ $labels.instance }}." + + - alert: snmp_exporter + expr: node_systemd_unit_state{name="snmp_exporter.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "snmp_exporter is not running: {{ $labels.instance }}." + description: "snmp_exporter is not running: {{ $labels.instance }}." + + - alert: vector-snmp_exporter + expr: node_systemd_unit_state{name="vector-snmp_exporter.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "vector-snmp_exporter is not running: {{ $labels.instance }}." + description: "vector-snmp_exporter is not running: {{ $labels.instance }}." + + - alert: filestat_exporter + expr: node_systemd_unit_state{name="filestat_exporter.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "filestat_exporter is not running: {{ $labels.instance }}." + description: "filestat_exporter is not running: {{ $labels.instance }}." + + - alert: vector-filestat_exporter + expr: node_systemd_unit_state{name="vector-filestat_exporter.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "vector-filestat_exporter is not running: {{ $labels.instance }}." + description: "vector-filestat_exporter is not running: {{ $labels.instance }}." 
+ + - alert: exporter_proxy + expr: node_systemd_unit_state{name="exporter_proxy.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "exporter_proxy is not running: {{ $labels.instance }}." + description: "exporter_proxy is not running: {{ $labels.instance }}." + + - alert: prometheus + expr: node_systemd_unit_state{name="prometheus.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "prometheus is not running: {{ $labels.instance }}." + description: "prometheus is not running: {{ $labels.instance }}." + + - alert: vector-prometheus + expr: node_systemd_unit_state{name="vector-prometheus.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "vector-prometheus is not running: {{ $labels.instance }}." + description: "vector-prometheus is not running: {{ $labels.instance }}." + + - alert: vault + expr: node_systemd_unit_state{name="vault.service", state="active"} != 1 + for: 5m + labels: + severity: error + annotations: + summary: "vault is not running: {{ $labels.instance }}." + description: "vault is not running: {{ $labels.instance }}." diff --git a/cookbooks/prometheus/files/etc/rsyslog.d/30-alertmanager.conf b/cookbooks/prometheus/files/etc/rsyslog.d/30-alertmanager.conf new file mode 100644 index 0000000..097c66b --- /dev/null +++ b/cookbooks/prometheus/files/etc/rsyslog.d/30-alertmanager.conf @@ -0,0 +1,7 @@ +# Log alertmanager log messages to file +:syslogtag,contains,"alertmanager" /var/log/alertmanager.log + +# Uncomment the following to stop logging anything that matches the last rule.
+# Doing this will stop logging kernel generated UFW log messages to the file +# normally containing kern.* messages (eg, /var/log/kern.log) +& stop diff --git a/cookbooks/prometheus/files/etc/rsyslog.d/30-prometheus.conf b/cookbooks/prometheus/files/etc/rsyslog.d/30-prometheus.conf new file mode 100644 index 0000000..01b30bb --- /dev/null +++ b/cookbooks/prometheus/files/etc/rsyslog.d/30-prometheus.conf @@ -0,0 +1,7 @@ +# Log prometheus log messages to file +:syslogtag,contains,"prometheus" /var/log/prometheus.log + +# Uncomment the following to stop logging anything that matches the last rule. +# Doing this will stop logging kernel generated UFW log messages to the file +# normally containing kern.* messages (eg, /var/log/kern.log) +& stop diff --git a/cookbooks/prometheus/files/etc/rsyslog.d/30-snmp_exporter.conf b/cookbooks/prometheus/files/etc/rsyslog.d/30-snmp_exporter.conf new file mode 100644 index 0000000..2cba006 --- /dev/null +++ b/cookbooks/prometheus/files/etc/rsyslog.d/30-snmp_exporter.conf @@ -0,0 +1,7 @@ +# Log snmp_exporter log messages to file +:syslogtag,contains,"snmp_exporter" /var/log/snmp_exporter.log + +# Uncomment the following to stop logging anything that matches the last rule. +# Doing this will stop logging kernel generated UFW log messages to the file +# normally containing kern.* messages (eg, /var/log/kern.log) +& stop diff --git a/cookbooks/prometheus/files/etc/rsyslog.d/30-webhook.conf b/cookbooks/prometheus/files/etc/rsyslog.d/30-webhook.conf new file mode 100644 index 0000000..ac9353e --- /dev/null +++ b/cookbooks/prometheus/files/etc/rsyslog.d/30-webhook.conf @@ -0,0 +1,7 @@ +# Log webhook log messages to file +:syslogtag,contains,"webhook" /var/log/alertmanager-webhook-logger.log + +# Uncomment the following to stop logging anything that matches the last rule.
+# Doing this will stop logging kernel generated UFW log messages to the file +# normally containing kern.* messages (eg, /var/log/kern.log) +& stop diff --git a/cookbooks/prometheus/files/etc/supervisor/conf.d/alertmanager.conf b/cookbooks/prometheus/files/etc/supervisor/conf.d/alertmanager.conf deleted file mode 100644 index 394808c..0000000 --- a/cookbooks/prometheus/files/etc/supervisor/conf.d/alertmanager.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:alertmanager] -command=/usr/local/bin/alertmanager --config.file /etc/prometheus.d/alertmanager.yml -stdout_logfile=/var/log/supervisor/alertmanager.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=HUP diff --git a/cookbooks/prometheus/files/etc/supervisor/conf.d/alertmanager_webhook_logger.conf b/cookbooks/prometheus/files/etc/supervisor/conf.d/alertmanager_webhook_logger.conf deleted file mode 100644 index 16d99ac..0000000 --- a/cookbooks/prometheus/files/etc/supervisor/conf.d/alertmanager_webhook_logger.conf +++ /dev/null @@ -1,7 +0,0 @@ -[program:alertmanager-webhook-logger] -command=/usr/local/bin/alertmanager-webhook-logger -stdout_logfile=/var/log/supervisor/alertmanager-webhook-logger.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true diff --git a/cookbooks/prometheus/files/etc/supervisor/conf.d/prometheus.conf b/cookbooks/prometheus/files/etc/supervisor/conf.d/prometheus.conf deleted file mode 100644 index 5b8c837..0000000 --- a/cookbooks/prometheus/files/etc/supervisor/conf.d/prometheus.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:prometheus] -command=/usr/local/bin/prometheus --config.file /etc/prometheus.d/prometheus.yml --storage.tsdb.path /var/opt/prometheus/ -stdout_logfile=/var/log/supervisor/prometheus.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=HUP diff --git 
a/cookbooks/prometheus/files/etc/supervisor/conf.d/snmp_exporter.conf b/cookbooks/prometheus/files/etc/supervisor/conf.d/snmp_exporter.conf deleted file mode 100644 index 38e00ac..0000000 --- a/cookbooks/prometheus/files/etc/supervisor/conf.d/snmp_exporter.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:snmp_exporter] -command=/usr/local/bin/snmp_exporter --config.file /etc/prometheus_exporters.d/snmp.yml -stdout_logfile=/var/log/supervisor/snmp_exporter.log -redirect_stderr=true -stdout_logfile_maxbytes=1MB -stdout_logfile_backups=5 -autorestart=true -stopsignal=HUP diff --git a/cookbooks/prometheus/files/etc/systemd/system/alertmanager.service b/cookbooks/prometheus/files/etc/systemd/system/alertmanager.service new file mode 100644 index 0000000..c272073 --- /dev/null +++ b/cookbooks/prometheus/files/etc/systemd/system/alertmanager.service @@ -0,0 +1,11 @@ +[Unit] +Description=Alertmanager +Wants=network-online.target +After=network-online.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/alertmanager --config.file /etc/prometheus.d/alertmanager.yml + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus/files/etc/systemd/system/prometheus.service b/cookbooks/prometheus/files/etc/systemd/system/prometheus.service new file mode 100644 index 0000000..8aa3070 --- /dev/null +++ b/cookbooks/prometheus/files/etc/systemd/system/prometheus.service @@ -0,0 +1,13 @@ +[Unit] +Description=Prometheus Server +Documentation=https://prometheus.io/docs/introduction/overview/ +After=network-online.target + +[Service] +Restart=on-failure +ExecStart=/usr/local/bin/prometheus \ + --config.file=/etc/prometheus.d/prometheus.yml \ + --storage.tsdb.path=/var/opt/prometheus/ + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus/files/etc/systemd/system/snmp_exporter.service b/cookbooks/prometheus/files/etc/systemd/system/snmp_exporter.service new file mode 100644 index 0000000..7e64c7d --- /dev/null +++ 
b/cookbooks/prometheus/files/etc/systemd/system/snmp_exporter.service @@ -0,0 +1,10 @@ +[Unit] +Description=SNMP Exporter + +[Service] +User=root +Group=root +ExecStart=/usr/local/bin/snmp_exporter --config.file /etc/prometheus_exporters.d/snmp.yml + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus/files/etc/systemd/system/vector-alertmanager.service b/cookbooks/prometheus/files/etc/systemd/system/vector-alertmanager.service new file mode 100644 index 0000000..5a0cba8 --- /dev/null +++ b/cookbooks/prometheus/files/etc/systemd/system/vector-alertmanager.service @@ -0,0 +1,16 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/alertmanager.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus/files/etc/systemd/system/vector-prometheus.service b/cookbooks/prometheus/files/etc/systemd/system/vector-prometheus.service new file mode 100644 index 0000000..52a3c29 --- /dev/null +++ b/cookbooks/prometheus/files/etc/systemd/system/vector-prometheus.service @@ -0,0 +1,17 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/prometheus.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/prometheus/files/etc/systemd/system/vector-snmp_exporter.service b/cookbooks/prometheus/files/etc/systemd/system/vector-snmp_exporter.service new file mode 100644 index 0000000..96a781a --- /dev/null +++ b/cookbooks/prometheus/files/etc/systemd/system/vector-snmp_exporter.service @@ -0,0 +1,17 @@ +[Unit] 
+Description=Vector SNMP Exporter +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/snmp_exporter.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/prometheus/files/etc/systemd/system/webhook.service b/cookbooks/prometheus/files/etc/systemd/system/webhook.service new file mode 100644 index 0000000..442b043 --- /dev/null +++ b/cookbooks/prometheus/files/etc/systemd/system/webhook.service @@ -0,0 +1,11 @@ +[Unit] +Description=AlertmanagerWebhook +Wants=network-online.target +After=network-online.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/alertmanager-webhook-logger + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/prometheus/files/etc/vector/alertmanager.toml b/cookbooks/prometheus/files/etc/vector/alertmanager.toml new file mode 100644 index 0000000..249a5b1 --- /dev/null +++ b/cookbooks/prometheus/files/etc/vector/alertmanager.toml @@ -0,0 +1,63 @@ +data_dir = "/var/lib/vector/" + +[sources.alertmanager] + type = "file" + include = [ "/var/log/alertmanager.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.alertmanager_transform] + type = "remap" + inputs = ["alertmanager"] + source = ''' + . |= parse_syslog!(.message) + del(.host) + + errmsg = parse_regex(.message, r'err=(?P.+)$') ?? {} + . = merge(., errmsg) + .message = replace(.message, r'err=.+$', "") + + . |= parse_logfmt!(.message) + del(.message) + + .message = .msg + del(.msg) + + minutes = parse_json(.minutes, ) ?? {} + . 
= merge!(., minutes) + + .timestamp = .ts + del(.ts) + ''' + +[sinks.alertmanager_output] +type = "file" +inputs = [ "alertmanager_transform" ] +compression = "none" +path = "/tmp/alertmanager-%Y-%m-%d.log" + + [sinks.alertmanager_output.encoding] + codec = "json" + + [sinks.alertmanager_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.alertmanager_loki] +type = "loki" +inputs = [ "alertmanager_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.alertmanager_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "alertmanager" + filename = "/var/log/alertmanager.log" + + [sinks.alertmanager_loki.encoding] + codec = "json" + + [sinks.alertmanager_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/prometheus/files/etc/vector/prometheus.toml b/cookbooks/prometheus/files/etc/vector/prometheus.toml new file mode 100644 index 0000000..4a5aa46 --- /dev/null +++ b/cookbooks/prometheus/files/etc/vector/prometheus.toml @@ -0,0 +1,61 @@ +data_dir = "/var/lib/vector/" + +[sources.prometheus] + type = "file" + include = [ "/var/log/prometheus.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.prometheus_transform] + type = "remap" + inputs = ["prometheus"] + source = ''' + . |= parse_syslog!(.message) + del(.host) + + errmsg = parse_regex(.message, r'err=(?P.+)$') ?? {} + . = merge(., errmsg) + .message = replace(.message, r'err=.+$', "") + + . 
|= parse_logfmt!(.message) + del(.message) + + .message = .msg + del(.msg) + + .timestamp = .ts + del(.ts) + ''' + +[sinks.prometheus_output] +type = "file" +inputs = [ "prometheus_transform" ] +compression = "none" +path = "/tmp/prometheus-%Y-%m-%d.log" + + [sinks.prometheus_output.encoding] + codec = "json" + + [sinks.prometheus_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.prometheus_loki] +type = "loki" +inputs = [ "prometheus_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.prometheus_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "prometheus" + filename = "/var/log/prometheus.log" + + [sinks.prometheus_loki.encoding] + codec = "json" + + [sinks.prometheus_loki.buffer] + max_size = 268435488 + type = "disk" + diff --git a/cookbooks/prometheus/files/etc/vector/snmp_exporter.toml b/cookbooks/prometheus/files/etc/vector/snmp_exporter.toml new file mode 100644 index 0000000..35db8d1 --- /dev/null +++ b/cookbooks/prometheus/files/etc/vector/snmp_exporter.toml @@ -0,0 +1,54 @@ +data_dir = "/var/lib/vector/" + +[sources.snmp_exporter] + type = "file" + include = [ "/var/log/snmp_exporter.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.snmp_exporter_transform] + type = "remap" + inputs = ["snmp_exporter"] + source = ''' + . |= parse_syslog!(.message) + + . 
|= parse_key_value!(.message) + + del(.message) + del(.host) + + .message = .msg + del(.msg) + ''' + +[sinks.snmp_exporter_output] +type = "file" +inputs = [ "snmp_exporter_transform" ] +compression = "none" +path = "/tmp/snmp_exporter-%Y-%m-%d.log" + + [sinks.snmp_exporter_output.encoding] + codec = "json" + + [sinks.snmp_exporter_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.snmp_exporter_loki] +type = "loki" +inputs = [ "snmp_exporter_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.snmp_exporter_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "snmp_exporter" + filename = "/var/log/snmp_exporter.log" + + [sinks.snmp_exporter_loki.encoding] + codec = "json" + + [sinks.snmp_exporter_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/prometheus/files/lib/systemd/system/promtail-prometheus.service b/cookbooks/prometheus/files/lib/systemd/system/promtail-prometheus.service deleted file mode 100644 index a52887f..0000000 --- a/cookbooks/prometheus/files/lib/systemd/system/promtail-prometheus.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Grafana Promtail for Prometheus -Documentation=https://github.com/grafana/loki -After=network-online.target - -[Service] -User=root -Restart=always -ExecStart=/usr/local/bin/promtail --config.file=/etc/promtail/prometheus.yaml - -[Install] -WantedBy=multi-user.target diff --git a/cookbooks/prometheus/files/lib/systemd/system/promtail-snmp_exporter.service b/cookbooks/prometheus/files/lib/systemd/system/promtail-snmp_exporter.service deleted file mode 100644 index 0fbf1ab..0000000 --- a/cookbooks/prometheus/files/lib/systemd/system/promtail-snmp_exporter.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Grafana Promtail for snmp_exporter -Documentation=https://github.com/grafana/loki -After=network-online.target - -[Service] -User=root -Restart=always -ExecStart=/usr/local/bin/promtail 
--config.file=/etc/promtail/snmp_exporter.yaml - -[Install] -WantedBy=multi-user.target diff --git a/cookbooks/prometheus/prometheus_install.rb b/cookbooks/prometheus/prometheus_install.rb index 3891116..7cb4896 100644 --- a/cookbooks/prometheus/prometheus_install.rb +++ b/cookbooks/prometheus/prometheus_install.rb @@ -13,7 +13,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - vtag = $1 if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)} + vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} tag = vtag.sub(/^v/, '') prometheus_bin = "#{node['prometheus']['prefix']}#{tag}#{node['prometheus']['postfix']}" diff --git a/cookbooks/prometheus/prometheus_setup.rb b/cookbooks/prometheus/prometheus_setup.rb index 975270d..5b34d52 100644 --- a/cookbooks/prometheus/prometheus_setup.rb +++ b/cookbooks/prometheus/prometheus_setup.rb @@ -1,26 +1,71 @@ +# Create User and group: +user 'prometheus' do + system_user true + shell '/sbin/nologin' +end + # Create `/etc/prometheus.d/`: -%w(/etc/prometheus.d).each do |d| +%w( /etc/prometheus.d /var/opt/prometheus ).each do |d| directory d do - owner 'root' - group 'root' - mode '0755' + owner 'prometheus' + group 'prometheus' + mode '0755' end end # Deploy `prometheus` files: remote_file '/etc/prometheus.d/prometheus.yml' do + owner 'prometheus' + group 'prometheus' + mode '644' +end + +# Deploy `systemd` configuration for `prometheus`: +remote_file '/etc/systemd/system/prometheus.service' do owner 'root' group 'root' mode '644' end -# Deploy `supervisor` configuration for `prometheus`: -remote_file '/etc/supervisor/conf.d/prometheus.conf' do +service 'prometheus' do + action [:enable, :start] +end + +# Deploy `rsyslog` configuration for `prometheus`: +remote_file '/etc/rsyslog.d/30-prometheus.conf' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[rsyslog]' +end + +service 'rsyslog' do + action :nothing +end + +# Deploy `logrotate` configuration for
`prometheus`: +remote_file '/etc/logrotate.d/prometheus' do + owner 'root' + group 'root' + mode '644' +end + +# Deploy `vector` configuration for `prometheus`: +remote_file '/etc/vector/prometheus.toml' do + owner 'root' + group 'root' + mode '644' +end + +remote_file '/etc/systemd/system/vector-prometheus.service' do + owner 'root' + group 'root' + mode '0644' +end + +service 'vector-prometheus' do + action [:enable, :start] end # Depoy `consul` service configuration for `prometheus`: diff --git a/cookbooks/prometheus/snmp_exporter_install.rb b/cookbooks/prometheus/snmp_exporter_install.rb index fb75001..e9876e0 100644 --- a/cookbooks/prometheus/snmp_exporter_install.rb +++ b/cookbooks/prometheus/snmp_exporter_install.rb @@ -13,7 +13,7 @@ begin Timeout.timeout(3) do response = Net::HTTP.get_response(uri) - vtag = $1 if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)} + vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} tag = vtag.sub(/^v/, '') snmp_bin = "#{node['snmp_exporter']['prefix']}#{tag}#{node['snmp_exporter']['postfix']}" diff --git a/cookbooks/prometheus/snmp_exporter_setup.rb b/cookbooks/prometheus/snmp_exporter_setup.rb index 5b11a89..8b01796 100644 --- a/cookbooks/prometheus/snmp_exporter_setup.rb +++ b/cookbooks/prometheus/snmp_exporter_setup.rb @@ -3,13 +3,52 @@ link '/etc/prometheus_exporters.d/snmp.yml' do to "#{node['snmp_exporter']['storage']}snmp.yml" end -# Deploy `supervisord` config: -remote_file '/etc/supervisor/conf.d/snmp_exporter.conf' do +# Deploy `systemd` config: +remote_file '/etc/systemd/system/snmp_exporter.service' do + owner 'root' + group 'root' + mode '644' +end + +service 'snmp_exporter' do + action [:enable, :start] +end + +# Deploy `rsyslog` config for `snmp_exporter`: +remote_file '/etc/rsyslog.d/30-snmp_exporter.conf' do owner 'root' group 'root' mode '644' - notifies :restart, 'service[supervisor]' + notifies :restart, 'service[rsyslog]' +end + +service 'rsyslog' do + action :nothing +end + +# Deploy `logrotate` 
config for `snmp_exporter`: +remote_file '/etc/logrotate.d/snmp_exporter' do + owner 'root' + group 'root' + mode '644' +end + +# Deploy `vector` config for `snmp_exporter`: +remote_file '/etc/vector/snmp_exporter.toml' do + owner 'root' + group 'root' + mode '644' +end + +remote_file '/etc/systemd/system/vector-snmp_exporter.service' do + owner 'root' + group 'root' + mode '0644' +end + +service 'vector-snmp_exporter' do + action [:enable, :start] end # Deploy `consul` config: @@ -25,26 +64,3 @@ end service 'consul' do action :nothing end - -# Deploy /etc/hosts file: -template '/etc/promtail/snmp_exporter.yaml' do - owner 'root' - group 'root' - mode '644' - - variables(HOSTNAME: node[:hostname], LOKIENDPOINT: node['promtail']['lokiendpoint']) - - notifies :restart, 'service[promtail-snmp_exporter]' -end - -# Deploy the `systemd` configuration: -remote_file '/lib/systemd/system/promtail-snmp_exporter.service' do - owner 'root' - group 'root' - mode '644' -end - -# Service setting: -service 'promtail-snmp_exporter' do - action [ :enable, :restart ] -end diff --git a/cookbooks/prometheus/templates/etc/promtail/prometheus.yaml b/cookbooks/prometheus/templates/etc/promtail/prometheus.yaml deleted file mode 100644 index e48839b..0000000 --- a/cookbooks/prometheus/templates/etc/promtail/prometheus.yaml +++ /dev/null @@ -1,110 +0,0 @@ -server: - disable: true - -positions: - filename: /var/opt/promtail/promtail_prometheus_position.yaml - -clients: - - url: http://<%= @LOKIENDPOINT %>/loki/api/v1/push - -scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost - labels: - job: prometheus - hostname: <%= @HOSTNAME %> - __path__: /var/log/supervisor/prometheus.log - - pipeline_stages: - - match: - selector: '{job="prometheus"}' - stages: - - drop: - expression: "^[^l]" - - - regex: - expression: '^level=(?P[^ ]+) ts=(?P[^ ]+) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02T15:04:05.999Z - location: Etc/UTC - - - labels: - 
level: - - - output: - source: message - - - job_name: alertmanager - static_configs: - - targets: - - localhost - labels: - job: prometheus - hostname: <%= @HOSTNAME %> - __path__: /var/log/supervisor/alertmanager.log - - pipeline_stages: - - match: - selector: '{job="prometheus"}' - stages: - - drop: - expression: "^[^l]" - - - regex: - expression: '^level=(?P[^ ]+) ts=(?P[^ ]+) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02T15:04:05.999Z - location: Etc/UTC - - - labels: - level: - - - output: - source: message - - - job_name: alertmanager-webhook-logger - static_configs: - - targets: - - localhost - labels: - job: prometheus - hostname: <%= @HOSTNAME %> - __path__: /var/log/supervisor/alertmanager-webhook-logger.log - - pipeline_stages: - - match: - selector: '{job="prometheus"}' - stages: - - drop: - expression: "^[0-9]+" - - - json: - expressions: - timestamp: timestamp - message: description - level: status - - - timestamp: - source: timestamp - format: 2006-01-02T15:04:05.999999999Z - location: Etc/UTC - - - template: - source: level - template: '{{ regexReplaceAllLiteral "firing" .Value "error" }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "resolved" .Value "notice" }}' - - - labels: - level: - - - output: - source: message diff --git a/cookbooks/prometheus/templates/etc/promtail/snmp_exporter.yaml b/cookbooks/prometheus/templates/etc/promtail/snmp_exporter.yaml deleted file mode 100644 index 6009a02..0000000 --- a/cookbooks/prometheus/templates/etc/promtail/snmp_exporter.yaml +++ /dev/null @@ -1,40 +0,0 @@ -server: - disable: true - -positions: - filename: /var/opt/promtail/promtail_snmp_exporter_position.yaml - -clients: - - url: http://<%= @LOKIENDPOINT %>/loki/api/v1/push - -scrape_configs: - - job_name: snmp_exporter - static_configs: - - targets: - - localhost - labels: - job: snmp_exporter - hostname: <%= @HOSTNAME %> - __path__: /var/log/supervisor/snmp_exporter.log - - pipeline_stages: - - match: - 
selector: '{job="snmp_exporter"}' - stages: - - regex: - expression: '^level=(?P[^ ]+) ts=(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02T15:04:05.000Z - location: Etc/GMT - - - template: - source: level - template: '{{ regexReplaceAllLiteral "warn" .Value "warning" }}' - - - labels: - level: - - - output: - source: message diff --git a/cookbooks/promtail/attributes.rb b/cookbooks/promtail/attributes.rb deleted file mode 100644 index 71adf26..0000000 --- a/cookbooks/promtail/attributes.rb +++ /dev/null @@ -1,14 +0,0 @@ -# ------------------------------------------- -# Specifying the default settings: -# ------------------------------------------- -node.reverse_merge!({ - 'promtail' => { - 'url' => 'https://github.com/grafana/loki/releases/download/', - 'bin' => 'promtail-linux-amd64.zip', - 'storage' => '/opt/promtail/bin/', - 'location' => '/usr/local/bin/', - 'data' => '/var/opt/promtail/', - 'lokiendpoint' => 'loki.service.consul:3100' - }, -}) - diff --git a/cookbooks/promtail/default.rb b/cookbooks/promtail/default.rb deleted file mode 100644 index 340c25a..0000000 --- a/cookbooks/promtail/default.rb +++ /dev/null @@ -1,6 +0,0 @@ -# Loading the attributes: -include_recipe './attributes.rb' - -include_recipe './install.rb' - -include_recipe './setup.rb' diff --git a/cookbooks/promtail/files/lib/systemd/system/promtail-base.service b/cookbooks/promtail/files/lib/systemd/system/promtail-base.service deleted file mode 100644 index 1766380..0000000 --- a/cookbooks/promtail/files/lib/systemd/system/promtail-base.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Grafana Promtail -Documentation=https://github.com/grafana/loki -After=network-online.target - -[Service] -User=root -Restart=always -ExecStart=/usr/local/bin/promtail --config.file=/etc/promtail/base.yaml - -[Install] -WantedBy=multi-user.target diff --git a/cookbooks/promtail/install.rb b/cookbooks/promtail/install.rb deleted file 
mode 100644 index 8b60269..0000000 --- a/cookbooks/promtail/install.rb +++ /dev/null @@ -1,55 +0,0 @@ -promtail_url = '' -promtail_bin = '' - -tag = '' -vtag = '' - -# Calculate the Download URL: -begin - require 'net/http' - - uri = URI.parse('https://github.com/grafana/loki/releases/latest') - - Timeout.timeout(3) do - response = Net::HTTP.get_response(uri) - - vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)} - tag = vtag.sub(/^v/, '') - - promtail_url = "#{node['promtail']['url']}/#{vtag}/#{node['promtail']['bin']}" - end -rescue - # Abort the chef client process: - raise 'Cannot connect to http://github.com.' -end - -# バージョン確認して、アップデート必要かどうか確認 -result = run_command("promtail --version 2>&1 | grep #{tag}", error: false) -if result.exit_status != 0 - # Download: - TMP = "/tmp/#{node['promtail']['bin']}" - - execute "wget #{promtail_url} -O #{TMP}" - - # Install: - directory node['promtail']['storage'] do - owner 'root' - group 'root' - mode '755' - end - - execute "unzip #{TMP} -d #{node['promtail']['storage']}" - execute "mv #{node['promtail']['storage']}promtail-linux-amd64 #{node['promtail']['storage']}promtail" - - # Change Owner and Permissions: - file "#{node['promtail']['storage']}promtail" do - owner 'root' - group 'root' - mode '755' - end - - # Create Link - link "#{node['promtail']['location']}promtail" do - to "#{node['promtail']['storage']}promtail" - end -end diff --git a/cookbooks/promtail/setup.rb b/cookbooks/promtail/setup.rb deleted file mode 100644 index cdfee71..0000000 --- a/cookbooks/promtail/setup.rb +++ /dev/null @@ -1,53 +0,0 @@ -# Deploy the configuration file: -%w( /etc/promtail /var/opt/promtail ).each do |d| - directory d do - owner 'root' - group 'root' - mode '755' - end -end - -# Deploy /etc/hosts file: -HOSTNAME = run_command('uname -n').stdout.chomp - -template '/etc/promtail/base.yaml' do - owner 'root' - group 'root' - mode '644' - - variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint']) - - 
notifies :restart, 'service[promtail-base]' -end - -# Deploy the `systemd` configuration: -remote_file '/lib/systemd/system/promtail-base.service' do - owner 'root' - group 'root' - mode '644' -end - -# Service setting: -service 'promtail-base' do - action [ :enable, :restart ] -end - -# Deploy the `systemd` configuration: -remote_file '/etc/rsyslog.d/30-promtail.conf' do - owner 'root' - group 'root' - mode '644' - - notifies :restart, 'service[rsyslog]' -end - -service 'rsyslog' do - action [ :nothing ] -end - -# Deploy the `logrotated` configuration: -remote_file '/etc/logrotate.d/promtail' do - owner 'root' - group 'root' - mode '644' -end diff --git a/cookbooks/promtail/templates/etc/promtail/base.yaml b/cookbooks/promtail/templates/etc/promtail/base.yaml deleted file mode 100644 index 8eb7cb8..0000000 --- a/cookbooks/promtail/templates/etc/promtail/base.yaml +++ /dev/null @@ -1,390 +0,0 @@ -server: - disable: true - -positions: - filename: /var/opt/promtail/promtail_base_position.yaml - -clients: - - url: http://<%= @LOKIENDPOINT %>/loki/api/v1/push - -scrape_configs: - - job_name: apt - static_configs: - - targets: - - localhost - labels: - job: apt - hostname: <%= @HOSTNAME %> - level: notice - __path__: /var/log/apt/history.log - - - job_name: sudo - static_configs: - - targets: - - localhost - labels: - job: sudo - hostname: <%= @HOSTNAME %> - __path__: /var/log/auth.log - - pipeline_stages: - - match: - selector: '{job="sudo"} |~ "(CRON|sshd|session|securetty|systemd-logind|/bin/sh)"' - action: drop - - - match: - selector: '{job="sudo"} !~ "/bin/sh"' - stages: - - regex: - expression: '^(?P\w+ +[0-9]+ [0-9]+:[0-9]+:[0-9]+) [^ ]+ sudo: +(?P[^ ]+) : TTY=(?P[^ ]+) ; PWD=(?P[^ ]+) ; USER=(?P[^ ]+) ; COMMAND=(?P.+)$' - - - timestamp: - source: timestamp - format: Jan 2 15:04:05 - location: Asia/Tokyo - - - template: - source: message - template: 'USER={{ .user }} PWD={{ .pwd }} CMD={{ .cmd }}' - - - template: - source: level - template: 'notice' - - - 
labels: - level: - - - output: - source: message - - - job_name: sshd - static_configs: - - targets: - - localhost - labels: - job: sshd - hostname: <%= @HOSTNAME %> - level: info - __path__: /var/log/auth.log - - pipeline_stages: - - match: - selector: '{job="sshd"} |~ "(CRON|sudo|session)"' - action: drop - - - match: - selector: '{job="sshd"}' - - stages: - - regex: - expression: '^(?P\w+ +[0-9]+ [0-9]+:[0-9]+:[0-9]+) [^:]+: (?P.+)$' - - - timestamp: - source: timestamp - format: Jan 2 15:04:05 - location: Asia/Tokyo - - - output: - source: message - - - job_name: supervisord - static_configs: - - targets: - - localhost - labels: - job: supervisord - hostname: <%= @HOSTNAME %> - level: notice - __path__: /var/log/supervisor/supervisord.log - - pipeline_stages: - - match: - selector: '{job="supervisord"}' - stages: - - regex: - expression: '^(?P[0-9]+\-[0-9]+\-[0-9]+ [0-9]+:[0-9]+:[0-9]+),[0-9]+ (?P[^ ]+) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02 15:04:05 - location: Asia/Tokyo - - - template: - source: level - template: '{{ ToLower .level }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "warn" .Value "warning" }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "crit" .Value "critical" }}' - - - labels: - level: - - - output: - source: message - - - job_name: fail2ban - static_configs: - - targets: - - localhost - labels: - job: fail2ban - hostname: <%= @HOSTNAME %> - level: info - __path__: /var/log/fail2ban.log - - pipeline_stages: - - match: - selector: '{job="fail2ban"} !~ "already banned"' - stages: - - regex: - expression: '^(?P[0-9]+\-[0-9]+\-[0-9]+ [0-9]+:[0-9]+:[0-9]+),[0-9]+ [^:]+: (?P[^ ]+)[^\[]+(?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02 15:04:05 - location: Asia/Tokyo - - - template: - source: level - template: '{{ ToLower .level }}' - - - labels: - level: - - - output: - source: message - - - match: - selector: '{job="fail2ban"} |~ "already banned"' - 
stages: - - regex: - expression: '^(?P[0-9]+\-[0-9]+\-[0-9]+ [0-9]+:[0-9]+:[0-9]+),[0-9]+ [^:]+: (?P[^ ]+)[^\[]+(?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02 15:04:05 - location: Asia/Tokyo - - - output: - source: message - - - job_name: promtail - static_configs: - - targets: - - localhost - labels: - job: promtail - hostname: <%= @HOSTNAME %> - __path__: /var/log/promtail.log - - pipeline_stages: - - match: - selector: '{job="promtail"}' - stages: - - - drop: - expression: 'entry out of order' - - - regex: - expression: '^[^ ]+ +[0-9]+ [0-9]+:[0-9]+:[0-9]+ [^ ]+ promtail[^ ]+ .*ts=(?P[^ ]+) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02T15:04:05.999999999Z - location: Etc/GMT - - - regex: - expression: '^[^ ]+ +[0-9]+ [0-9]+:[0-9]+:[0-9]+ [^ ]+ promtail[^ ]+ .*level=(?P[^\\" ]+).*$' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "warn" .Value "warning" }}' - - - labels: - level: - - - output: - source: message - - - job_name: login - journal: - json: false - max_age: 12h - path: /var/log/journal - labels: - job: login - level: notice - hostname: <%= @HOSTNAME %> - - relabel_configs: - - action: keep - regex: 'systemd-logind.service' - source_labels: - - __journal__systemd_unit - - - job_name: init - journal: - json: false - max_age: 12h - path: /var/log/journal - labels: - job: init - hostname: <%= @HOSTNAME %> - - pipeline_stages: - - match: - selector: '{job="init"} |~ "(apt|Message of the Day|motd-news|Temporary Directories|man-db|fwupd|Firmware update daemon|systemd-tmpfiles-clean.service|Rotate log files|logrotate.service|[Pp]ackage[Kk]it|/run/dbus/system_bus_socket|[Ss]nap|lxd|Reloading|Mount unit|ext4 Metadata|e2scrub_all.service|docker|tmp-sanity|libcontainer container)"' - stages: - - template: - source: level - template: 'info' - - - labels: - level: - - - match: - selector: '{job="init"} !~ "(apt|Message of the Day|motd-news|Temporary Directories|man-db|fwupd|Firmware update 
daemon|systemd-tmpfiles-clean.service|Rotate log files|logrotate.service|[Pp]ackage[Kk]it|/run/dbus/system_bus_socket|[Ss]nap|lxd|Reloading|Mount unit|ext4 Metadata|e2scrub_all.service|docker|tmp-sanity|libcontainer container)"' - stages: - - template: - source: level - template: 'notice' - - - labels: - level: - - relabel_configs: - - action: keep - regex: 'init\.scope' - source_labels: - - __journal__systemd_unit - - - job_name: systemd - journal: - json: false - max_age: 12h - path: /var/log/journal - labels: - job: systemd - level: info - hostname: <%= @HOSTNAME %> - - pipeline_stages: - - match: - selector: '{job="systemd"} !~ "(temperature|nf_conntrack)"' - stages: - - drop: - expression: (CMD|UFW|session|TTY) - - - match: - selector: '{job="systemd"} |~ "nf_conntrack"' - stages: - - template: - source: level - template: 'error' - - - labels: - level: - - - match: - selector: '{job="systemd"} |~ "temperature"' - - stages: - - regex: - expression: '^(?P.+)$' - - - template: - source: level - template: '{{ .message }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral ".*normal.*" .Value "notice" }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral ".*temperature.*" .Value "error" }}' - - - labels: - level: - - - output: - source: message - - relabel_configs: - - source_labels: ['__journal__systemd_unit'] - target_label: 'unit' - - - action: drop - regex: '.*(cron|supervisor|ssh|promtail|local|grafana|motd|dnsmasq|snapd|logind|init|session|loki|monit|consul).*' - source_labels: - - __journal__systemd_unit - - - job_name: consul - static_configs: - - targets: - - localhost - labels: - job: consul - hostname: <%= @HOSTNAME %> - level: info - __path__: /var/log/consul/consul-*.log - - pipeline_stages: - - match: - selector: '{job="consul"}' - stages: - - regex: - expression: '^(?P\d+-\d+-[^T]+T\d+:\d+:\d+\.\d+\+\d+) \[(?P[^\]]+)\] *(?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02T15:04:05.000-0700 - - - 
template: - source: level - template: '{{ ToLower .level }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "warn" .Value "warning" }}' - - - template: - source: level - template: '{{ regexReplaceAllLiteral "crit" .Value "critical" }}' - - - labels: - level: - - - output: - source: message - - - job_name: unattended-upgrades - static_configs: - - targets: - - localhost - labels: - job: unattended-upgrades - hostname: <%= @HOSTNAME %> - level: notice - __path__: /var/log/unattended-upgrades/unattended-upgrades.log - - pipeline_stages: - - match: - selector: '{job="unattended-upgrades"}' - stages: - - regex: - expression: '^(?P\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}),\d{3} (?P[^ ]+) (?P.+)$' - - - timestamp: - source: timestamp - format: 2006-01-02 15:04:05 - location: Asia/Tokyo - - - output: - source: message - diff --git a/cookbooks/supervisor/default.rb b/cookbooks/supervisor/default.rb deleted file mode 100644 index a5e440b..0000000 --- a/cookbooks/supervisor/default.rb +++ /dev/null @@ -1,5 +0,0 @@ -package 'supervisor' - -service 'supervisor' do - action [ :enable, :start] -end diff --git a/cookbooks/vector/attributes.rb b/cookbooks/vector/attributes.rb index e48f7ef..38f0084 100644 --- a/cookbooks/vector/attributes.rb +++ b/cookbooks/vector/attributes.rb @@ -19,6 +19,7 @@ node.reverse_merge!({ 'url' => 'https://github.com/vectordotdev/vector/releases/download/', 'ipaddr' => ipaddr, 'debPrefix' => 'vector-', - 'debPostfix' => '-amd64.deb' + 'debPostfix' => '-amd64.deb', + 'isSyslog' => false }, }) diff --git a/cookbooks/vector/default.rb b/cookbooks/vector/default.rb index f0eb864..8f04d2a 100644 --- a/cookbooks/vector/default.rb +++ b/cookbooks/vector/default.rb @@ -5,3 +5,6 @@ include_recipe './attributes.rb' include_recipe './install.rb' include_recipe './setup.rb' +if node['vector']['isSyslog'] + include_recipe './syslog_setup.rb' +end diff --git a/cookbooks/vector/files/etc/systemd/system/vector-apt.service 
b/cookbooks/vector/files/etc/systemd/system/vector-apt.service new file mode 100644 index 0000000..e09c1de --- /dev/null +++ b/cookbooks/vector/files/etc/systemd/system/vector-apt.service @@ -0,0 +1,16 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/apt.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/vector/files/etc/systemd/system/vector-auth.service b/cookbooks/vector/files/etc/systemd/system/vector-auth.service new file mode 100644 index 0000000..b38af42 --- /dev/null +++ b/cookbooks/vector/files/etc/systemd/system/vector-auth.service @@ -0,0 +1,17 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/auth.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/vector/files/etc/systemd/system/vector-consul.service b/cookbooks/vector/files/etc/systemd/system/vector-consul.service new file mode 100644 index 0000000..cc12547 --- /dev/null +++ b/cookbooks/vector/files/etc/systemd/system/vector-consul.service @@ -0,0 +1,16 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/consul.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/vector/files/etc/systemd/system/vector-journald.service 
b/cookbooks/vector/files/etc/systemd/system/vector-journald.service new file mode 100644 index 0000000..d985ecf --- /dev/null +++ b/cookbooks/vector/files/etc/systemd/system/vector-journald.service @@ -0,0 +1,16 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/journald.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target diff --git a/cookbooks/vector/files/etc/systemd/system/vector-unattended-upgrade.service b/cookbooks/vector/files/etc/systemd/system/vector-unattended-upgrade.service new file mode 100644 index 0000000..cf3f0fa --- /dev/null +++ b/cookbooks/vector/files/etc/systemd/system/vector-unattended-upgrade.service @@ -0,0 +1,17 @@ +[Unit] +Description=Vector +Documentation=https://vector.dev +After=network-online.target +Requires=network-online.target + +[Service] +ExecStart=/usr/bin/vector --config /etc/vector/unattended-upgrade.toml +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +StandardOutput=journal +StandardError=journal +SyslogIdentifier=vector + +[Install] +WantedBy=multi-user.target + diff --git a/cookbooks/vector/files/etc/vector/apt.toml b/cookbooks/vector/files/etc/vector/apt.toml new file mode 100644 index 0000000..85a96a4 --- /dev/null +++ b/cookbooks/vector/files/etc/vector/apt.toml @@ -0,0 +1,48 @@ +data_dir = "/var/lib/vector/" + +[sources.apt] + type = "file" + include = [ "/var/log/apt/history.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.apt_transform] + type = "remap" + inputs = ["apt"] + source = ''' + .hostname = .host + del(.host) + ''' + +[sinks.apt_output] +type = "file" +inputs = [ "apt_transform" ] +compression = "none" +path = "/tmp/apt-%Y-%m-%d.log" + + [sinks.apt_output.encoding] + codec = "json" + + [sinks.apt_output.buffer] + max_size = 
268435488 + type = "disk" + +[sinks.apt_loki] +type = "loki" +inputs = [ "apt_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.apt_loki.labels] + level = "notice" + hostname = "{{ hostname }}" + job = "apt" + filename = "{{ file }}" + + [sinks.apt_loki.encoding] + codec = "json" + + [sinks.apt_loki.buffer] + max_size = 268435488 + type = "disk" + diff --git a/cookbooks/vector/files/etc/vector/auth.toml b/cookbooks/vector/files/etc/vector/auth.toml new file mode 100644 index 0000000..4085cfc --- /dev/null +++ b/cookbooks/vector/files/etc/vector/auth.toml @@ -0,0 +1,49 @@ +data_dir = "/var/lib/vector/" + +[sources.auth] + type = "file" + include = [ "/var/log/auth.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.auth_transform] + type = "remap" + inputs = ["auth"] + source = ''' + . |= parse_linux_authorization!(.message) + del(.host) + + .appname = downcase!(.appname) + ''' + +[sinks.auth_output] +type = "file" +inputs = [ "auth_transform" ] +compression = "none" +path = "/tmp/auth-%Y-%m-%d.log" + + [sinks.auth_output.encoding] + codec = "json" + + [sinks.auth_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.auth_loki] +type = "loki" +inputs = [ "auth_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.auth_loki.labels] + level = "notice" + hostname = "{{ hostname }}" + job = "{{ appname }}" + filename = "/var/log/auth.log" + + [sinks.auth_loki.encoding] + codec = "json" + + [sinks.auth_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/vector/files/etc/vector/consul.toml b/cookbooks/vector/files/etc/vector/consul.toml new file mode 100644 index 0000000..d59184c --- /dev/null +++ b/cookbooks/vector/files/etc/vector/consul.toml @@ -0,0 +1,62 @@ +data_dir = "/var/lib/vector/" + +[sources.consul] + type = "file" + include = [ "/var/log/consul/consul-*.log" ] + ignore_older_secs = 600 + read_from = "beginning" + 
+[transforms.consul_transform] + type = "remap" + inputs = ["consul"] + source = ''' + dt, err = parse_regex(.message, r'^(?P
\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}\+\d{4}) ') + . = merge(., dt) + + .timestamp = .dt + del(.dt) + + .message = replace!(.message, r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}\+\d{4} ', "") + + .hostname = .host + del(.host) + + lv, err = parse_regex(.message, r'\[(?P[^\]]+)\]') + . = merge(., lv) + + .message = replace(.message, r'\[(?P[^\]]+)\] ', "") + + .level = downcase!(.level) + ''' + +[sinks.consul_output] +type = "file" +inputs = [ "consul_transform" ] +compression = "none" +path = "/tmp/consul-%Y-%m-%d.log" + + [sinks.consul_output.encoding] + codec = "json" + + [sinks.consul_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.consul_loki] +type = "loki" +inputs = [ "consul_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.consul_loki.labels] + level = "{{ level }}" + hostname = "{{ hostname }}" + job = "consul" + filename = "{{ file }}" + + [sinks.consul_loki.encoding] + codec = "json" + + [sinks.consul_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/vector/files/etc/vector/journald.toml b/cookbooks/vector/files/etc/vector/journald.toml new file mode 100644 index 0000000..8245571 --- /dev/null +++ b/cookbooks/vector/files/etc/vector/journald.toml @@ -0,0 +1,63 @@ +data_dir = "/var/lib/vector/" + +[sources.journald] + type = "journald" + exclude_units = [ + "prometheus", + "consul", + "cron", + "snmp_exporter", + "alertmanager", + ] + + [sources.journald.exclude_matches] + "SYSLOG_IDENTIFIER" = [ "kernel", "vector", "sudo", "vault" ] + +[transforms.journald_transform] + type = "remap" + inputs = ["journald"] + source = ''' + .hostname = .host + del(.host) + + .SYSLOG_FACILITY = to_int!(.SYSLOG_FACILITY) + .SYSLOG_FACILITY = to_syslog_facility!(.SYSLOG_FACILITY) + + .PRIORITY = to_int!(.PRIORITY) + .PRIORITY = to_syslog_level!(.PRIORITY) + + .SYSLOG_IDENTIFIER = downcase!(.SYSLOG_IDENTIFIER) + + .job = replace!(._SYSTEMD_UNIT, r'(\.service|\.scope)', "") + ''' + 
+[sinks.journald_output] +type = "file" +inputs = [ "journald_transform" ] +compression = "none" +path = "/tmp/journald-%Y-%m-%d.log" + + [sinks.journald_output.encoding] + codec = "json" + + [sinks.journald_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.journald_loki] +type = "loki" +inputs = [ "journald_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.journald_loki.labels] + level = "{{ PRIORITY }}" + hostname = "{{ hostname }}" + job = "{{ job }}" + + [sinks.journald_loki.encoding] + codec = "json" + + [sinks.journald_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/vector/files/etc/vector/syslog.toml b/cookbooks/vector/files/etc/vector/syslog.toml index 0db363d..ddb3d79 100644 --- a/cookbooks/vector/files/etc/vector/syslog.toml +++ b/cookbooks/vector/files/etc/vector/syslog.toml @@ -9,7 +9,7 @@ data_dir = "/var/lib/vector" type = "remap" inputs = [ "syslog" ] source = """ - if contains(.severity, "err") { + if contains(.severity, "err") ?? 
false { .severity = "error" } @@ -22,7 +22,7 @@ data_dir = "/var/lib/vector" inputs = [ "reformat-syslog" ] condition = '.sev_filter == true && .msg_filter == true' -[sinks.docker-logs] +[sinks.send-logs] type = "loki" inputs = ["filter-syslog"] endpoint = "http://192.168.10.101:3100" @@ -35,10 +35,3 @@ data_dir = "/var/lib/vector" labels.job = "syslog" labels.hostname = "{{ host }}" -[sinks.file] - type = "file" - inputs = ["reformat-syslog"] - compression = "none" - path = "/tmp/vector-%Y-%m-%d.log" - encoding = "ndjson" - diff --git a/cookbooks/vector/files/etc/vector/unattended-upgrade.toml b/cookbooks/vector/files/etc/vector/unattended-upgrade.toml new file mode 100644 index 0000000..936d3c2 --- /dev/null +++ b/cookbooks/vector/files/etc/vector/unattended-upgrade.toml @@ -0,0 +1,50 @@ +data_dir = "/var/lib/vector/" + +[sources.unattended-upgrade] + type = "file" + include = [ "/var/log/unattended-upgrades/unattended-upgrades.log" ] + ignore_older_secs = 600 + read_from = "beginning" + +[transforms.unattended-upgrade_transform] + type = "remap" + inputs = ["unattended-upgrade"] + source = ''' + parse = parse_regex(.message, r'^(?P\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}), \d{3} (?P[^ ]+) (?P.+)$') ?? {} + . 
= merge(., parse) + + .hostname = .host + del(.host) + ''' + +[sinks.unattended-upgrade_output] +type = "file" +inputs = [ "unattended-upgrade_transform" ] +compression = "none" +path = "/tmp/unattended-upgrade-%Y-%m-%d.log" + + [sinks.unattended-upgrade_output.encoding] + codec = "json" + + [sinks.unattended-upgrade_output.buffer] + max_size = 268435488 + type = "disk" + +[sinks.unattended-upgrade_loki] +type = "loki" +inputs = [ "unattended-upgrade_transform" ] +endpoint = "http://loki.service.consul:3100" +compression = "snappy" + + [sinks.unattended-upgrade_loki.labels] + level = "notice" + hostname = "{{ hostname }}" + job = "unattended-upgrade" + filename = "{{ file }}" + + [sinks.unattended-upgrade_loki.encoding] + codec = "json" + + [sinks.unattended-upgrade_loki.buffer] + max_size = 268435488 + type = "disk" diff --git a/cookbooks/vector/setup.rb b/cookbooks/vector/setup.rb index af29c6b..73901b3 100644 --- a/cookbooks/vector/setup.rb +++ b/cookbooks/vector/setup.rb @@ -7,6 +7,111 @@ end end +# Deploy config for `apt`: +remote_file '/etc/vector/apt.toml' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-apt]' +end + +remote_file '/etc/systemd/system/vector-apt.service' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-apt]' +end + +service 'vector-apt' do + action [:enable, :start] +end + +# Deploy config for mointoring `/var/log/auth.log`: +remote_file '/etc/vector/auth.toml' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-auth]' +end + +remote_file '/etc/systemd/system/vector-auth.service' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-auth]' +end + +service 'vector-auth' do + action [:enable, :start] +end + +# Deploy config for mointoring `/var/log/consul/consul-*.log`: +remote_file '/etc/vector/consul.toml' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-consul]' +end 
+ +remote_file '/etc/systemd/system/vector-consul.service' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-consul]' +end + +service 'vector-consul' do + action [:enable, :start] +end + +# Deploy config for mointoring `journald`: +remote_file '/etc/vector/journald.toml' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-journald]' +end + +remote_file '/etc/systemd/system/vector-journald.service' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-journald]' +end + +service 'vector-journald' do + action [:enable, :start] +end + +# Deploy config for mointoring `/var/log/unattended-upgrades/unattended-upgrades-dpkg.log`: +remote_file '/etc/vector/unattended-upgrade.toml' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-unattended-upgrade]' +end + +remote_file '/etc/systemd/system/vector-unattended-upgrade.service' do + owner 'root' + group 'root' + mode '0644' + + notifies :restart, 'service[vector-unattended-upgrade]' +end + +service 'vector-unattended-upgrade' do + action [:enable, :start] +end + # Stop vector default service: service 'vector' do action :disable diff --git a/nodes/example.json b/nodes/example.json index ae4dee6..3094664 100644 --- a/nodes/example.json +++ b/nodes/example.json @@ -1,5 +1,11 @@ { "recipes": [ - "./roles/base.rb" - ] + "./roles/base.rb", + "./cookbooks/nginx/default.rb" + ], + "nginx": { + "skip_lego": false, + "skip_webadm": false + }, + "is_ec2": true } diff --git a/roles/base.rb b/roles/base.rb index fc33af1..8838d79 100644 --- a/roles/base.rb +++ b/roles/base.rb @@ -1,11 +1,9 @@ include_recipe '../cookbooks/base/default.rb' include_recipe '../cookbooks/kazu634/default.rb' -include_recipe '../cookbooks/supervisor/default.rb' include_recipe '../cookbooks/vault/default.rb' include_recipe '../cookbooks/consul-template/default.rb' include_recipe '../cookbooks/consul/default.rb' include_recipe 
'../cookbooks/fzf/default.rb' -include_recipe '../cookbooks/promtail/default.rb' include_recipe '../cookbooks/vector/default.rb' include_recipe '../cookbooks/prometheus-exporters/default.rb' include_recipe '../cookbooks/nomad/default.rb' diff --git a/tasks/ubuntu.rake b/tasks/ubuntu.rake new file mode 100755 index 0000000..0a2cea7 --- /dev/null +++ b/tasks/ubuntu.rake @@ -0,0 +1,9 @@ +#!/usr/bin/env rake + +desc 'Invoke itamae command for the first time' +task :ubuntu do + node = `ls -1 nodes/*.json | xargs -I % basename % .json | fzf` + node.chomp! + + sh "ITAMAE_PASSWORD=musashi bundle ex itamae ssh -u ubuntu -i ~/.ssh/amazon.pem --host #{node} -j nodes/#{node}.json entrypoint.rb" +end