Merge pull request 'ubuntu-2204-support' (#4) from ubuntu-2204-support into master

Reviewed-on: #4
This commit is contained in:
Kazuhiro MUSASHI 2023-06-24 13:08:00 +00:00
commit 23e0dc8e9d
148 changed files with 2717 additions and 1903 deletions

View File

@ -66,17 +66,9 @@ include_recipe './lsyncd.rb'
# Install starship command:
include_recipe './starship.rb'
# recipes for Ubuntu 16.04
if node['platform_version'].to_f == 16.04
# ntp configurations
include_recipe './ntp.rb'
# misc recipe
include_recipe './unnecessary.rb'
end
# recipes for Ubuntu 20.04
if node['platform_version'].to_f == 20.04
# recipes for Ubuntu 20.04 and later
case node['platform_version']
when "20.04", "22.04"
remote_file '/etc/multipath.conf' do
owner 'root'
group 'root'
@ -88,6 +80,33 @@ if node['platform_version'].to_f == 20.04
service 'multipath-tools' do
action :nothing
end
package 'systemd-timesyncd'
service 'systemd-timesyncd' do
action :enable
end
end
case node['platform_version']
when "20.04"
remote_file '/etc/systemd/timesyncd.conf' do
owner 'root'
group 'root'
mode '0644'
notifies :restart, 'service[systemd-timesyncd]'
end
when "22.04"
remote_file '/etc/systemd/timesyncd.conf' do
owner 'root'
group 'root'
mode '0644'
source 'files/etc/systemd/timesyncd.2204.conf'
notifies :restart, 'service[systemd-timesyncd]'
end
end
# AWS EC2 Swap Setting:

View File

@ -129,3 +129,15 @@ Unattended-Upgrade::Automatic-Reboot "false";
// Allow package downgrade if Pin-Priority exceeds 1000
// Unattended-Upgrade::Allow-downgrade "false";
// When APT fails to mark a package to be upgraded or installed try adjusting
// candidates of related packages to help APT's resolver in finding a solution
// where the package can be upgraded or installed.
// This is a workaround until APT's resolver is fixed to always find a
// solution if it exists. (See Debian bug #711128.)
// The fallback is enabled by default, except on Debian's sid release because
// uninstallable packages are frequent there.
// Disabling the fallback speeds up unattended-upgrades when there are
// uninstallable packages at the expense of rarely keeping back packages which
// could be upgraded or installed.
// Unattended-Upgrade::Allow-APT-Mark-Fallback "true";

View File

@ -1,66 +0,0 @@
# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
driftfile /var/lib/ntp/ntp.drift
# Enable this if you want statistics to be logged.
#statsdir /var/log/ntpstats/
statistics loopstats peerstats clockstats
filegen loopstats file loopstats type day enable
filegen peerstats file peerstats type day enable
filegen clockstats file clockstats type day enable
# Specify one or more NTP servers.
# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
# more information.
pool 0.ubuntu.pool.ntp.org iburst
pool 1.ubuntu.pool.ntp.org iburst
pool 2.ubuntu.pool.ntp.org iburst
pool 3.ubuntu.pool.ntp.org iburst
# Use Ubuntu's ntp server as a fallback.
pool ntp.ubuntu.com
# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
# might also be helpful.
#
# Note that "restrict" applies to both servers and clients, so a configuration
# that might be intended to block requests from certain clients could also end
# up blocking replies from your own upstream servers.
# By default, exchange time with everybody, but don't allow configuration.
restrict -4 default kod notrap nomodify nopeer noquery limited
restrict -6 default kod notrap nomodify nopeer noquery limited
# Local users may interrogate the ntp server more closely.
restrict 127.0.0.1
restrict ::1
# Needed for adding pool entries
restrict source notrap nomodify noquery
# Clients from this (example!) subnet have unlimited access, but only if
# cryptographically authenticated.
#restrict 192.168.123.0 mask 255.255.255.0 notrust
# If you want to provide time to your local subnet, change the next line.
# (Again, the address is an example only.)
#broadcast 192.168.123.255
# If you want to listen to time broadcasts on your local subnet, de-comment the
# next lines. Please do this only if you trust everybody on the network!
#disable auth
#broadcastclient
#Changes required to use pps synchronisation as explained in documentation:
#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918
#server 127.127.8.1 mode 135 prefer # Meinberg GPS167 with PPS
#fudge 127.127.8.1 time1 0.0042 # relative to PPS for my hardware
#server 127.127.22.1 # ATOM(PPS)
#fudge 127.127.22.1 flag3 1 # enable PPS API

View File

@ -0,0 +1,123 @@
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
Include /etc/ssh/sshd_config.d/*.conf
Port 10022
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::
#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
#LogLevel INFO
# Authentication:
#LoginGraceTime 2m
PermitRootLogin no
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
#PubkeyAuthentication yes
# Expect .ssh/authorized_keys2 to be disregarded by default in future.
#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
PasswordAuthentication no
#PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
KbdInteractiveAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the KbdInteractiveAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via KbdInteractiveAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and KbdInteractiveAuthentication to 'no'.
UsePAM yes
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
#PidFile /run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# PermitTTY no
# ForceCommand cvs server
#PasswordAuthentication yes

View File

@ -0,0 +1,20 @@
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Entries in this file show the compile time defaults. Local configuration
# should be created by either modifying this file, or by creating "drop-ins" in
# the timesyncd.conf.d/ subdirectory. The latter is generally recommended.
# Defaults can be restored by simply deleting this file and all drop-ins.
#
# See timesyncd.conf(5) for details.
[Time]
NTP=192.168.10.1
#FallbackNTP=ntp.ubuntu.com
#RootDistanceMaxSec=5
#PollIntervalMinSec=32
#PollIntervalMaxSec=2048

View File

@ -0,0 +1,19 @@
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Entries in this file show the compile time defaults.
# You can change settings by editing this file.
# Defaults can be restored by simply deleting this file.
#
# See timesyncd.conf(5) for details.
[Time]
NTP=192.168.10.1
#FallbackNTP=ntp.ubuntu.com
#RootDistanceMaxSec=5
#PollIntervalMinSec=32
#PollIntervalMaxSec=2048

View File

@ -1,13 +0,0 @@
package 'ntp'
remote_file '/etc/ntp.conf' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[ntp]'
end
service 'ntp' do
action :nothing
end

View File

@ -8,7 +8,17 @@ execute 'ufw allow 10022' do
end
# Deploy the `sshd` configuration file:
case run_command('grep VERSION_ID /etc/os-release | awk -F\" \'{print $2}\'').stdout.chomp
case node['platform_version']
when "22.04"
remote_file '/etc/ssh/sshd_config' do
user 'root'
owner 'root'
group 'root'
mode '644'
source 'files/etc/ssh/sshd_config.2204'
end
when "20.04"
remote_file '/etc/ssh/sshd_config' do
user 'root'
@ -28,6 +38,7 @@ when "18.04"
source 'files/etc/ssh/sshd_config.1804'
end
else
remote_file '/etc/ssh/sshd_config' do
user 'root'

View File

@ -1,5 +1,5 @@
case run_command('grep VERSION_ID /etc/os-release | awk -F\" \'{print $2}\'').stdout.chomp
when "18.04", "20.04"
case node['platform_version']
when "18.04", "20.04", "22.04"
execute 'timedatectl set-timezone Asia/Tokyo' do
not_if 'timedatectl | grep Tokyo'
end

View File

@ -45,7 +45,7 @@ when "18.04"
not_if 'test -e /var/log/cron-apt/log'
end
when '20.04'
when '20.04', '22.04'
%w(20auto-upgrades 50unattended-upgrades).each do |conf|
remote_file "/etc/apt/apt.conf.d/#{conf}" do
owner 'root'

View File

@ -1,5 +0,0 @@
%w( apparmor iscsid lxc lxcfs lxd-containers lxd open-iscsi ).each do |s|
service s do
action :disable
end
end

View File

@ -1,90 +0,0 @@
server {
# allow access from localhost
listen 80 reuseport backlog=1024;
listen 443 ssl http2 backlog=1024;
server_name blog.kazu634.com;
ssl_certificate /etc/letsencrypt/live/blog.kazu634.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/blog.kazu634.com/privkey.pem;
ssl_dhparam /etc/letsencrypt/live/blog.kazu634.com/dhparams_4096.pem;
ssl_session_cache shared:SSL:3m;
ssl_buffer_size 4k;
ssl_session_timeout 10m;
ssl_session_tickets on;
ssl_session_ticket_key /etc/letsencrypt/live/blog.kazu634.com/ticket.key;
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES128-GCM-SHA256:AES128-SHA256:AES128-SHA:AES256-GCM-SHA384:AES256-SHA256:AES256-SHA:DES-CBC3-SHA;
ssl_prefer_server_ciphers on;
ssl_stapling on;
ssl_stapling_verify on;
resolver 8.8.4.4 8.8.8.8 valid=300s;
resolver_timeout 10s;
# Enable HSTS (HTTP Strict Transport Security)
add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
root /var/www/blog;
index index.html index.htm;
access_log /var/log/nginx/blog.access.log ltsv;
error_log /var/log/nginx/blog.error.log;
location / {
gzip on;
gunzip on;
gzip_vary on;
# http2 server push:
http2_push_preload on;
http2_push /css/sanitize.css;
http2_push /css/responsive.css;
http2_push /css/highlight_monokai.css;
http2_push /css/theme.css;
http2_push /css/custom.css;
http2_push /images/profile.png;
http2_push /js/highlight.pack.js;
if (-e "/tmp/maintenance") {
return 503;
}
location /feed {
return 301 http://blog.kazu634.com/index.xml;
}
location /wp-content {
return 404;
}
location ~* \.css {
gzip_static always;
expires max;
}
location ~* \.js {
gzip_static always;
expires max;
}
location /images {
gzip_static always;
expires max;
}
location = /favicon.ico {
access_log off;
empty_gif;
expires max;
}
try_files $uri $uri/ /index.html;
}
}

View File

@ -13,7 +13,7 @@ else
end
ipaddr = run_command(cmd).stdout.chomp
cmd = 'grep nameserver /run/systemd/resolve/resolv.conf | grep -v 8.8.8.8 | grep -v 127.0.0.1 | perl -pe "s/nameserver //g" | perl -pe "s/\n/ /g"'
cmd = 'grep nameserver /run/systemd/resolve/resolv.conf | grep -v 8.8.8.8 | grep -v 127.0.0.1 | perl -pe "s/nameserver //g" | sort | uniq | perl -pe "s/\n/ /g"'
dns = run_command(cmd).stdout.chomp
node.reverse_merge!({
@ -23,6 +23,6 @@ node.reverse_merge!({
'ipaddr' => ipaddr,
'dns' => dns,
'encrypt' => 's2T3XUTb9MjHYOw8I820O5YkN2G6eJrjLjJRTnEAKoM=',
'token' => 'acb7096c-dcda-775a-b52c-b47c96b38d0e'
'token' => '63de6edb-0cb0-de95-d5f1-7facf616c26d'
}
})

View File

@ -7,7 +7,27 @@ package 'dnsmasq'
end
case run_command('grep VERSION_ID /etc/os-release | awk -F\" \'{print $2}\'').stdout.chomp
when "20.04", "22.04"
when "22.04"
template '/etc/systemd/resolved.conf' do
owner 'root'
group 'root'
mode '644'
source 'templates/etc/systemd/resolved.conf.2204.erb'
variables(dns: node['consul']['dns'])
notifies :restart, 'service[systemd-resolved]', :immediately
end
remote_file '/etc/dnsmasq.conf' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[dnsmasq]', :immediately
end
when "20.04"
template '/etc/systemd/resolved.conf' do
owner 'root'
group 'root'

View File

@ -63,7 +63,6 @@ strict-order
# Add other name servers here, with domain specs if they are for
# non-public domains.
#server=/localnet/192.168.0.1
server=/consul/127.0.0.1#8600
# Example of routing PTR queries to nameservers: this will send all
@ -91,7 +90,7 @@ server=/consul/127.0.0.1#8600
# server=10.1.2.3@eth1
# and this sets the source (ie local) address used to talk to
# 10.1.2.3 to 192.168.1.1 port 55 (there must be a interface with that
# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
# IP on the machine, obviously).
# server=10.1.2.3@192.168.1.1#55
@ -190,7 +189,7 @@ server=/consul/127.0.0.1#8600
# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
# hosts. Use the DHCPv4 lease to derive the name, network segment and
# MAC address and assume that the host will also have an
# IPv6 address calculated using the SLAAC alogrithm.
# IPv6 address calculated using the SLAAC algorithm.
#dhcp-range=1234::, ra-names
# Do Router Advertisements, BUT NOT DHCP for this subnet.
@ -211,7 +210,7 @@ server=/consul/127.0.0.1#8600
#dhcp-range=1234::, ra-stateless, ra-names
# Do router advertisements for all subnets where we're doing DHCPv6
# Unless overriden by ra-stateless, ra-names, et al, the router
# Unless overridden by ra-stateless, ra-names, et al, the router
# advertisements will have the M and O bits set, so that the clients
# get addresses and configuration from DHCPv6, and the A bit reset, so the
# clients don't use SLAAC addresses.
@ -252,7 +251,7 @@ server=/consul/127.0.0.1#8600
# the IP address 192.168.0.60
#dhcp-host=id:01:02:02:04,192.168.0.60
# Always give the Infiniband interface with hardware address
# Always give the InfiniBand interface with hardware address
# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the
# ip address 192.168.0.61. The client id is derived from the prefix
# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of
@ -289,7 +288,7 @@ server=/consul/127.0.0.1#8600
# Give a fixed IPv6 address and name to client with
# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
# Note the MAC addresses CANNOT be used to identify DHCPv6 clients.
# Note also the they [] around the IPv6 address are obilgatory.
# Note also that the [] around the IPv6 address are obligatory.
#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]
# Ignore any clients which are not specified in dhcp-host lines
@ -355,11 +354,11 @@ server=/consul/127.0.0.1#8600
# Set option 58 client renewal time (T1). Defaults to half of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T1:1m
#dhcp-option=option:T1,1m
# Set option 59 rebinding time (T2). Defaults to 7/8 of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T2:2m
#dhcp-option=option:T2,2m
# Set the NTP time server address to be the same machine as
# is running dnsmasq
@ -437,22 +436,22 @@ server=/consul/127.0.0.1#8600
#dhcp-option-force=211,30i
# Set the boot filename for netboot/PXE. You will only need
# this is you want to boot machines over the network and you will need
# a TFTP server; either dnsmasq's built in TFTP server or an
# this if you want to boot machines over the network and you will need
# a TFTP server; either dnsmasq's built-in TFTP server or an
# external one. (See below for how to enable the TFTP server.)
#dhcp-boot=pxelinux.0
# The same as above, but use a custom tftp-server instead of the machine running dnsmasq
#dhcp-boot=pxelinux,server.name,192.168.1.100
# Boot for Etherboot gPXE. The idea is to send two different
# filenames, the first loads gPXE, and the second tells gPXE what to
# load. The dhcp-match sets the gpxe tag for requests from gPXE.
#dhcp-match=set:gpxe,175 # gPXE sends a 175 option.
#dhcp-boot=tag:!gpxe,undionly.kpxe
#dhcp-boot=mybootimage
# Boot for iPXE. The idea is to send two different
# filenames, the first loads iPXE, and the second tells iPXE what to
# load. The dhcp-match sets the ipxe tag for requests from iPXE.
#dhcp-boot=undionly.kpxe
#dhcp-match=set:ipxe,175 # iPXE sends a 175 option.
#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php
# Encapsulated options for Etherboot gPXE. All the options are
# Encapsulated options for iPXE. All the options are
# encapsulated within option 175
#dhcp-option=encap:175, 1, 5b # priority code
#dhcp-option=encap:175, 176, 1b # no-proxydhcp
@ -526,7 +525,7 @@ server=/consul/127.0.0.1#8600
# (using /etc/hosts) then that name can be specified as the
# tftp_servername (the third option to dhcp-boot) and in that
# case dnsmasq resolves this name and returns the resultant IP
# addresses in round robin fasion. This facility can be used to
# addresses in round robin fashion. This facility can be used to
# load balance the tftp load among a set of servers.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name
@ -548,6 +547,14 @@ server=/consul/127.0.0.1#8600
# http://www.isc.org/files/auth.html
#dhcp-authoritative
# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039.
# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit
# option with a DHCPACK including a Rapid Commit option and fully committed address
# and configuration information. This must only be enabled if either the server is
# the only server for the subnet, or multiple servers are present and they each
# commit a binding for all clients.
#dhcp-rapid-commit
# Run an executable when a DHCP lease is created or destroyed.
# The arguments sent to the script are "add" or "del",
# then the MAC address, the IP address and finally the hostname
@ -665,3 +672,8 @@ server=/consul/127.0.0.1#8600
# Include all files in a directory which end in .conf
#conf-dir=/etc/dnsmasq.d/,*.conf
# If a DHCP client claims that its name is "wpad", ignore that.
# This fixes a security hole. see CERT Vulnerability VU#598349
#dhcp-name-match=set:wpad-ignore,wpad
#dhcp-ignore-names=tag:wpad-ignore

View File

@ -1,9 +0,0 @@
[program:consul]
command=/usr/local/bin/consul agent -pid-file /var/run/consul.pid -config-dir=/etc/consul.d
stdout_logfile=/var/log/supervisor/consul.log
environment=GOMAXPROC="2"
redirect_stderr=true
stdout_logfile_maxbytes=1MB
stdout_logfile_backups=5
autorestart=true
stopsignal=TERM

View File

@ -0,0 +1,34 @@
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Entries in this file show the compile time defaults. Local configuration
# should be created by either modifying this file, or by creating "drop-ins" in
# the resolved.conf.d/ subdirectory. The latter is generally recommended.
# Defaults can be restored by simply deleting this file and all drop-ins.
#
# Use 'systemd-analyze cat-config systemd/resolved.conf' to display the full config.
#
# See resolved.conf(5) for details.
[Resolve]
# Some examples of DNS servers which may be used for DNS= and FallbackDNS=:
# Cloudflare: 1.1.1.1#cloudflare-dns.com 1.0.0.1#cloudflare-dns.com 2606:4700:4700::1111#cloudflare-dns.com 2606:4700:4700::1001#cloudflare-dns.com
# Google: 8.8.8.8#dns.google 8.8.4.4#dns.google 2001:4860:4860::8888#dns.google 2001:4860:4860::8844#dns.google
# Quad9: 9.9.9.9#dns.quad9.net 149.112.112.112#dns.quad9.net 2620:fe::fe#dns.quad9.net 2620:fe::9#dns.quad9.net
DNS=127.0.0.1 <%= @dns %> 8.8.8.8
#FallbackDNS=
#Domains=
#DNSSEC=no
#DNSOverTLS=no
#MulticastDNS=no
#LLMNR=no
#Cache=no-negative
#CacheFromLocalhost=no
DNSStubListener=no
#DNSStubListenerExtra=
#ReadEtcHosts=yes
#ResolveUnicastSingleLabel=no

View File

@ -1,4 +1,4 @@
/var/log/promtail.log
/var/log/digdag.log
{
rotate 4
weekly

View File

@ -1,5 +1,5 @@
# Log kernel generated promtail log messages to file
:syslogtag,contains,"promtail" /var/log/promtail.log
# Log kernel generated digdag log messages to file
:syslogtag,contains,"digdag.sh" /var/log/digdag.log
# Uncomment the following to stop logging anything that matches the last rule.
# Doing this will stop logging kernel generated UFW log messages to the file

View File

@ -1,7 +0,0 @@
[program:digdag]
command=/etc/digdag/digdag.sh
stdout_logfile=/var/log/supervisor/digdag.log
redirect_stderr=true
stdout_logfile_maxbytes=1MB
stdout_logfile_backups=5
autorestart=true

View File

@ -0,0 +1,57 @@
data_dir = "/var/lib/vector/"
[sources.digdag]
type = "file"
include = [ "/var/log/digdag.log" ]
ignore_older_secs = 600
read_from = "beginning"
[transforms.digdag_transform]
type = "remap"
inputs = ["digdag"]
source = '''
. |= parse_syslog!(.message)
.message = replace(.message, r'^\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2} \+\d{4} ', "")
if starts_with(.message, "[") {
l = parse_regex!(.message, r'\[(?P<level>[^\]]+)\]')
. = merge(., l)
.level = downcase(.level)
} else {
.level = "debug"
}
'''
[sinks.digdag_output]
type = "file"
inputs = [ "digdag_transform" ]
compression = "none"
path = "/tmp/digdag-%Y-%m-%d.log"
[sinks.digdag_output.encoding]
codec = "json"
[sinks.digdag_output.buffer]
max_size = 268435488
type = "disk"
[sinks.digdag_loki]
type = "loki"
inputs = [ "digdag_transform" ]
endpoint = "http://loki.service.consul:3100"
compression = "snappy"
[sinks.digdag_loki.labels]
level = "{{ level }}"
hostname = "{{ hostname }}"
job = "digdag"
filename = "/var/log/digdag.log"
[sinks.digdag_loki.encoding]
codec = "json"
[sinks.digdag_loki.buffer]
max_size = 268435488
type = "disk"

View File

@ -0,0 +1,15 @@
[Unit]
Description=digdag
Requires=network-online.target
After=network-online.target
[Service]
Type=simple
EnvironmentFile=-/etc/default/digdag
Restart=on-failure
ExecStart=/etc/digdag/digdag.sh
KillSignal=process
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,16 @@
[Unit]
Description=Vector
Documentation=https://vector.dev
After=network-online.target
Requires=network-online.target
[Service]
ExecStart=/usr/bin/vector --config /etc/vector/digdag.toml
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=vector
[Install]
WantedBy=multi-user.target

View File

@ -36,38 +36,53 @@ execute 'ufw reload-or-enable' do
action :nothing
end
# Deploy the config file for `supervisor`:
remote_file '/etc/supervisor/conf.d/digdag.conf' do
# Deploy the config file for `systemd`:
remote_file '/lib/systemd/system/digdag.service' do
owner 'root'
group 'root'
mode '644'
end
service 'digdag' do
action [ :enable, :restart ]
end
# Deploy `rsyslog` config file for `digdag`:
remote_file '/etc/rsyslog.d/30-digdag.conf' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[rsyslog]', :immediately
end
service 'supervisor' do
action :nothing
end
# Deploy /etc/hosts file:
HOSTNAME = run_command('uname -n').stdout.chomp
template '/etc/promtail/digdag.yaml' do
# Deploy `logrotate` config for `digdag`:
remote_file '/etc/logrotate.d/digdag' do
owner 'root'
group 'root'
mode '644'
group 'root'
mode '644'
end
variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint'])
end
# Deploy the config file for `vector`:
remote_file '/etc/vector/digdag.toml' do
owner 'root'
group 'root'
mode '644'
end
# Deploy the `systemd` configuration:
remote_file '/lib/systemd/system/promtail-digdag.service' do
remote_file '/lib/systemd/system/vector-digdag.service' do
owner 'root'
group 'root'
mode '644'
end
# Service setting:
service 'promtail-digdag' do
service 'vector-digdag' do
action [ :enable, :restart ]
end
service 'rsyslog' do
action [ :nothing ]
end

View File

@ -1,61 +0,0 @@
server:
disable: true
positions:
filename: /var/opt/promtail/promtail_digdag_position.yaml
clients:
- url: http://<%= @LOKIENDPOINT %>/loki/api/v1/push
scrape_configs:
- job_name: digdag
static_configs:
- targets:
- localhost
labels:
job: digdag
hostname: <%= @HOSTNAME %>
__path__: /var/log/supervisor/digdag.log
pipeline_stages:
- match:
selector: '{job="digdag"} !~ "^[0-9]{4}-[0-9]{2}-[0-9]{2}"'
action: drop
- match:
selector: '{job="digdag"} |~ "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} ERROR"'
action: drop
- match:
selector: '{job="digdag"} !~ "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} ERROR"'
stages:
- regex:
expression: '^(?P<datetime>\d+\-\d+\-\d+ \d+:\d+:\d+)([\.\d]+)? (?P<timezone>[\+\d]+) \[(?P<level>[^\]]+)\] (?P<message>.+)$'
- template:
source: timestamp
template: '{{ .datetime }} {{ .timezone }}'
- timestamp:
source: timestamp
format: 2006-01-02 15:04:05 -0700
- template:
source: level
template: '{{ if .level }}{{ .level }}{{ else }}notice{{ end }}'
- template:
source: level
template: '{{ ToLower .level }}'
- template:
source: level
template: '{{ regexReplaceAllLiteral "warn" .Value "warning" }}'
- labels:
level:
- output:
source: message

View File

@ -19,7 +19,7 @@ execute 'apt-get update' do
not_if 'which docker'
end
%w(docker-ce docker-ce-cli containerd.io).each do |p|
%w(docker-ce docker-ce-cli containerd.io docker-compose-plugin).each do |p|
package p
end

View File

@ -1,4 +1,10 @@
# Ignore the certificate
directory '/etc/docker/' do
owner 'root'
group 'root'
mode '0600'
end
remote_file '/etc/docker/daemon.json' do
owner 'root'
group 'root'

View File

@ -0,0 +1,9 @@
# -------------------------------------------
# Specifying the default settings:
# -------------------------------------------
node.reverse_merge!({
'everun' => {
'FQDN' => 'everun.club',
'production' => true
}
})

View File

@ -0,0 +1,5 @@
include_recipe './attributes.rb'
if node['everun']['production']
include_recipe './nginx.rb'
end

View File

@ -0,0 +1 @@
@reboot webadm cp -pr /home/webadm/works/everun/* /var/www/everun/

38
cookbooks/everun/nginx.rb Normal file
View File

@ -0,0 +1,38 @@
# Create the nginx directory:
%w( everun test-everun ).each do |d|
directory "/var/www/#{d}" do
owner 'www-data'
group 'webadm'
mode '770'
end
end
# Add the fstab entry:
file '/etc/fstab' do
action :edit
block do |content|
content << "tmpfs /var/www/everun tmpfs size=250m,noatime 0 0\n"
end
not_if 'grep /var/www/everun /etc/fstab'
notifies :run, 'execute[mount -a]'
end
execute 'mount -a' do
action :nothing
end
remote_file '/etc/cron.d/everun-blog' do
owner 'root'
group 'root'
mode '644'
end
# Create storage directory for blog data
directory '/home/webadm/works/everun' do
owner 'webadm'
group 'webadm'
mode '775'
end

View File

@ -1,18 +0,0 @@
# -------------------------------------------
# Specifying the default settings:
# -------------------------------------------
node.reverse_merge!({
'gitea' => {
'url' => 'https://github.com/go-gitea/gitea/releases/download/',
'prefix' => 'gitea-',
'postfix' => '-linux-amd64',
'storage' => '/opt/gitea/',
'location' => '/usr/local/bin/'
},
'go-mmproxy' => {
'url' => 'https://github.com/path-network/go-mmproxy/releases/',
'bin_url' => 'https://github.com/path-network/go-mmproxy/releases/download/2.0/go-mmproxy-2.0-centos8-x86_64',
'storage' => '/opt/go-mmproxy/',
'location' => '/usr/local/bin/'
},
})

View File

@ -1,10 +0,0 @@
# Loading the attributes:
include_recipe './attributes.rb'
# Install:
include_recipe './install.rb'
include_recipe './install-go-mmproxy.rb'
# Setup:
include_recipe './setup.rb'
include_recipe './setup-go-mmproxy.rb'

View File

@ -1,12 +0,0 @@
{
"service": {
"name": "go-mmproxy",
"port": 50021,
"check":{
"tcp": "localhost:50021",
"interval": "60s",
"timeout": "1s",
"success_before_passing": 3
}
}
}

View File

@ -1,78 +0,0 @@
APP_NAME = Gitea: Git with a cup of tea
RUN_USER = git
RUN_MODE = prod
[oauth2]
JWT_SECRET = Cyb3GmSaoJpkaHhA5X6wiNCK7KsngKEr6w_v37WZ1a4
[security]
INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE1NjMzNzYxNzR9.Z8_xg9eBZt8fSMTQLQB2xxGUx7GB5M3_v_Tsc441LOg
INSTALL_LOCK = true
SECRET_KEY = Br3eWgKaVIvM2TiHgvRnDbeZTSvBeVxSOS2VbjsPiyZ8Egigqre4dq0ZqaIKoxlB
[database]
DB_TYPE = mysql
HOST = 192.168.10.200:3307
NAME = gitea
USER = root
PASSWD = Holiday88
SSL_MODE = disable
PATH = /var/lib/gitea/data/gitea.db
[repository]
ROOT = /var/lib/git
[server]
SSH_DOMAIN = gitea.kazu634.com
DOMAIN = gitea.kazu634.com
HTTP_PORT = 3000
ROOT_URL = https://gitea.kazu634.com/
DISABLE_SSH = false
SSH_PORT = 50022
LFS_START_SERVER = true
LFS_CONTENT_PATH = /var/lib/gitea/data/lfs
LFS_JWT_SECRET = hcxZi2iadhyYTdRtAOJXXWPckR-lK2rFHPCbA1isvV0
OFFLINE_MODE = false
[mailer]
ENABLED = false
[service]
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
DISABLE_REGISTRATION = true
ALLOW_ONLY_EXTERNAL_REGISTRATION = false
ENABLE_CAPTCHA = true
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = false
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.example.org
[picture]
DISABLE_GRAVATAR = false
ENABLE_FEDERATED_AVATAR = true
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
[session]
PROVIDER = file
[log]
MODE = file
LEVEL = Info
ROOT_PATH = /var/lib/gitea/log
[other]
SHOW_FOOTER_VERSION = false
[attachment]
ENABLED = true
ALLOWED_TYPES = */*
MAX_SIZE = 1024
MAX_FILES = 25
[metrics]
ENABLED = true

View File

@ -1,26 +0,0 @@
settings {
logfile = "/var/log/lsyncd/lsyncd.log",
statusFile = "/var/log/lsyncd/lsyncd.status",
statusInterval = 20,
nodaemon = false
}
sync {
default.rsync,
source = "/var/lib/git/",
target = "admin@192.168.10.200:/volume1/Shared/AppData/gitea/git/",
rsync = {
archive = true,
compress = true
}
}
sync {
default.rsync,
source = "/var/lib/gitea/",
target = "admin@192.168.10.200:/volume1/Shared/AppData/gitea/gitea-data/",
rsync = {
archive = true,
compress = true
}
}

View File

@ -1,10 +0,0 @@
[program:gitea]
command=/usr/local/bin/gitea web -c /etc/gitea/app.ini
user=git
stdout_logfile=/var/log/supervisor/gitea.log
environment=GITEA_WORK_DIR="/var/lib/gitea/", HOME="/home/git", USER="git"
redirect_stderr=true
stdout_logfile_maxbytes=1MB
stdout_logfile_backups=5
autorestart=true
stopsignal=TERM

View File

@ -1,17 +0,0 @@
[Unit]
Description=go-mmproxy
After=network.target
[Service]
Type=simple
LimitNOFILE=65535
ExecStartPost=/sbin/ip rule add from 127.0.0.1/8 iif lo table 123
ExecStartPost=/sbin/ip route add local 0.0.0.0/0 dev lo table 123
ExecStart=/usr/local/bin/go-mmproxy -l 0.0.0.0:50021 -4 127.0.0.1:10022 -v 2
ExecStopPost=/sbin/ip rule del from 127.0.0.1/8 iif lo table 123
ExecStopPost=/sbin/ip route del local 0.0.0.0/0 dev lo table 123
Restart=on-failure
RestartSec=10s
[Install]
WantedBy=multi-user.target

View File

@ -1,12 +0,0 @@
[Unit]
Description=Grafana Promtail
Documentation=https://github.com/grafana/loki
After=network-online.target
[Service]
User=root
Restart=always
ExecStart=/usr/local/bin/promtail --config.file=/etc/promtail/gitea.yaml
[Install]
WantedBy=multi-user.target

View File

@ -1,6 +0,0 @@
# gitea public key
command="/usr/local/bin/gitea --config=\"/etc/gitea/app.ini\" serv key-4",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKaziOfXcs96+p5WU67S/v3RD2HvuRN9iqROba8REj3fZygCrVHnboF6I3O5fmO7FXh2Nj8iLW/aQT0LxondM2hch67g6D4sM4qcshriYYRfMHTc+w7jVE6bhzpl78kCUM/Scy/IwCXqMNwWDoji8Yt2MMIBsAoUPhP1DdseHsBpxXDtKVcaHy35SM+uEsl34yvcXiobitYtrclxI8D7AiRHQ77VoHzlv8m93WFKBYlJ4JbtaQpVPncpJzcqhs1gD0eIHCHHF8xg8VsrDyiWVBoh+4ixnr+HYUbhRRBalvDuGdgFdccDt1RIWWrlZNelRecR1LNgyvWL5x9H/4YMh9 WorkingCopy@KazuhirosiPad-24032019
# gitea public key
command="/usr/local/bin/gitea --config=\"/etc/gitea/app.ini\" serv key-5",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxKUdftBP05WHbz2wIbYqhgYpmvR/tcIrnVngM2vH3hvbFfS6Es2TBswqTml5+gRzyZrjaii3rJaNfQxcXEfW8lPHzp3weMDBgNrcuVby5Nix5N7EeEoPZyzPk1BvpzoIudE/zIO++ttpTIS3uMBLcqCny4M/mY8IHiLs/c1osP7nQ1QA96xBHTk3xxr9vVbVyCI68uQ79aumJbhP/nKO068HmBJ5M+4kRLNQ6US6dvd8/zbf2tyi0SqCJcLrUvF2AINlIc9T3oApftYdrcZpNeexQdb4HYkH4lwQg4oWbCMH/iDgc8KLJR21nXLZZrVkbSxcDvwcYsMeGwZrVOpuR Chef
# gitea public key
command="/usr/local/bin/gitea --config=\"/etc/gitea/app.ini\" serv key-9",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuib90/h5aKtL411mOOTe7Ja5udeSTfF9mmTVuVsw5TEbOZPEI0O+PWuCCg6DKHVtAb0POoBjg+I8V4NS2VGIAur0mUyKIY7Zadk/3Y/jdbEtS0IGMwdJOgfTEBFvNNXhx+di3cUPTOvSBlnXpIi3vMetzOmqXvb285RUTcPlmLQsdpnJGcShnWIwUXKqWwQB5QZ8MREPgdGedON4yyWsOIrCVJJjBRCwyWCbLQTNE6TKoWKauabPtNgdqFFcBmp6NYfR8Ob2qp0RVq2vi8FFxoEaFFbJUHlJIbiInVypPf3zwpXx8Gdw+Rr7Hs8YAGCjEqE8J8ZI0iXDhaE4HcrQPQ== kazu634@macbookpro.local

View File

@ -1,29 +0,0 @@
# Download:
# Use a local variable instead of a top-level constant: the gitea recipe in
# this cookbook also defines `TMP`, and including both recipes in one run
# would trigger an "already initialized constant" warning and reassignment.
tmp_path = '/tmp/go-mmproxy'

# Fetch the binary only when it is not installed yet.
execute "wget #{node['go-mmproxy']['bin_url']} -O #{tmp_path}" do
  not_if "test -e #{node['go-mmproxy']['storage']}/go-mmproxy"
end

# Install:
directory node['go-mmproxy']['storage'] do
  owner 'root'
  group 'root'
  mode '755'
end

execute "mv #{tmp_path} #{node['go-mmproxy']['storage']}/go-mmproxy" do
  not_if "test -e #{node['go-mmproxy']['storage']}/go-mmproxy"
end

# Change Owner and Permissions:
file "#{node['go-mmproxy']['storage']}/go-mmproxy" do
  owner 'root'
  group 'root'
  mode '755'
end

# Create Link
link "#{node['go-mmproxy']['location']}/go-mmproxy" do
  to "#{node['go-mmproxy']['storage']}/go-mmproxy"
end

View File

@ -1,55 +0,0 @@
gitea_url = ''
gitea_bin = ''
vtag = ''
tag = ''

# Resolve the latest release tag and build the download URL.
# GitHub redirects /releases/latest to /releases/tag/vX.Y.Z, so the tag is
# scraped out of the response body.
begin
  require 'net/http'

  uri = URI.parse('https://github.com/go-gitea/gitea/releases/latest')

  Timeout.timeout(3) do
    response = Net::HTTP.get_response(uri)

    vtag = Regexp.last_match(1) if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)}
    tag = vtag.sub(/^v/, '')

    gitea_bin = "#{node['gitea']['prefix']}#{tag}#{node['gitea']['postfix']}"
    gitea_url = "#{node['gitea']['url']}/#{vtag}/#{gitea_bin}"
  end
rescue StandardError => e
  # Abort the chef client process. Include the real cause instead of
  # hiding every failure behind a (previously misleading) connect error.
  raise "Cannot determine the latest gitea release from https://github.com: #{e.class}: #{e.message}"
end

# Check the installed version to decide whether an update is needed.
result = run_command("gitea --version 2>&1 | grep #{tag}", error: false)

if result.exit_status != 0
  # Download:
  # Local variable, not a constant: assigning a constant inside a
  # conditional, and colliding with TMP in the go-mmproxy recipe, both
  # cause Ruby warnings.
  tmp_path = "/tmp/#{gitea_bin}"

  execute "wget #{gitea_url} -O #{tmp_path}"

  # Install:
  directory node['gitea']['storage'] do
    owner 'root'
    group 'root'
    mode '755'
  end

  execute "mv #{tmp_path} #{node['gitea']['storage']}/gitea"

  # Change Owner and Permissions:
  file "#{node['gitea']['storage']}/gitea" do
    owner 'root'
    group 'root'
    mode '755'
  end

  # Create Link
  link "#{node['gitea']['location']}/gitea" do
    to "#{node['gitea']['storage']}/gitea"
  end
end

View File

@ -1,43 +0,0 @@
# Deploy the `systemd` unit for `go-mmproxy`:
remote_file '/etc/systemd/system/go-mmproxy.service' do
  owner 'root'
  group 'root'
  mode '644'

  notifies :restart, 'service[go-mmproxy]'
end

service 'go-mmproxy' do
  action [ :enable, :restart ]
end

# Deploy the `consul` service configuration for `go-mmproxy`:
remote_file '/etc/consul.d/service-go-mmproxy.json' do
  owner 'consul'
  group 'consul'
  mode '644'

  notifies :reload, 'service[consul]'
end

service 'consul' do
  action :nothing
end

# Firewall settings here:
%w( 50021/tcp ).each do |p|
  execute "ufw allow #{p}" do
    user 'root'

    # LANG=C (not "c" — an invalid locale) so grep matches untranslated output.
    not_if "LANG=C ufw status | grep #{p}"

    notifies :run, 'execute[ufw reload-or-enable]'
  end
end

execute 'ufw reload-or-enable' do
  user 'root'
  command 'LANG=C ufw reload | grep skipping && ufw --force enable || exit 0'
  action :nothing
end

View File

@ -1,135 +0,0 @@
# Create the `git` user that owns the gitea data and repositories:
user 'git' do
  create_home true
  home '/home/git/'
  system_user true
  shell '/bin/bash'
end

directory '/home/git/.ssh/' do
  owner 'git'
  group 'git'
  mode '0700'
end

remote_file '/home/git/.ssh/authorized_keys' do
  owner 'git'
  group 'git'
  mode '0600'
end

# Create `/etc/gitea/`:
%w(/etc/gitea).each do |d|
  directory d do
    owner 'root'
    group 'root'
    mode '0755'
  end
end

%w(/var/lib/git /var/lib/gitea).each do |d|
  directory d do
    owner 'git'
    group 'git'
    mode '0755'
  end
end

# Seed the data directories from the NAS backup on first provisioning only:
execute 'rsync -vrz --delete admin@192.168.10.200:/volume1/Shared/AppData/gitea/gitea-data/ /var/lib/gitea/' do
  not_if 'test -e /var/lib/gitea/log'
end

execute 'rsync -vrz --delete admin@192.168.10.200:/volume1/Shared/AppData/gitea/git/ /var/lib/git/' do
  not_if 'test -e /var/lib/git/kazu634/'
end

execute 'chown -R git:git /var/lib/gitea/'
execute 'chown -R git:git /var/lib/git/'

# Deploy `app.ini`:
remote_file '/etc/gitea/app.ini' do
  owner 'git'
  group 'git'
  mode '644'
end

# Deploy the `supervisord` config:
remote_file '/etc/supervisor/conf.d/gitea.conf' do
  owner 'root'
  group 'root'
  mode '644'

  notifies :restart, 'service[supervisor]'
end

service 'supervisor' do
  action :nothing
end

# Deploy the `consul` service configuration for `gitea`:
remote_file '/etc/consul.d/service-gitea.json' do
  owner 'consul'
  group 'consul'
  mode '644'

  notifies :reload, 'service[consul]'
end

service 'consul' do
  action :nothing
end

# Deploy the `promtail` configuration for `gitea`:
template '/etc/promtail/gitea.yaml' do
  owner 'root'
  group 'root'
  mode '644'

  variables(HOSTNAME: node[:hostname], LOKIENDPOINT: node['promtail']['lokiendpoint'])

  notifies :restart, 'service[promtail-gitea]'
end

# Deploy the `systemd` unit for `promtail-gitea`:
remote_file '/etc/systemd/system/promtail-gitea.service' do
  owner 'root'
  group 'root'
  mode '644'
end

# Service setting:
service 'promtail-gitea' do
  action [ :enable, :restart ]
end

# Deploy the `lsyncd` configuration that mirrors the gitea data to the NAS:
remote_file '/etc/lsyncd/lsyncd.conf.lua' do
  owner 'root'
  group 'root'
  mode '644'
end

# Service setting:
service 'lsyncd' do
  action [ :enable, :restart ]
end

# Firewall settings here:
%w( 3000/tcp ).each do |p|
  execute "ufw allow #{p}" do
    user 'root'

    # LANG=C (not "c" — an invalid locale) so grep matches untranslated output.
    not_if "LANG=C ufw status | grep #{p}"

    notifies :run, 'execute[ufw reload-or-enable]'
  end
end

execute 'ufw reload-or-enable' do
  user 'root'
  command 'LANG=C ufw reload | grep skipping && ufw --force enable || exit 0'
  action :nothing
end

View File

@ -1,61 +0,0 @@
server:
disable: true
positions:
filename: /var/opt/promtail/promtail_gitea_position.yaml
clients:
- url: http://<%= @LOKIENDPOINT %>/loki/api/v1/push
scrape_configs:
- job_name: gitea
static_configs:
- targets:
- localhost
labels:
job: gitea
hostname: <%= @HOSTNAME %>
vhost: gitea.kazu634.com
__path__: /var/log/supervisor/gitea.log
pipeline_stages:
- match:
selector: '{job="gitea"}'
stages:
- drop:
expression: "(Static|robots.txt|sitemap.xml)"
- regex:
expression: '^\[Macaron\] (?P<timestamp>[0-9]+\-[0-9]+\-[0-9]+ +[0-9]+:[0-9]+:[0-9]+): (?P<message1>[^\/]+) (?P<uri>\/[^ ]*) (?P<response>[^ ]+) (?P<message2>.+)$'
- timestamp:
source: timestamp
format: 2006-01-02 15:04:05
location: Asia/Tokyo
- template:
source: message
template: '{{ .message1 }} {{ .uri }} ({{ .message2 }})'
- template:
source: level
template: '{{ .response }}'
- template:
source: level
template: '{{ regexReplaceAllLiteral "(2[0-9]+|3[0-9]+|for)" .Value "info" }}'
- template:
source: level
template: '{{ regexReplaceAllLiteral "4[0-9]+" .Value "warning" }}'
- template:
source: level
template: '{{ regexReplaceAllLiteral "5[0-9]+" .Value "error" }}'
- labels:
level:
- output:
source: message

View File

@ -9,6 +9,9 @@
# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}
# force migration will run migrations that might cause dataloss
;force_migration = false
#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
@ -64,9 +67,25 @@
;cert_file =
;cert_key =
# Unix socket gid
# Changing the gid of a file without privileges requires that the target group is in the group of the process and that the process is the file owner
# It is recommended to set the gid as http server user gid
# Not set when the value is -1
;socket_gid =
# Unix socket mode
;socket_mode =
# Unix socket path
;socket =
# CDN Url
;cdn_url =
# Sets the maximum time using a duration format (5s/5m/5ms) before timing out read of an incoming request and closing idle connections.
# `0` means there is no timeout for reading the request.
;read_timeout = 0
#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
@ -84,9 +103,16 @@ password = 123qwe$%&RTY
# Example: mysql://user:secret@host:port/database
;url =
# For "postgres" only, either "disable", "require" or "verify-full"
# For "postgres", use either "disable", "require" or "verify-full"
# For "mysql", use either "true", "false", or "skip-verify".
;ssl_mode = disable
# Database drivers may support different transaction isolation levels.
# Currently, only "mysql" driver supports isolation levels.
# If the value is empty - driver's default isolation level is applied.
# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE".
;isolation_level =
;ca_cert_path =
;client_key_path =
;client_cert_path =
@ -110,6 +136,20 @@ password = 123qwe$%&RTY
# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
;cache_mode = private
# For "mysql" only if migrationLocking feature toggle is set. How many seconds to wait before failing to lock the database for the migrations, default is 0.
;locking_attempt_timeout_sec = 0
# For "sqlite" only. How many times to retry query in case of database is locked failures. Default is 0 (disabled).
;query_retries = 0
# For "sqlite" only. How many times to retry transaction in case of database is locked failures. Default is 5.
;transaction_retries = 5
################################### Data sources #########################
[datasources]
# Upper limit of data sources that Grafana will return. This limit is a temporary configuration and it will be deprecated when pagination will be introduced on the list data sources API.
;datasource_limit = 5000
#################################### Cache server #############################
[remote_cache]
# Either "redis", "memcached" or "database" default is "database"
@ -127,10 +167,13 @@ password = 123qwe$%&RTY
# This enables data proxy logging, default is false
;logging = false
# How long the data proxy waits before timing out, default is 30 seconds.
# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds.
# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
;timeout = 30
# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds.
;dialTimeout = 10
# How many seconds the data proxy waits before sending a keepalive probe request.
;keep_alive_seconds = 30
@ -143,6 +186,11 @@ password = 123qwe$%&RTY
# waiting for the server to approve.
;expect_continue_timeout_seconds = 1
# Optionally limits the total number of connections per host, including connections in the dialing,
# active, and idle states. On limit violation, dials will block.
# A value of zero (0) means no limit.
;max_conns_per_host = 0
# The maximum number of idle connections that Grafana will keep alive.
;max_idle_connections = 100
@ -152,6 +200,12 @@ password = 123qwe$%&RTY
# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false.
;send_user_header = false
# Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests.
;response_limit = 0
# Limits the number of rows that Grafana will process from SQL data sources.
;row_limit = 1000000
#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
@ -160,19 +214,50 @@ password = 123qwe$%&RTY
# Change this option to false to disable reporting.
;reporting_enabled = true
# Set to false to disable all checks to https://grafana.net
# for new versions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs
;reporting_distributor = grafana-labs
# Set to false to disable all checks to https://grafana.com
# for new versions of grafana. The check is used
# in some UI views to notify that a grafana update exists.
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
# only a GET request to https://raw.githubusercontent.com/grafana/grafana/main/latest.json to get the latest version.
;check_for_updates = true
# Set to false to disable all checks to https://grafana.com
# for new versions of plugins. The check is used
# in some UI views to notify that a plugin update exists.
# This option does not cause any auto updates, nor send any information
# only a GET request to https://grafana.com to get the latest versions.
;check_for_plugin_updates = true
# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =
# Google Analytics 4 tracking code, only enabled if you specify an id here
;google_analytics_4_id =
# When Google Analytics 4 Enhanced event measurement is enabled, we will try to avoid sending duplicate events and let Google Analytics 4 detect navigation changes, etc.
;google_analytics_4_send_manual_page_views = false
# Google Tag Manager ID, only enabled if you specify an id here
;google_tag_manager_id =
# Rudderstack write key, enabled only if rudderstack_data_plane_url is also set
;rudderstack_write_key =
# Rudderstack data plane url, enabled only if rudderstack_write_key is also set
;rudderstack_data_plane_url =
# Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set
;rudderstack_sdk_url =
# Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config
;rudderstack_config_url =
# Controls if the UI contains any links to user feedback forms
;feedback_links_enabled = true
#################################### Security ####################################
[security]
# disable creation of admin user on first start of grafana
@ -184,9 +269,18 @@ password = 123qwe$%&RTY
# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin
# default admin email, created on startup
;admin_email = admin@localhost
# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm
# current key provider used for envelope encryption, default to static value specified by secret_key
;encryption_provider = secretKey.v1
# list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1
;available_encryption_providers =
# disable gravatar profile images
;disable_gravatar = false
@ -206,7 +300,6 @@ password = 123qwe$%&RTY
;allow_embedding = false
# Set to true if you want to enable http strict transport security (HSTS) response header.
# This is only sent when HTTPS is enabled in this configuration.
# HSTS tells browsers that the site should only be accessed using HTTPS.
;strict_transport_security = false
@ -228,12 +321,39 @@ password = 123qwe$%&RTY
# when they detect reflected cross-site scripting (XSS) attacks.
;x_xss_protection = true
# Enable adding the Content-Security-Policy header to your requests.
# CSP allows to control resources the user agent is allowed to load and helps prevent XSS attacks.
;content_security_policy = false
# Set Content Security Policy template used when adding the Content-Security-Policy header to your requests.
# $NONCE in the template includes a random nonce.
# $ROOT_PATH is server.root_url without the protocol.
;content_security_policy_template = """script-src 'self' 'unsafe-eval' 'unsafe-inline' 'strict-dynamic' $NONCE;object-src 'none';font-src 'self';style-src 'self' 'unsafe-inline' blob:;img-src * data:;base-uri 'self';connect-src 'self' grafana.com ws://$ROOT_PATH wss://$ROOT_PATH;manifest-src 'self';media-src 'none';form-action 'self';"""
# Controls if old angular plugins are supported or not. This will be disabled by default in future release
;angular_support_enabled = true
# List of additional allowed URLs to pass by the CSRF check, separated by spaces. Suggested when authentication comes from an IdP.
;csrf_trusted_origins = example.com
# List of allowed headers to be set by the user, separated by spaces. Suggested to use for if authentication lives behind reverse proxies.
;csrf_additional_headers =
[security.encryption]
# Defines the time-to-live (TTL) for decrypted data encryption keys stored in memory (cache).
# Please note that small values may cause performance issues due to a high frequency decryption operations.
;data_keys_cache_ttl = 15m
# Defines the frequency of data encryption keys cache cleanup interval.
# On every interval, decrypted data encryption keys that reached the TTL are removed from the cache.
;data_keys_cache_cleanup_interval = 1m
#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io
;external_snapshot_url = https://snapshots.raintank.io
;external_snapshot_name = Publish to snapshots.raintank.io
# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
# creating and deleting snapshots.
@ -281,6 +401,12 @@ password = 123qwe$%&RTY
# Default UI theme ("dark" or "light")
;default_theme = dark
# Default locale (supported IETF language tag, such as en-US)
;default_locale = en-US
# Path to a custom home page. Users are only redirected to this if the default home dashboard is used. It should match a frontend route and contain a leading slash.
;home_page =
# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
@ -295,10 +421,16 @@ password = 123qwe$%&RTY
# The duration in time a user invitation remains valid before expiring. This setting should be expressed as a duration. Examples: 6h (hours), 2d (days), 1w (week). Default is 24h (24 hours). The minimum supported duration is 15m (15 minutes).
;user_invite_max_lifetime_duration = 24h
# Enter a comma-separated list of users login to hide them in the Grafana UI. These users are shown to Grafana admins and themselves.
; hidden_users =
[auth]
# Login cookie name
;login_cookie_name = grafana_session
# Disable usage of Grafana build-in login solution.
;disable_login = false
# The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation.
;login_maximum_inactive_lifetime_duration =
@ -311,7 +443,7 @@ password = 123qwe$%&RTY
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false
# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
# Set to true to disable the sign out link in the side menu. Useful if you use auth.proxy or auth.jwt, defaults to false
;disable_signout_menu = false
# URL to redirect the user to after sign out
@ -324,12 +456,21 @@ password = 123qwe$%&RTY
# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
;oauth_state_cookie_max_age = 600
# Skip forced assignment of OrgID 1 or 'auto_assign_org_id' for social logins
;oauth_skip_org_role_update_sync = false
# limit of api_key seconds to live before expiration
;api_key_max_seconds_to_live = -1
# Set to true to enable SigV4 authentication option for HTTP-based datasources.
;sigv4_auth_enabled = false
# Set to true to enable verbose logging of SigV4 request signing
;sigv4_verbose_logging = false
# Set to true to enable Azure authentication option for HTTP-based datasources.
;azure_auth_enabled = false
#################################### Anonymous Auth ######################
[auth.anonymous]
# enable anonymous access
@ -357,6 +498,9 @@ password = 123qwe$%&RTY
;allowed_domains =
;team_ids =
;allowed_organizations =
;role_attribute_path =
;role_attribute_strict = false
;allow_assign_grafana_admin = false
#################################### GitLab Auth #########################
[auth.gitlab]
@ -370,6 +514,9 @@ password = 123qwe$%&RTY
;api_url = https://gitlab.com/api/v4
;allowed_domains =
;allowed_groups =
;role_attribute_path =
;role_attribute_strict = false
;allow_assign_grafana_admin = false
#################################### Google Auth ##########################
[auth.google]
@ -405,6 +552,8 @@ password = 123qwe$%&RTY
;token_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/token
;allowed_domains =
;allowed_groups =
;role_attribute_strict = false
;allow_assign_grafana_admin = false
#################################### Okta OAuth #######################
[auth.okta]
@ -420,6 +569,8 @@ password = 123qwe$%&RTY
;allowed_domains =
;allowed_groups =
;role_attribute_path =
;role_attribute_strict = false
;allow_assign_grafana_admin = false
#################################### Generic OAuth ##########################
[auth.generic_oauth]
@ -429,21 +580,30 @@ password = 123qwe$%&RTY
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;empty_scopes = false
;email_attribute_name = email:primary
;email_attribute_path =
;login_attribute_path =
;name_attribute_path =
;id_token_attribute_name =
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;teams_url =
;allowed_domains =
;team_ids =
;allowed_organizations =
;role_attribute_path =
;role_attribute_strict = false
;groups_attribute_path =
;team_ids_attribute_path =
;tls_skip_verify_insecure = false
;tls_client_cert =
;tls_client_key =
;tls_client_ca =
;use_pkce = false
;auth_style =
;allow_assign_grafana_admin = false
#################################### Basic Auth ##########################
[auth.basic]
@ -458,20 +618,70 @@ password = 123qwe$%&RTY
;sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
;headers = Email:X-User-Email, Name:X-User-Name
# Non-ASCII strings in header values are encoded using quoted-printable encoding
;headers_encoded = false
# Read the auth proxy docs for details on what the setting below enables
;enable_login_token = false
#################################### Auth JWT ##########################
[auth.jwt]
;enabled = true
;header_name = X-JWT-Assertion
;email_claim = sub
;username_claim = sub
;jwk_set_url = https://foo.bar/.well-known/jwks.json
;jwk_set_file = /path/to/jwks.json
;cache_ttl = 60m
;expect_claims = {"aud": ["foo", "bar"]}
;key_file = /path/to/key/file
;role_attribute_path =
;role_attribute_strict = false
;auto_sign_up = false
;url_login = false
;allow_assign_grafana_admin = false
#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true
# prevent synchronizing ldap users organization roles
;skip_org_role_sync = false
# LDAP backround sync (Enterprise only)
# LDAP background sync (Enterprise only)
# At 1 am every day
;sync_cron = "0 0 1 * * *"
;sync_cron = "0 1 * * *"
;active_sync_enabled = true
#################################### AWS ###########################
[aws]
# Enter a comma-separated list of allowed AWS authentication providers.
# Options are: default (AWS SDK Default), keys (Access && secret key), credentials (Credentials field), ec2_iam_role (EC2 IAM Role)
; allowed_auth_providers = default,keys,credentials
# Allow AWS users to assume a role using temporary security credentials.
# If true, assume role will be enabled for all AWS authentication providers that are specified in aws_auth_providers
; assume_role_enabled = true
#################################### Azure ###############################
[azure]
# Azure cloud environment where Grafana is hosted
# Possible values are AzureCloud, AzureChinaCloud, AzureUSGovernment and AzureGermanCloud
# Default value is AzureCloud (i.e. public cloud)
;cloud = AzureCloud
# Specifies whether Grafana hosted in Azure service with Managed Identity configured (e.g. Azure Virtual Machines instance)
# If enabled, the managed identity can be used for authentication of Grafana in Azure services
# Disabled by default, needs to be explicitly enabled
;managed_identity_enabled = false
# Client ID to use for user-assigned managed identity
# Should be set for user-assigned identity and should be empty for system-assigned identity
;managed_identity_client_id =
#################################### Role-based Access Control ###########
[rbac]
;permission_cache = true
#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
@ -491,7 +701,8 @@ password = 123qwe$%&RTY
[emails]
;welcome_email_on_sign_up = false
;templates_pattern = emails/*.html
;templates_pattern = emails/*.html, emails/*.txt
;content_types = text/html
#################################### Logging ##########################
[log]
@ -550,6 +761,40 @@ password = 123qwe$%&RTY
# Syslog tag. By default, the process' argv[0] is used.
;tag =
[log.frontend]
# Should Sentry javascript agent be initialized
;enabled = false
# Defines which provider to use, default is Sentry
;provider = sentry
# Sentry DSN if you want to send events to Sentry.
;sentry_dsn =
# Custom HTTP endpoint to send events captured by the Sentry agent to. Default will log the events to stdout.
;custom_endpoint = /log
# Rate of events to be reported between 0 (none) and 1 (all), float
;sample_rate = 1.0
# Requests per second limit enforced an extended period, for Grafana backend log ingestion endpoint (/log).
;log_endpoint_requests_per_second_limit = 3
# Max requests accepted per short interval of time for Grafana backend log ingestion endpoint (/log).
;log_endpoint_burst_limit = 15
# Should error instrumentation be enabled, only affects Grafana Javascript Agent
;instrumentations_errors_enabled = true
# Should console instrumentation be enabled, only affects Grafana Javascript Agent
;instrumentations_console_enabled = false
# Should webvitals instrumentation be enabled, only affects Grafana Javascript Agent
;instrumentations_webvitals_enabled = false
# Api Key, only applies to Grafana Javascript Agent provider
;api_key = testApiKey
#################################### Usage Quotas ########################
[quota]
; enabled = false
@ -567,6 +812,9 @@ password = 123qwe$%&RTY
# limit number of api_keys per Org.
; org_api_key = 10
# limit number of alerts per Org.
;org_alert_rule = 100
# limit number of orgs a user can create.
; user_org = 10
@ -585,11 +833,75 @@ password = 123qwe$%&RTY
# global limit on number of logged in users.
; global_session = -1
# global limit of alerts
;global_alert_rule = -1
#################################### Unified Alerting ####################
[unified_alerting]
# Enable the Unified Alerting sub-system and interface. When enabled we'll migrate all of your alert rules and notification channels to the new system. New alert rules will be created and your notification channels will be converted into an Alertmanager configuration. Previous data is preserved to enable backwards compatibility but new data is removed.
;enabled = true
# Comma-separated list of organization IDs for which to disable unified alerting. Only supported if unified alerting is enabled.
;disabled_orgs =
# Specify the frequency of polling for admin config changes.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;admin_config_poll_interval = 60s
# Specify the frequency of polling for Alertmanager config changes.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;alertmanager_config_poll_interval = 60s
# Listen address/hostname and port to receive unified alerting messages for other Grafana instances. The port is used for both TCP and UDP. It is assumed other Grafana instances are also running on the same port. The default value is `0.0.0.0:9094`.
;ha_listen_address = "0.0.0.0:9094"
# Listen address/hostname and port to receive unified alerting messages for other Grafana instances. The port is used for both TCP and UDP. It is assumed other Grafana instances are also running on the same port. The default value is `0.0.0.0:9094`.
;ha_advertise_address = ""
# Comma-separated list of initial instances (in a format of host:port) that will form the HA cluster. Configuring this setting will enable High Availability mode for alerting.
;ha_peers = ""
# Time to wait for an instance to send a notification via the Alertmanager. In HA, each Grafana instance will
# be assigned a position (e.g. 0, 1). We then multiply this position with the timeout to indicate how long should
# each instance wait before sending the notification to take into account replication lag.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;ha_peer_timeout = "15s"
# The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated
# across cluster more quickly at the expense of increased bandwidth usage.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;ha_gossip_interval = "200ms"
# The interval between gossip full state syncs. Setting this interval lower (more frequent) will increase convergence speeds
# across larger clusters at the expense of increased bandwidth usage.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;ha_push_pull_interval = "60s"
# Enable or disable alerting rule execution. The alerting UI remains visible. This option has a legacy version in the `[alerting]` section that takes precedence.
;execute_alerts = true
# Alert evaluation timeout when fetching data from the datasource. This option has a legacy version in the `[alerting]` section that takes precedence.
# The timeout string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;evaluation_timeout = 30s
# Number of times we'll attempt to evaluate an alert rule before giving up on that evaluation. This option has a legacy version in the `[alerting]` section that takes precedence.
;max_attempts = 3
# Minimum interval to enforce between rule evaluations. Rules will be adjusted if they are less than this value or if they are not multiple of the scheduler interval (10s). Higher values can help with resource management as we'll schedule fewer evaluations over time. This option has a legacy version in the `[alerting]` section that takes precedence.
# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
;min_interval = 10s
[unified_alerting.reserved_labels]
# Comma-separated list of reserved labels added by the Grafana Alerting engine that should be disabled.
# For example: `disabled_labels=grafana_folder`
;disabled_labels =
#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
# Disable legacy alerting engine & UI features
;enabled = false
# Makes it possible to turn off alert execution but alerting UI is visible
;execute_alerts = true
# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
@ -602,7 +914,6 @@ password = 123qwe$%&RTY
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5
# Default setting for alert calculation timeout. Default value is 30
;evaluation_timeout_seconds = 30
@ -623,6 +934,13 @@ password = 123qwe$%&RTY
;max_annotations_to_keep =
#################################### Annotations #########################
[annotations]
# Configures the batch size for the annotation clean-up job. This setting is used for dashboard, API, and alert annotations.
;cleanupjob_batchsize = 100
# Enforces the maximum allowed length of the tags for any newly introduced annotations. It can be between 500 and 4096 inclusive (which is the respective's column length). Default value is 500.
# Setting it to a higher value would impact performance therefore is not recommended.
;tags_length = 500
[annotations.dashboard]
# Dashboard annotations means that annotations are associated with the dashboard they are created on.
@ -650,8 +968,23 @@ password = 123qwe$%&RTY
# Enable the Explore section
;enabled = true
#################################### Help #############################
[help]
# Enable the Help section
;enabled = true
#################################### Profile #############################
[profile]
# Enable the Profile section
;enabled = true
#################################### Query History #############################
[query_history]
# Enable the Query history
;enabled = true
#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
# Metrics available at HTTP URL /metrics and /metrics/plugins/:pluginId
[metrics]
# Disable / Enable internal metrics
;enabled = true
@ -660,7 +993,7 @@ password = 123qwe$%&RTY
# Disable total stats (stat_totals_*) metrics to be generated
;disable_total_stats = false
#If both are set, basic auth will be required for the metrics endpoint.
#If both are set, basic auth will be required for the metrics endpoints.
; basic_auth_username =
; basic_auth_password =
@ -682,6 +1015,7 @@ password = 123qwe$%&RTY
;url = https://grafana.com
#################################### Distributed tracing ############
# Opentracing is deprecated use opentelemetry instead
[tracing.jaeger]
# Enable by setting the address sending traces to jaeger (ex localhost:6831)
;address = localhost:6831
@ -705,6 +1039,23 @@ password = 123qwe$%&RTY
# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure.
;disable_shared_zipkin_spans = false
[tracing.opentelemetry]
# Attributes that will always be included when creating new spans. ex (key1:value1,key2:value2)
;custom_attributes = key1:value1,key2:value2
[tracing.opentelemetry.jaeger]
# jaeger destination (ex http://localhost:14268/api/traces)
; address = http://localhost:14268/api/traces
# Propagation specifies the text map propagation format: w3c, jaeger
; propagation = jaeger
# This is a configuration for OTLP exporter with GRPC protocol
[tracing.opentelemetry.otlp]
# otlp destination (ex localhost:4317)
; address = localhost:4317
# Propagation specifies the text map propagation format: w3c, jaeger
; propagation = w3c
#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
@ -735,6 +1086,7 @@ password = 123qwe$%&RTY
;account_name =
;account_key =
;container_name =
;sas_token_expiration_days =
[external_image_storage.local]
# does not require any configuration
@ -745,9 +1097,16 @@ password = 123qwe$%&RTY
;server_url =
# If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/.
;callback_url =
# An auth token that will be sent to and verified by the renderer. The renderer will deny any request without an auth token matching the one configured on the renderer side.
;renderer_token = -
# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server,
# which this setting can help protect against by only allowing a certain amount of concurrent requests.
;concurrent_render_request_limit = 30
# Determines the lifetime of the render key used by the image renderer to access and render Grafana.
# This setting should be expressed as a duration. Examples: 10s (seconds), 5m (minutes), 2h (hours).
# Default is 5m. This should be more than enough for most deployments.
# Change the value only if image rendering is failing and you see `Failed to get the render key from cache` in Grafana logs.
;render_key_lifetime = 5m
[panels]
# If set to true Grafana will allow script tags in text panels. Not recommended as it enables XSS vulnerabilities.
@ -756,9 +1115,35 @@ password = 123qwe$%&RTY
[plugins]
;enable_alpha = false
;app_tls_skip_verify_insecure = false
# Enter a comma-separated list of plugin identifiers to identify plugins that are allowed to be loaded even if they lack a valid signature.
# Enter a comma-separated list of plugin identifiers to identify plugins to load even if they are unsigned. Plugins with modified signatures are never loaded.
;allow_loading_unsigned_plugins =
;marketplace_url = https://grafana.com/grafana/plugins/
# Enable or disable installing / uninstalling / updating plugins directly from within Grafana.
;plugin_admin_enabled = false
;plugin_admin_external_manage_enabled = false
;plugin_catalog_url = https://grafana.com/grafana/plugins/
# Enter a comma-separated list of plugin identifiers to hide in the plugin catalog.
;plugin_catalog_hidden_plugins =
#################################### Grafana Live ##########################################
[live]
# max_connections to Grafana Live WebSocket endpoint per Grafana server instance. See Grafana Live docs
# if you are planning to make it higher than default 100 since this can require some OS and infrastructure
# tuning. 0 disables Live, -1 means unlimited connections.
;max_connections = 100
# allowed_origins is a comma-separated list of origins that can establish connection with Grafana Live.
# If not set then origin will be matched over root_url. Supports wildcard symbol "*".
;allowed_origins =
# engine defines an HA (high availability) engine to use for Grafana Live. By default no engine used - in
# this case Live features work only on a single Grafana server. Available options: "redis".
# Setting ha_engine is an EXPERIMENTAL feature.
;ha_engine =
# ha_engine_address sets a connection address for Live HA engine. Depending on engine type address format can differ.
# For now we only support Redis connection address in "host:port" format.
# This option is EXPERIMENTAL.
;ha_engine_address = "127.0.0.1:6379"
#################################### Grafana Image Renderer Plugin ##########################
[plugin.grafana-image-renderer]
@ -803,12 +1188,14 @@ password = 123qwe$%&RTY
# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
;rendering_mode =
# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
# When rendering_mode = clustered, you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
# and will cluster using browser instances.
# Mode 'context' will cluster using incognito pages.
;rendering_clustering_mode =
# When rendering_mode = clustered you can define maximum number of browser instances/incognito pages that can execute concurrently..
# When rendering_mode = clustered, you can define the maximum number of browser instances/incognito pages that can execute concurrently. Default is '5'.
;rendering_clustering_max_concurrency =
# When rendering_mode = clustered, you can specify the duration a rendering request can take before it will time out. Default is `30` seconds.
;rendering_clustering_timeout =
# Limit the maximum viewport width, height and device scale factor that can be requested.
;rendering_viewport_max_width =
@ -825,8 +1212,15 @@ password = 123qwe$%&RTY
;license_path =
[feature_toggles]
# enable features, separated by spaces
;enable =
# There are currently two ways to enable feature toggles in the `grafana.ini`.
# You can either pass a list of the features you want to enable to the `enable` field, or
# configure each toggle by setting the name of the toggle to true/false. Toggles set to true/false
# will take precedence over toggles in the `enable` list.
;enable = feature1,feature2
;feature1 = true
;feature2 = false
[date_formats]
# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
@ -847,3 +1241,31 @@ password = 123qwe$%&RTY
# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
;default_timezone = browser
[expressions]
# Enable or disable the expressions functionality.
;enabled = true
[geomap]
# Set the JSON configuration for the default basemap
;default_baselayer_config = `{
; "type": "xyz",
; "config": {
; "attribution": "Open street map",
; "url": "https://tile.openstreetmap.org/{z}/{x}/{y}.png"
; }
;}`
# Enable or disable loading other base map layers
;enable_custom_baselayers = true
# Move an app plugin referenced by its id (including all its pages) to a specific navigation section
# Dependencies: needs the `topnav` feature to be enabled
[navigation.app_sections]
# The following will move an app plugin with the id of `my-app-id` under the `starred` section
# my-app-id = admin
# Move a specific app plugin page (referenced by its `path` field) to a specific navigation section
[navigation.app_standalone_pages]
# The following will move the page with the path "/a/my-app-id/starred-content" from `my-app-id` to the `starred` section
# /a/my-app-id/starred-content = starred

View File

@ -13,7 +13,7 @@ begin
Timeout.timeout(3) do
response = Net::HTTP.get_response(uri)
vtag = $1 if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)}
vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)}
tag = vtag.sub(/^v/, '')
loki_bin = "#{node['loki']['zip']}"

View File

@ -89,11 +89,6 @@ remote_file '/etc/logrotate.d/loki' do
mode '644'
end
# Restart the `supervisor`:
service 'supervisor' do
action :nothing
end
# Firewall settings here:
%w( 3100/tcp ).each do |p|
execute "ufw allow #{p}" do

View File

@ -3,8 +3,8 @@
# -------------------------------------------
node.reverse_merge!({
'nginx' => {
'version' => '1.21.3',
'skip_lego' => 'false',
'skip_webadm' => 'false'
'version' => '1.25.0',
'skip_lego' => 'true',
'skip_webadm' => 'true'
}
})

View File

@ -38,7 +38,7 @@ begin
Timeout.timeout(3) do
response = Net::HTTP.get_response(uri)
if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)}
if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)}
vtag = $1
tag_version = vtag.sub('v', '')
@ -78,7 +78,7 @@ directory MODULEDIR do
end
# Build starts here:
execute "#{NGINXBUILD} -d working -v #{version} -c configure.sh -zlib -pcre -openssl -opensslversion=1.1.1d" do
execute "#{NGINXBUILD} -d working -v #{version} -c configure.sh -zlib -pcre -libressl -libresslversion 3.8.0" do
cwd WORKDIR
user USER

View File

@ -33,12 +33,14 @@ end
end
# Prerequisites for Building nginx:
if node['nginx']['skip_webadm']
if !node['nginx']['skip_webadm']
include_recipe './webadm.rb'
include_recipe '../blog/default.rb'
include_recipe '../everun/default.rb'
end
# Install Let's Encrypt:
if node['nginx']['skip_lego']
if !node['nginx']['skip_lego']
include_recipe './lego.rb'
end

View File

@ -1,84 +0,0 @@
#####################################
# LEGO Settings
#####################################
execute "#{LEGO_STORAGE}/lego_run.sh" do
user 'root'
cwd LEGO_STORAGE
not_if "test -d #{LEGO_STORAGE}/.lego"
end
encrypted_remote_file '/etc/cron.d/lego' do
owner 'root'
group 'root'
mode '644'
source 'files/etc/cron.d/lego'
password ENV['ITAMAE_PASSWORD']
end
remote_file "/etc/lego/dhparams_4096.pem" do
owner 'root'
group 'root'
mode '444'
end
execute "openssl rand 48 > /etc/lego/ticket.key"
#####################################
# Deploy nginx Settings
#####################################
# Deploy the `sudoers` file:
remote_file '/etc/sudoers.d/webadm' do
owner 'root'
group 'root'
mode '440'
end
# Create directories:
%w(/home/webadm/.ssh /home/webadm/repo).each do |d|
directory d do
owner 'webadm'
group 'webadm'
mode '700'
end
end
# Deploy `~/.ssh/.ssh/authorized_keys`:
encrypted_remote_file '/home/webadm/.ssh/authorized_keys' do
owner 'webadm'
group 'webadm'
mode '600'
source 'files/home/webadm/.ssh/authorized_keys'
password ENV['ITAMAE_PASSWORD']
end
# Deploy secret keys
%w( id_rsa.github id_rsa.chef ).each do |conf|
encrypted_remote_file "/home/webadm/.ssh/#{conf}" do
owner 'webadm'
group 'webadm'
mode '600'
source "files/home/webadm/.ssh/#{conf}"
password ENV['ITAMAE_PASSWORD']
end
end
# Create `repo` directory:
git '/home/webadm/repo/nginx-config' do
user 'webadm'
repository 'https://gitea.kazu634.com/kazu634/nginx-config.git'
end
execute '/home/webadm/repo/nginx-config/deploy.sh' do
user 'root'
cwd '/home/webadm/repo/nginx-config/'
end
service 'consul-template' do
action :restart
end
service 'nginx' do
action :restart
end

View File

@ -0,0 +1,10 @@
md5:e2c4b92cac6937e5c2e14bcb166748cf:salt:35-2-158-147-217-138-24-188:aes-256-cfb:m5WUGUv4kMl3U4EpsDCZbTmqfDQEp3CGzBk84671Dhxt0rRtETnCY2ECGD7W
+O9MMKk0jCDCUxz7EZoggsHQL40dwvcCKs5qgcFFmZYOMygfxBVJ+cqBZ//0
Zdav0tp1Qc3ejX2x3kmZBgAn4WCRVCmIZYtPYj0w4nrAohXSITJOo6MKNfsB
ASvoywRNHTYJAxT/UrYrJudR3Yq2a0gIcVgGZAYBKOUb2syMTixo245x128p
pX2QtcHjE87g9uGeUVWLkIM9m5uvBGULgdKknO03PXF0jWHxQvv/RRN+aG0H
To70zhqrlJWibKlO9PgPyVhoQSgxBG9i2f18hw2Kcnr0xSYvfC3yfkvem5C2
Zgpj+xRIfbB6tw7k/ePdguBJ5e94Y5nDtavMr58Wxgtnleyc3/k/iRgK1wpD
BUrf83ZWMt3QPwDL4J5npo+4YDCObrsvO3BD14XMUHpSpCvVdKCnMnngQdRt
7TERfhMMRCPcHbUD9gFh6HcsT+GzU6a9iwyJ03nYweWB/nXGGfwnTfrklwfJ
CuTSFnA=

View File

@ -0,0 +1,10 @@
md5:3429dd1d1b7fae6ff356c639afaeaa7c:salt:114-48-239-183-69-3-57-50:aes-256-cfb:/mKhySMGT7hiRIYO45LOqBxEmwI6wCQKvrwdK+sOJq5p5xbn7wDiYwUWnhGT
feCcW0iiVS0Qq5Wpnf01KTBaQWPditaR/CBYxCToV0EZ+7lA6HUTaX7qELGP
nPTkPn6CmTgW7I/kI9XfkeeSbT0Ti+2xo3XSpce1kftGp67aBcxM6XLSCKiS
IUMFoQIBHbUlJxJ5y6vj3uA/2v/r99y/dHymoKS695abnFPfq6rDqnJC7PKe
wEeLoObLSauqgnTF4CZUgZxaSSVUCNRjkV3WTHiu3UIEsHjiwJFBqJfWzVr9
dvgzZAFt5YUwwGHEhZjtO66/Tp8Po4SZzRRDbftCBSS8nIZQ66qYwmKHGK98
eYOFtpbQYnVMJKWd+orSDse61CcaT2tPgTZ4fjln/a4Ru4V5Kr4/HRyRmn6J
bIzBVuBVuh8T0oh36GSefSjfL7KyProS4waFlX53qwrMPHBmP873cJ2ZO1GN
HYk2QEUPP1BWEWiv9kfNu6mZPKVHIL7CEkvOAxlDWaKjgll5eNbbfzDw1hh3
Hn3RPGs=

View File

@ -1,10 +0,0 @@
md5:c198ab6d9a81886d9cdf034ced32690d:salt:60-164-230-14-22-35-114-134:aes-256-cfb:atMw5FrhPmOlYIuYK/874SqBpeIBEjWtsDIyOFJzrmWywTHKczcGsev1hngX
zKyL249Q78t+aoFmNIBxZq/SxToTPbeP5hsBs5ELn2IVHwR0/uBfJD4e0irN
cGfeOMDo8n7CLwYruoAzHeTlFxJUVzZg49h5ZdlIlWjnOsgXffdi8SRvD2v8
hBFppx5ynNaI7Cue4YBf0DtjejuWiimXZZz0GkDjHHHK5ie2/BdHUGrLLPEQ
o8ZvVUxUhxQ3Kk0flqiwlXVKZs16qB589lG4nCJ5NV0KbkAdJ1GPHK0+yBTz
D21Cz0ilals+WhrVhFXZjZyUM73auhCXJC98vffdrhDfoQZyZTUD3NGWpgnP
Jy1T4nEi5HhXKgvvBEob2M85BfBlP1x1ll4ear/c+18Uf98k08/Rsya4xYqf
W6Uq4RHeVEgK4QsLbWxENhstjpc/RIW0yds6WEdJzwayu5MSVeQ4A24HALke
Kxv5MUs81lzlMjwBWmv+AAgdn842A1OpkHPoqRUr6incxQwkqRvSDQw8R02Y
MJovgrblcRVpPo/HYsdGkqAlEv4FcCK3XRd8yiczghUlkhNokB8=

View File

@ -0,0 +1,8 @@
md5:c97addd9484611e9038f4d21490f95ef:salt:46-243-167-154-98-197-19-76:aes-256-cfb:MulsiIrRht0HvexrIXKc6q6pW9B4LnSaNB3FQyOghiAmaQKafmjvaPycv3nl
O/2FKcYHZ9g4sRysBo9t/Yttd7Q+ytGKz5MWG0w7vddvVsijaBjcqltS5Zvh
r6gTozBur13iBqsk7AYlU/wjyH62Zdgmo0rJBHp70Zqx4Bk81bDrqHbypzcK
XcM1Qg1jU1Y0bJgUyCLkpTYOjtNBug0sRYQ/Slv0/UbzgEA5WtTO7sRAEPuj
Y0qvUJVDz+0zYRinOwCOA+IGARqB5GsDtQ4YgGR9kKSmoUPPRSjIg7xSKB0S
rn1CUSjKEbmPIHeOMWSg7CXmOzzVPMTNqM6MLjGHmOyWGSDPvwRiPI5AacNu
AmOsFNY2EiWUJolrz5RpZZXjkGFmcwnxn+7ZtoWO7nD8JhaCrPpxC6C/rnav
ZGg=

View File

@ -0,0 +1,8 @@
md5:032af53422a767d4edf60d5d2f8ec84e:salt:231-40-60-67-6-253-79-25:aes-256-cfb:PiAZ+U6IHA4GvL3gDsLzeV48MvnaAaEbAqWqYLq4TrsrbRj8J2QT6ANUjZoC
IxHgZ8yn/jNmpGrqj1ZPvF3V2qGG9RomI5txRf3oEWaiM1EGoHrcgj5GSEeF
7izz9sPV+DGA/aY0VTZOSIIdogZ7yY8KGRJ5w30KTmJtvZ6zzYUFzBtzqLup
Ax3I5OzDJUuIOWr0wcE+SPAuBq4VWzfY2gTUUeepy+VMDilN2dltRAlPL+6R
t8wy4JjIuQ8y/fYVYkSVACWgL9cXWWQWgyk8yr+KJFV3ejL0UxwCGtpy54cj
kVtt1b3i/VhntaSFKMzY6BtRKrSbtd1nvuMT8gSrY9Kq6MFUNorjlAkAznkK
R4Jw6aWF8aMor3JhCp0aqc109K9pvmvkCRvCkYKH/Fs9DLGD1AsEDPFrdndi
AWs=

View File

@ -1,8 +0,0 @@
md5:c3ff40a35a072ebcdf4b00de0c62eede:salt:220-201-162-125-99-148-31-141:aes-256-cfb:P5sXyTi2l8dAegj6vwcxQlxAoXCz9ynBa/f2BATSr+ViTEQmlgqiMi6N7Zud
URbZGWBf94Wr0QqN3JMDqKX3d/ajr1C6tSoG25NL7r293PjR6icNaGklP4S+
WjNZWnEslsIfarfZZoSDw557BPo52r8nkEwSPfgdsZQiZgIUvSYAwZbVCp99
Frwyg9fc9riQ3zxOcYxygCVKZGyEKj0R+W4BBTeoMXzfzVu+kQUR+ZS1HVco
pEHAufUq4zI7P1EHFhZBM6A/E9c048Xr6ClshStsQA51qLwbnjhrBMZzQbJt
IJ9fcoTpHQq4NTD6XItiB7vFVbe6DDlQUPP4JQ0e3rxeX0Pwontjipqk2ucM
L5aN8Q+4H3JdH3x9Z2H0YlDJZ6i1XbIp2vp7ijtMlJR/pEc9ryEvBkbGH2yW
4DuvEQHOeQcb

View File

@ -96,16 +96,27 @@ http {
# Logging Settings
##
log_format ltsv "time:$time_local\thost:$remote_addr"
"\tforwardedfor:$http_x_forwarded_for\t"
"method:$request_method\tpath:$request_uri\tprotocol:$server_protocol"
"\tstatus:$status\tsize:$body_bytes_sent\treferer:$http_referer"
"\tua:$http_user_agent\ttaken_sec:$request_time"
"\tbackend:$upstream_addr\tbackend_status:$upstream_status"
"\tcache:$upstream_http_x_cache\tbackend_runtime:$upstream_response_time"
"\tvhost:$host";
log_format json escape=json
'{'
'"time":"$time_local",'
'"host":"$remote_addr",'
'"forwardedfor":"$http_x_forwarded_for",'
'"method":"$request_method",'
'"path":"$request_uri",'
'"protocol":"$server_protocol",'
'"status":"$status",'
'"size":"$body_bytes_sent",'
'"referer":"$http_referer",'
'"ua":"$http_user_agent",'
'"taken_sec":"$request_time",'
'"backend":"$upstream_addr",'
'"backend_status":"$upstream_status",'
'"cache":"$upstream_http_x_cache",'
'"backend_runtime":"$upstream_response_time",'
'"vhost":"$host"'
'}';
access_log /var/log/nginx/access.log ltsv;
access_log /var/log/nginx/access.log json;
error_log /var/log/nginx/error.log;
##

View File

@ -0,0 +1,17 @@
[Unit]
Description=Vector
Documentation=https://vector.dev
# Start only after the network is actually up; Requires= makes this a hard dependency.
After=network-online.target
Requires=network-online.target
[Service]
# Run Vector with the nginx access-log pipeline only (see /etc/vector/nginx-access.toml).
ExecStart=/usr/bin/vector --config /etc/vector/nginx-access.toml
# SIGHUP tells Vector to reload its configuration without restarting the process.
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
# Send stdout/stderr to the journal, tagged "vector" for filtering.
StandardOutput=journal
StandardError=journal
SyslogIdentifier=vector
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,17 @@
[Unit]
Description=Vector
Documentation=https://vector.dev
# Start only after the network is actually up; Requires= makes this a hard dependency.
After=network-online.target
Requires=network-online.target
[Service]
# Run Vector with the nginx error-log pipeline only (see /etc/vector/nginx-error.toml).
ExecStart=/usr/bin/vector --config /etc/vector/nginx-error.toml
# SIGHUP tells Vector to reload its configuration without restarting the process.
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
# Send stdout/stderr to the journal, tagged "vector" for filtering.
StandardOutput=journal
StandardError=journal
SyslogIdentifier=vector
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,65 @@
# Vector pipeline: tail nginx JSON access logs, enrich them, and ship them
# to a local file and to Loki.
data_dir = "/var/lib/vector/"
# Source: follow every *access.log under /var/log/nginx, skipping entries
# older than 10 minutes on startup.
[sources.nginx]
type = "file"
include = [ "/var/log/nginx/*access.log" ]
ignore_older_secs = 600
read_from = "beginning"
# Transform (VRL): parse the JSON-formatted access-log line, derive a log
# level from the HTTP status (2xx/3xx=info, 4xx=warn, else error), and
# promote the nginx timestamp to the event timestamp.
[transforms.nginx_transform]
type = "remap"
inputs = ["nginx"]
source = '''
.hostname = .host
l = parse_json!(.message)
. = merge!(., l)
del(.message)
del(.host)
.status = string!(.status)
if match(.status, r'^[23]') {
.level = "info"
} else if match(.status, r'^[4]') {
.level = "warn"
} else {
.level = "error"
}
.timestamp = parse_timestamp!(.time, format: "%d/%b/%Y:%T %z")
del(.time)
'''
# Sink 1: write the enriched events to a local, date-stamped file.
[sinks.nginx_output]
type = "file"
inputs = [ "nginx_transform" ]
compression = "none"
path = "/tmp/nginx-access-%Y-%m-%d.log"
[sinks.nginx_output.encoding]
codec = "json"
# Disk-backed buffer (268435488 bytes, ~256 MiB) so events survive restarts.
[sinks.nginx_output.buffer]
max_size = 268435488
type = "disk"
# Sink 2: push the same events to Loki (resolved via Consul DNS).
[sinks.nginx_loki]
type = "loki"
inputs = [ "nginx_transform" ]
endpoint = "http://loki.service.consul:3100"
compression = "snappy"
# Loki labels are templated from the transformed event fields.
[sinks.nginx_loki.labels]
level = "{{ level }}"
hostname = "{{ hostname }}"
job = "nginx"
vhost = "{{ vhost }}"
[sinks.nginx_loki.encoding]
codec = "json"
[sinks.nginx_loki.buffer]
max_size = 268435488
type = "disk"

View File

@ -0,0 +1,56 @@
# Vector pipeline: tail nginx error logs, parse level/timestamp, and ship
# them to a local file and to Loki.
data_dir = "/var/lib/vector/"
# Source: follow every *error.log under /var/log/nginx, skipping entries
# older than 10 minutes on startup.
[sources.nginx-error]
type = "file"
include = [ "/var/log/nginx/*error.log" ]
ignore_older_secs = 600
read_from = "beginning"
# Transform (VRL): split "<timestamp> [<level>] <message>" with a regex,
# strip the brackets from the level, and parse the nginx timestamp.
[transforms.nginx-error_transform]
type = "remap"
inputs = ["nginx-error"]
source = '''
.hostname = .host
del(.host)
el, err = parse_regex(.message, r'^(?P<timestamp>[^ ]+ [^ ]+) (?P<level>[^ ]+) (?P<message>.*)$')
. = merge(., el)
tmp, err = replace(.level, "[", "")
.level = replace(tmp, "]", "")
.timestamp = parse_timestamp!(.timestamp, "%Y/%m/%d %T")
'''
# Sink 1: write the parsed events to a local, date-stamped file.
[sinks.nginx-error_output]
type = "file"
inputs = [ "nginx-error_transform" ]
compression = "none"
path = "/tmp/nginx-error-%Y-%m-%d.log"
[sinks.nginx-error_output.encoding]
codec = "json"
# Disk-backed buffer (268435488 bytes, ~256 MiB) so events survive restarts.
[sinks.nginx-error_output.buffer]
max_size = 268435488
type = "disk"
# Sink 2: push the same events to Loki (resolved via Consul DNS).
[sinks.nginx-error_loki]
type = "loki"
inputs = [ "nginx-error_transform" ]
endpoint = "http://loki.service.consul:3100"
compression = "snappy"
# Loki labels are templated from event fields.
# NOTE(review): the transform above does not set a `vhost` field for error
# logs — confirm this label resolves as intended.
[sinks.nginx-error_loki.labels]
level = "{{ level }}"
hostname = "{{ hostname }}"
vhost = "{{ vhost }}"
job = "nginx"
[sinks.nginx-error_loki.encoding]
codec = "json"
[sinks.nginx-error_loki.buffer]
max_size = 268435488
type = "disk"

View File

@ -8,4 +8,4 @@
--http-uwsgi-temp-path=/var/lib/nginx/uwsgi --with-debug --with-pcre-jit --with-ipv6 --with-http_ssl_module \
--with-http_v2_module --with-http_stub_status_module --with-http_realip_module --with-http_auth_request_module \
--with-http_addition_module --with-http_geoip_module --with-http_gunzip_module --with-http_gzip_static_module \
--with-http_sub_module --with-stream --with-stream_ssl_module
--with-http_sub_module --with-stream --with-stream_ssl_module --with-http_v3_module

View File

@ -24,7 +24,7 @@ begin
Timeout.timeout(3) do
response = Net::HTTP.get_response(uri)
if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)}
if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)}
vtag = $1
tag_version = vtag.sub('v', '')
@ -78,26 +78,27 @@ directory "#{LEGO_STORAGE}" do
mode '755'
end
encrypted_remote_file "#{LEGO_STORAGE}/lego_run.sh" do
owner 'root'
group 'root'
mode '500'
source "files/#{LEGO_STORAGE}/lego_run.sh"
password ENV['ITAMAE_PASSWORD']
end
%w( kazu634 everun ).each do |domain|
encrypted_remote_file "#{LEGO_STORAGE}/#{domain}_run.sh" do
owner 'root'
group 'root'
mode '500'
source "files/#{LEGO_STORAGE}/#{domain}_run.sh"
password ENV['ITAMAE_PASSWORD']
end
execute "#{LEGO_STORAGE}/lego_run.sh" do
user 'root'
cwd LEGO_STORAGE
not_if "test -d #{LEGO_STORAGE}/.lego"
end
execute "#{LEGO_STORAGE}/#{domain}_run.sh" do
user 'root'
cwd LEGO_STORAGE
end
encrypted_remote_file '/etc/cron.d/lego' do
owner 'root'
group 'root'
mode '644'
source 'files/etc/cron.d/lego'
password ENV['ITAMAE_PASSWORD']
encrypted_remote_file "/etc/cron.d/#{domain}" do
owner 'root'
group 'root'
mode '644'
source "files/etc/cron.d/#{domain}"
password ENV['ITAMAE_PASSWORD']
end
end
remote_file "/etc/lego/dhparams_4096.pem" do

View File

@ -13,7 +13,7 @@ remote_file '/lib/systemd/system/nginx.service' do
end
# Firewall Setting:
%w( 80/tcp 443/tcp ).each do |port|
%w( 80/tcp 443/tcp 443/udp ).each do |port|
execute "ufw allow #{port}" do
user 'root'
@ -35,25 +35,36 @@ service 'nginx' do
action [ :enable, :start ]
end
# Deploy `promtail` config file:
HOSTNAME = run_command('uname -n').stdout.chomp
template '/etc/promtail/nginx.yaml' do
owner 'root'
group 'root'
mode '644'
variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint'])
end
# Deploy the `systemd` configuration:
remote_file '/lib/systemd/system/promtail-nginx.service' do
# Deploy `vector` config:
remote_file '/etc/vector/nginx-access.toml' do
owner 'root'
group 'root'
mode '644'
end
# Service setting:
service 'promtail-nginx' do
action [ :enable, :restart ]
remote_file '/etc/systemd/system/vector-nginx-access.service' do
owner 'root'
group 'root'
mode '644'
end
service 'vector-nginx-access' do
action [ :enable, :start ]
end
remote_file '/etc/vector/nginx-error.toml' do
owner 'root'
group 'root'
mode '644'
end
remote_file '/etc/systemd/system/vector-nginx-error.service' do
owner 'root'
group 'root'
mode '644'
end
service 'vector-nginx-error' do
action [ :enable, :start ]
end

View File

@ -6,3 +6,58 @@ user 'webadm' do
create_home true
end
#####################################
# Deploy nginx Settings
#####################################
# Deploy the `sudoers` file:
remote_file '/etc/sudoers.d/webadm' do
owner 'root'
group 'root'
mode '440'
end
# Create directories:
%w(/home/webadm/.ssh /home/webadm/repo).each do |d|
directory d do
owner 'webadm'
group 'webadm'
mode '700'
end
end
# Deploy `~/.ssh/.ssh/authorized_keys`:
encrypted_remote_file '/home/webadm/.ssh/authorized_keys' do
owner 'webadm'
group 'webadm'
mode '600'
source 'files/home/webadm/.ssh/authorized_keys'
password ENV['ITAMAE_PASSWORD']
end
# Deploy secret keys
%w( id_rsa.github id_rsa.chef ).each do |conf|
encrypted_remote_file "/home/webadm/.ssh/#{conf}" do
owner 'webadm'
group 'webadm'
mode '600'
source "files/home/webadm/.ssh/#{conf}"
password ENV['ITAMAE_PASSWORD']
end
end
# Create `repo` directory:
git '/home/webadm/repo/nginx-config' do
user 'webadm'
repository 'https://github.com/kazu634/nginx-config.git'
end
execute '/home/webadm/repo/nginx-config/deploy.sh' do
user 'root'
cwd '/home/webadm/repo/nginx-config/'
end
service 'consul-template' do
action :restart
end

View File

@ -1,4 +1,4 @@
URL = 'https://github.com/rrreeeyyy/exporter_proxy/releases/download/v0.1.0/exporter_proxy_linux_amd64'
URL = 'https://github.com/rrreeeyyy/exporter_proxy/releases/download/v0.4.1/exporter_proxy_linux_amd64'
BIN = '/usr/local/bin/exporter_proxy'
CONFDIR = '/etc/prometheus_exporters.d/exporter_proxy/'
CONF = 'config.yml'
@ -28,16 +28,27 @@ remote_file "#{CONFDIR}#{CONF}" do
mode '644'
end
remote_file '/etc/supervisor/conf.d/exporter_proxy.conf' do
remote_file '/etc/systemd/system/exporter_proxy.service' do
user 'root'
group 'root'
mode '644'
end
service 'exporter_proxy' do
action [:enable, :start]
end
remote_file '/etc/consul.d/service-exporter_proxy.json' do
user 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[consul]'
end
service 'supervisor' do
service 'consul' do
action :nothing
end

View File

@ -1,11 +1,12 @@
{
"service": {
"name": "gitea",
"port": 3000,
"name": "exporter-proxy",
"port": 60000,
"check":{
"tcp": "localhost:3000",
"tcp": "localhost:60000",
"interval": "60s",
"timeout": "1s",
"status": "passing",
"success_before_passing": 3
}
}

View File

@ -0,0 +1 @@
# Flags passed to node_exporter via EnvironmentFile ($OPTIONS in the unit file):
# bind to localhost only (it is fronted by exporter_proxy) and enable the systemd collector.
OPTIONS=' --web.listen-address="127.0.0.1:9100" --collector.systemd'

View File

@ -0,0 +1,13 @@
# Rotate the filestat_exporter log weekly, keeping 4 compressed generations.
/var/log/filestat_exporter.log
{
	rotate 4
	weekly
	missingok
	notifempty
	compress
	# delaycompress keeps the most recent rotation uncompressed for easy inspection.
	delaycompress
	sharedscripts
	# Tell rsyslog to reopen the log file after rotation.
	postrotate
		/usr/lib/rsyslog/rsyslog-rotate
	endscript
}

View File

@ -0,0 +1,13 @@
# Rotate the node_exporter log weekly, keeping 4 compressed generations.
/var/log/node_exporter.log
{
	rotate 4
	weekly
	missingok
	notifempty
	compress
	# delaycompress keeps the most recent rotation uncompressed for easy inspection.
	delaycompress
	sharedscripts
	# Tell rsyslog to reopen the log file after rotation.
	postrotate
		/usr/lib/rsyslog/rsyslog-rotate
	endscript
}

View File

@ -3,13 +3,13 @@ listen: "0.0.0.0:60000"
# access_log (optional)
access_log:
path: "/dev/stdout"
path: "/var/log/exporter_proxy_access.log"
format: "ltsv"
fields: ['time', 'time_nsec', 'status', 'size', 'reqtime_nsec', 'backend', 'path', 'query', 'method']
# error_log (required)
error_log:
path: "/dev/stderr"
path: "/var/log/exporter_proxy_access.log"
# exporters: The path of exporter_proxy and the URL of the destination exporter
exporters:
@ -19,3 +19,4 @@ exporters:
filestat_exporter:
path: "/filestat_exporter/metrics"
url: "http://127.0.0.1:9943/metrics"

View File

@ -0,0 +1,7 @@
# Route syslog messages tagged "filestat_exporter" to a dedicated log file
:syslogtag,contains,"filestat_exporter" /var/log/filestat_exporter.log
# Uncomment the following to stop logging anything that matches the last rule.
# Doing this will stop logging kernel generated UFW log messages to the file
# normally containing kern.* messages (eg, /var/log/kern.log)
& stop

View File

@ -0,0 +1,7 @@
# Route syslog messages tagged "node_exporter" to a dedicated log file
:syslogtag,contains,"node_exporter" /var/log/node_exporter.log
# Uncomment the following to stop logging anything that matches the last rule.
# Doing this will stop logging kernel generated UFW log messages to the file
# normally containing kern.* messages (eg, /var/log/kern.log)
& stop

View File

@ -1,8 +0,0 @@
[program:exporter_proxy]
command=/usr/local/bin/exporter_proxy -config /etc/prometheus_exporters.d/exporter_proxy/config.yml
stdout_logfile=/var/log/supervisor/exporter_proxy.log
redirect_stderr=true
stdout_logfile_maxbytes=1MB
stdout_logfile_backups=5
autorestart=true
stopsignal=HUP

View File

@ -1,8 +0,0 @@
[program:filestat_exporter]
command=/usr/local/bin/filestat_exporter --config.file=/etc/prometheus_exporters.d/filestat.yml
stdout_logfile=/var/log/supervisor/filestat_exporter.log
redirect_stderr=true
stdout_logfile_maxbytes=1MB
stdout_logfile_backups=5
autorestart=true
stopsignal=HUP

View File

@ -1,8 +0,0 @@
[program:node_exporter]
command=/usr/local/bin/node_exporter --web.listen-address="127.0.0.1:9100"
stdout_logfile=/var/log/supervisor/node_exporter.log
redirect_stderr=true
stdout_logfile_maxbytes=1MB
stdout_logfile_backups=5
autorestart=true
stopsignal=HUP

View File

@ -0,0 +1,11 @@
[Unit]
Description=Exporter Proxy
[Service]
# Runs as root; fronts the local Prometheus exporters on a single port.
User=root
Group=root
ExecStart=/usr/local/bin/exporter_proxy -config /etc/prometheus_exporters.d/exporter_proxy/config.yml
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,11 @@
[Unit]
Description=Filestat Exporter
[Service]
# Runs as root so it can stat arbitrary files listed in filestat.yml.
User=root
Group=root
ExecStart=/usr/local/bin/filestat_exporter --config.file=/etc/prometheus_exporters.d/filestat.yml
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,11 @@
[Unit]
Description=Node Exporter
[Service]
User=root
Group=root
# Optional flags come from /etc/default/node_exporter (the leading "-" makes
# a missing file non-fatal); that file defines $OPTIONS.
EnvironmentFile=-/etc/default/node_exporter
ExecStart=/usr/local/bin/node_exporter $OPTIONS
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,16 @@
[Unit]
Description=Vector
Documentation=https://vector.dev
# Start only after the network is actually up; Requires= makes this a hard dependency.
After=network-online.target
Requires=network-online.target
[Service]
# Run Vector with the filestat_exporter log pipeline only.
ExecStart=/usr/bin/vector --config /etc/vector/filestat_exporter.toml
# SIGHUP tells Vector to reload its configuration without restarting the process.
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
# Send stdout/stderr to the journal, tagged "vector" for filtering.
StandardOutput=journal
StandardError=journal
SyslogIdentifier=vector
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,17 @@
[Unit]
Description=Vector
Documentation=https://vector.dev
# Start only after the network is actually up; Requires= makes this a hard dependency.
After=network-online.target
Requires=network-online.target
[Service]
# Run Vector with the node_exporter log pipeline only.
ExecStart=/usr/bin/vector --config /etc/vector/node_exporter.toml
# SIGHUP tells Vector to reload its configuration without restarting the process.
ExecReload=/bin/kill -HUP $MAINPID
Restart=always
# Send stdout/stderr to the journal, tagged "vector" for filtering.
StandardOutput=journal
StandardError=journal
SyslogIdentifier=vector
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,55 @@
# Vector pipeline: tail the rsyslog-produced filestat_exporter log and ship
# it to a local file and to Loki.
data_dir = "/var/lib/vector/"
# Source: follow /var/log/filestat_exporter.log, skipping entries older than
# 10 minutes on startup.
[sources.filestat_exporter]
type = "file"
include = [ "/var/log/filestat_exporter.log" ]
ignore_older_secs = 600
read_from = "beginning"
# Transform (VRL): peel off the syslog envelope, then parse the exporter's
# key=value payload; the exporter's own "msg" field becomes the message.
[transforms.filestat_exporter_transform]
type = "remap"
inputs = ["filestat_exporter"]
source = '''
. |= parse_syslog!(.message)
. |= parse_key_value!(.message)
del(.message)
del(.host)
.message = .msg
del(.msg)
'''
# Sink 1: write the parsed events to a local, date-stamped file.
[sinks.filestat_exporter_output]
type = "file"
inputs = [ "filestat_exporter_transform" ]
compression = "none"
path = "/tmp/filestat_exporter-%Y-%m-%d.log"
[sinks.filestat_exporter_output.encoding]
codec = "json"
# Disk-backed buffer (268435488 bytes, ~256 MiB) so events survive restarts.
[sinks.filestat_exporter_output.buffer]
max_size = 268435488
type = "disk"
# Sink 2: push the same events to Loki (resolved via Consul DNS).
[sinks.filestat_exporter_loki]
type = "loki"
inputs = [ "filestat_exporter_transform" ]
endpoint = "http://loki.service.consul:3100"
compression = "snappy"
[sinks.filestat_exporter_loki.labels]
level = "{{ level }}"
hostname = "{{ hostname }}"
job = "filestat_exporter"
filename = "/var/log/filestat_exporter.log"
[sinks.filestat_exporter_loki.encoding]
codec = "json"
[sinks.filestat_exporter_loki.buffer]
max_size = 268435488
type = "disk"

View File

@ -0,0 +1,54 @@
# Vector pipeline: tail the rsyslog-produced node_exporter log and ship it
# to a local file and to Loki.
data_dir = "/var/lib/vector/"
# Source: follow /var/log/node_exporter.log, skipping entries older than
# 10 minutes on startup.
[sources.node_exporter]
type = "file"
include = [ "/var/log/node_exporter.log" ]
ignore_older_secs = 600
read_from = "beginning"
# Transform (VRL): peel off the syslog envelope, then parse the exporter's
# key=value payload; the exporter's own "msg" field becomes the message.
[transforms.node_exporter_transform]
type = "remap"
inputs = ["node_exporter"]
source = '''
. |= parse_syslog!(.message)
. |= parse_key_value!(.message)
del(.message)
del(.host)
.message = .msg
del(.msg)
'''
# Sink 1: write the parsed events to a local, date-stamped file.
[sinks.node_exporter_output]
type = "file"
inputs = [ "node_exporter_transform" ]
compression = "none"
path = "/tmp/node_exporter-%Y-%m-%d.log"
[sinks.node_exporter_output.encoding]
codec = "json"
# Disk-backed buffer (268435488 bytes, ~256 MiB) so events survive restarts.
[sinks.node_exporter_output.buffer]
max_size = 268435488
type = "disk"
# Sink 2: push the same events to Loki (resolved via Consul DNS).
[sinks.node_exporter_loki]
type = "loki"
inputs = [ "node_exporter_transform" ]
endpoint = "http://loki.service.consul:3100"
compression = "snappy"
[sinks.node_exporter_loki.labels]
level = "{{ level }}"
hostname = "{{ hostname }}"
job = "node_exporter"
filename = "/var/log/node_exporter.log"
[sinks.node_exporter_loki.encoding]
codec = "json"
[sinks.node_exporter_loki.buffer]
max_size = 268435488
type = "disk"

View File

@ -1,22 +1,61 @@
# Deploy the `supervisord` configuration:
# Deploy the configuration:
# Deploy the filestat_exporter target-list YAML.
# NOTE(review): this notifies `service[supervisor]`, but the rest of this
# recipe manages filestat_exporter via systemd — confirm supervisor is still
# installed, or whether this notify is leftover from the removed diff lines.
remote_file '/etc/prometheus_exporters.d/filestat.yml' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
end
# Deploy the `supervisord` configuration:
remote_file '/etc/supervisor/conf.d/filestat_exporter.conf' do
# Deploy the `systemd` configuration:
remote_file '/etc/systemd/system/filestat_exporter.service' do
owner 'root'
group 'root'
mode '644'
end
# Enable and start the systemd-managed filestat_exporter service.
service 'filestat_exporter' do
action [:enable, :start]
end
# Deploy `rsyslog` config (routes filestat_exporter output to its own file).
# NOTE(review): two notifies below — the `supervisor` one looks like a
# leftover removed line from this diff; confirm it should be dropped.
remote_file '/etc/rsyslog.d/30-filestat_exporter.conf' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[rsyslog]'
end
# rsyslog service resource: restart only when notified above.
service 'rsyslog' do
action :nothing
end
# Deploy `logrotate` config:
remote_file '/etc/logrotate.d/filestat_exporter' do
owner 'root'
group 'root'
mode '644'
end
# Deploy `vector` pipeline config; a change restarts the vector unit.
remote_file '/etc/vector/filestat_exporter.toml' do
owner 'root'
group 'root'
mode '0644'
notifies :restart, 'service[vector-filestat_exporter]'
end
# systemd unit that runs Vector with the config above.
remote_file '/etc/systemd/system/vector-filestat_exporter.service' do
owner 'root'
group 'root'
mode '0644'
end
service 'vector-filestat_exporter' do
action [:enable, :start]
end
# Deploy `consul` config for `filestat_exporter`:
remote_file '/etc/consul.d/service-filestat_exporter.json' do
owner 'consul'
group 'consul'

View File

@ -1,10 +1,55 @@
# Deploy the `supervisord` configuration:
remote_file '/etc/supervisor/conf.d/node_exporter.conf' do
# Deploy the `systemd` configuration:
remote_file '/etc/systemd/system/node_exporter.service' do
owner 'root'
group 'root'
mode '644'
end
# Deploy node_exporter's environment/options file read by the systemd unit.
remote_file '/etc/default/node_exporter' do
owner 'root'
group 'root'
mode '644'
end
# Enable and start the systemd-managed node_exporter service.
service 'node_exporter' do
action [ :enable, :start]
end
# Deploy `rsyslog` config for `node_exporter`:
# NOTE(review): two notifies below — the `supervisor` one looks like a
# leftover removed line from this diff; confirm it should be dropped.
remote_file '/etc/rsyslog.d/30-node_exporter.conf' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[rsyslog]'
end
# rsyslog service resource: restart only when notified above.
service 'rsyslog' do
action :nothing
end
# Deploy `logrotate` config for `node_exporter`:
remote_file '/etc/logrotate.d/node_exporter' do
owner 'root'
group 'root'
mode '0644'
end
# Deploy the `vector` pipeline config for node_exporter logs.
remote_file '/etc/vector/node_exporter.toml' do
owner 'root'
group 'root'
mode '644'
end
# systemd unit that runs Vector with the config above.
remote_file '/etc/systemd/system/vector-node_exporter.service' do
owner 'root'
group 'root'
mode '644'
end
service 'vector-node_exporter' do
action [ :enable, :start]
end
# Deploy `consul` config for `node_exporter`:

View File

@ -13,7 +13,7 @@ begin
Timeout.timeout(3) do
response = Net::HTTP.get_response(uri)
vtag = $1 if response.body =~ %r{tag\/(v\d+\.\d+\.\d+)}
vtag = $1 if response['location'] =~ %r{tag\/(v\d+\.\d+\.\d+)}
tag = vtag.sub(/^v/, '')
alertmanager_bin = "#{node['alertmanager']['prefix']}#{tag}#{node['alertmanager']['postfix']}"

View File

@ -8,39 +8,80 @@
end
# Deploy `alertmanager` file:
remote_file '/etc/prometheus.d/alertmanager.yml' do
owner 'root'
group 'root'
mode '644'
encrypted_remote_file '/etc/prometheus.d/alertmanager.yml' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
source 'files/etc/prometheus.d/alertmanager.yml/'
password ENV['ITAMAE_PASSWORD']
notifies :restart, 'service[alertmanager]'
end
# Deploy alert setting file:
%w(node_exporter prometheus filestat).each do |conf|
%w(node_exporter prometheus filestat services snmp).each do |conf|
remote_file "/etc/prometheus.d/alerts/#{conf}.yml" do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[prometheus]'
end
end
# Deploy `supervisord` config:
remote_file '/etc/supervisor/conf.d/alertmanager.conf' do
# Deploy `systemd` config for `alertmanager`:
remote_file '/etc/systemd/system/alertmanager.service' do
owner 'root'
group 'root'
mode '644'
end
# Enable and start the systemd-managed alertmanager service.
service 'alertmanager' do
action [:enable, :start]
end
# Deploy `rsyslog` config for `alertmanager`:
# NOTE(review): two notifies below — the `supervisor` one looks like a
# leftover removed line from this diff; confirm it should be dropped.
remote_file '/etc/rsyslog.d/30-alertmanager.conf' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[rsyslog]'
end
# Restart the `supervisor`:
service 'supervisor' do
service 'rsyslog' do
action :nothing
end
# Deploy `logrotate` config for `alertmanager`:
remote_file '/etc/logrotate.d/alertmanager' do
owner 'root'
group 'root'
mode '644'
end
# Deploy `vector` pipeline config for `alertmanager`; a change restarts
# the vector-alertmanager unit.
remote_file '/etc/vector/alertmanager.toml' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[vector-alertmanager]'
end
# systemd unit that runs Vector with the config above.
remote_file '/etc/systemd/system/vector-alertmanager.service' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[vector-alertmanager]'
end
service 'vector-alertmanager' do
action [:enable, :start]
end
# Firewall settings here:
%w( 9093/tcp ).each do |p|
execute "ufw allow #{p}" do

View File

@ -12,7 +12,7 @@ begin
Timeout.timeout(3) do
response = Net::HTTP.get_response(uri)
tag = $1 if response.body =~ %r{tag\/(\d+\.\d+)}
tag = $1 if response['location'] =~ %r{tag\/(\d+\.\d+)}
alertmanager_webhook_bin = "#{node['alertmanager_webhook']['prefix']}#{tag}#{node['alertmanager_webhook']['postfix']}"

View File

@ -1,14 +1,32 @@
# Deploy `supervisor` config for `Alert Manager Webhook Logger`
remote_file '/etc/supervisor/conf.d/alertmanager_webhook_logger.conf' do
# Deploy `systemd` config for `Alert Manager Webhook Logger`
remote_file '/etc/systemd/system/webhook.service' do
owner 'root'
group 'root'
mode '644'
notifies :restart, 'service[supervisor]'
notifies :restart, 'service[webhook]'
end
# Restart the `supervisor`:
service 'supervisor' do
service 'webhook' do
action [:enable, :start]
end
# Deploy `rsyslog` config for `Alert Manager Webhook Logger`:
remote_file '/etc/rsyslog.d/30-webhook.conf' do
owner 'root'
group 'root'
mode '0644'
notifies :restart, 'service[rsyslog]'
end
# rsyslog service resource: restart only when notified above.
service 'rsyslog' do
action :nothing
end
# Deploy `logrotate` config for `Alert Manager Webhook Logger`:
remote_file '/etc/logrotate.d/webhook' do
owner 'root'
group 'root'
mode '0644'
end

View File

@ -12,28 +12,3 @@ include_recipe './alertmanager_webhook_setup.rb'
include_recipe './snmp_exporter_install.rb'
include_recipe './snmp_exporter_setup.rb'
# Capture the node's hostname for the promtail template below.
# (Original comment said "/etc/hosts", but this block deploys promtail.)
HOSTNAME = run_command('uname -n').stdout.chomp
# Render promtail config with this host's name and the Loki endpoint.
template '/etc/promtail/prometheus.yaml' do
owner 'root'
group 'root'
mode '644'
variables(HOSTNAME: HOSTNAME, LOKIENDPOINT: node['promtail']['lokiendpoint'])
notifies :restart, 'service[promtail-prometheus]'
end
# Deploy the `systemd` configuration:
remote_file '/lib/systemd/system/promtail-prometheus.service' do
owner 'root'
group 'root'
mode '644'
end
# Service setting:
service 'promtail-prometheus' do
action [ :enable, :restart ]
end

View File

@ -0,0 +1,13 @@
# Rotate the alertmanager log weekly, keeping 4 compressed archives;
# rsyslog is signaled once after rotation via the shared helper.
/var/log/alertmanager.log
{
rotate 4
weekly
missingok
notifempty
compress
delaycompress
sharedscripts
postrotate
/usr/lib/rsyslog/rsyslog-rotate
endscript
}

View File

@ -0,0 +1,13 @@
# Rotate the prometheus log weekly, keeping 4 compressed archives;
# rsyslog is signaled once after rotation via the shared helper.
/var/log/prometheus.log
{
rotate 4
weekly
missingok
notifempty
compress
delaycompress
sharedscripts
postrotate
/usr/lib/rsyslog/rsyslog-rotate
endscript
}

View File

@ -0,0 +1,13 @@
# Rotate the snmp_exporter log weekly, keeping 4 compressed archives;
# rsyslog is signaled once after rotation via the shared helper.
/var/log/snmp_exporter.log
{
rotate 4
weekly
missingok
notifempty
compress
delaycompress
sharedscripts
postrotate
/usr/lib/rsyslog/rsyslog-rotate
endscript
}

View File

@ -0,0 +1,13 @@
# Rotate the alertmanager-webhook log weekly, keeping 4 compressed archives;
# rsyslog is signaled once after rotation via the shared helper.
/var/log/alertmanager-webhook.log
{
rotate 4
weekly
missingok
notifempty
compress
delaycompress
sharedscripts
postrotate
/usr/lib/rsyslog/rsyslog-rotate
endscript
}

View File

@ -1,21 +1,13 @@
global:
slack_api_url: 'https://hooks.slack.com/services/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'
route:
receiver: 'test-route'
group_by: [alertname]
group_wait: 10s
group_interval: 1m
repeat_interval: 6h
receivers:
- name: 'test-route'
slack_configs:
- channel: '#ops'
title: "{{ range .Alerts }}{{ .Annotations.summary }}\n{{ end }}"
text: "{{ range .Alerts }}{{ .Annotations.description }}\n{{ end }}"
send_resolved: true
webhook_configs:
- send_resolved: true
url: 'http://localhost:6725'
md5:28ec9f4b96884f37cbd904fb91f5ee7d:salt:161-52-232-3-248-143-138-217:aes-256-cfb:Zl1SiauJIVlZ5Nl/QwFyZN4DzwSfaWC12a3AEioxgNPUdXHKPeRCb7u2o4Bw
JrjA7SKRxeDjMBqYyQUnDG9AZ88l1xWkXMIm2cIjWdjj+5aRYpOls7QjCXfQ
iN7d9tpw7tuqg3kMplKvEAHPGSDLK+1kBxCD37gYpQMa/VjL4Rnf9PD8Duae
te52AbngvCRk7RfIIINIv6fiECYvC1dgnGs7lzwio4vC6ssO6O1uelt5IYvz
1OQdywvSEYhh7KWSBRGwLhBAFmzDfdRcuJJD0qT6Sv0VkSKp4VP/MQsjuit6
qESm5AAiiP9qyoRmWZgBuN3yyjAudmkNE6d+vJ3/5WXg0u8c54G+yQ0/eREe
tKfIsn9hYna87GfM5Cwtwn5Iw1DYxWFTImsGV/aM3XsCLiA6Z9pMXdJR8YWY
6pyODiTlpL8F/SRV6r+tABgJVq3Dc+C05xBoulYOx8LhHbBHo6oo9nupvHak
YTe9RKctdZ8Qpf3QtsVQmBmOPhYMuBB9yMQ3EYZUErsm4aKcXpYOjjViO0uP
DIY9CwmQbcPymFOS5nMUR85T6Qnuu0huGMVB0dIKh7vPECVIMd/0IxEkNacQ
GaHbC9Cuav5vB0gyqTdg5xHWA89dC7jbz5anqPMBmpStPajGoGoH3vXmMrSf
zylLMtkRb+EVXVSrdo9emjomJfzPxrmKk3hxfjnP4P/KxhIS8H/kjVEiDOpO
IjO7

View File

@ -0,0 +1,101 @@
# Prometheus alerting rules: one rule per systemd unit, firing when
# node_exporter reports the unit not in the "active" state for 5 minutes.
groups:
- name: services
rules:
- alert: Digdag
expr: node_systemd_unit_state{name="digdag.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "Digdag is not running: {{ $labels.instance }}."
description: "Digdag is not running: {{ $labels.instance }}."
- alert: node_exporter
expr: node_systemd_unit_state{name="node_exporter.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "node_exporter is not running: {{ $labels.instance }}."
description: "node_exporter is not running: {{ $labels.instance }}."
- alert: vector-node_exporter
expr: node_systemd_unit_state{name="vector-node_exporter.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "vector-node_exporter is not running: {{ $labels.instance }}."
description: "vector-node_exporter is not running: {{ $labels.instance }}."
- alert: snmp_exporter
expr: node_systemd_unit_state{name="snmp_exporter.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "snmp_exporter is not running: {{ $labels.instance }}."
description: "snmp_exporter is not running: {{ $labels.instance }}."
- alert: vector-snmp_exporter
expr: node_systemd_unit_state{name="vector-snmp_exporter.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "vector-snmp_exporter is not running: {{ $labels.instance }}."
description: "vector-snmp_exporter is not running: {{ $labels.instance }}."
- alert: filestat_exporter
expr: node_systemd_unit_state{name="filestat_exporter.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "filestat_exporter is not running: {{ $labels.instance }}."
description: "filestat_exporter is not running: {{ $labels.instance }}."
- alert: vector-filestat_exporter
expr: node_systemd_unit_state{name="vector-filestat_exporter.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "vector-filestat_exporter is not running: {{ $labels.instance }}."
description: "vector-filestat_exporter is not running: {{ $labels.instance }}."
- alert: exporter_proxy
expr: node_systemd_unit_state{name="exporter_proxy.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "exporter_proxy is not running: {{ $labels.instance }}."
description: "exporter_proxy is not running: {{ $labels.instance }}."
- alert: prometheus
expr: node_systemd_unit_state{name="prometheus.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "prometheus is not running: {{ $labels.instance }}."
description: "prometheus is not running: {{ $labels.instance }}."
- alert: vector-prometheus
expr: node_systemd_unit_state{name="vector-prometheus.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "vector-prometheus is not running: {{ $labels.instance }}."
description: "vector-prometheus is not running: {{ $labels.instance }}."
- alert: vault
expr: node_systemd_unit_state{name="vault.service", state="active"} != 1
for: 5m
labels:
severity: error
annotations:
summary: "vault is not running: {{ $labels.instance }}."
description: "vault is not running: {{ $labels.instance }}."

View File

@ -0,0 +1,7 @@
# Route alertmanager-tagged syslog messages to their own file.
# (Comment previously said "digdag" — copy/paste from another config.)
:syslogtag,contains,"alertmanager" /var/log/alertmanager.log
# The `stop` below discards messages matched by the rule above so they do
# not also land in the default destinations (e.g. /var/log/syslog).
& stop

View File

@ -0,0 +1,7 @@
# Route prometheus-tagged syslog messages to their own file.
# (Comment previously said "digdag" — copy/paste from another config.)
:syslogtag,contains,"prometheus" /var/log/prometheus.log
# The `stop` below discards messages matched by the rule above so they do
# not also land in the default destinations (e.g. /var/log/syslog).
& stop

Some files were not shown because too many files have changed in this diff Show More