From 5897b97065cea3b40fc432450ca89dcf7f8f7611 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Fri, 6 Jan 2023 15:35:06 +0100 Subject: [PATCH 01/36] Renamed mail notification method for watchdog to be more general --- data/Dockerfiles/watchdog/watchdog.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index 231d0ecd..517d160e 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -97,7 +97,7 @@ log_msg() { echo $(date) $(printf '%s\n' "${1}") } -function mail_error() { +function notify_error() { THROTTLE= [[ -z ${1} ]] && return 1 # If exists, body will be the content of "/tmp/${1}", even if ${2} is set @@ -197,7 +197,7 @@ get_container_ip() { # One-time check if grep -qi "$(echo ${IPV6_NETWORK} | cut -d: -f1-3)" <<< "$(ip a s)"; then if [[ -z "$(get_ipv6)" ]]; then - mail_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection." + notify_error "ipv6-config" "enable_ipv6 is true in docker-compose.yml, but an IPv6 link could not be established. Please verify your IPv6 connection." fi fi @@ -747,7 +747,7 @@ olefy_checks() { # Notify about start if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then - mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow." + notify_error "watchdog-mailcow" "Watchdog started monitoring mailcow." fi # Create watchdog agents @@ -1029,33 +1029,33 @@ while true; do fi if [[ ${com_pipe_answer} == "ratelimit" ]]; then log_msg "At least one ratelimit was applied" - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then log_msg "Mail queue status is critical" - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" elif [[ ${com_pipe_answer} == "external_checks" ]]; then log_msg "Your mailcow is an open relay!" # Define $2 to override message text, else print service was restarted at ... - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!" + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!" elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then log_msg "MySQL replication is not working properly" # Define $2 to override message text, else print service was restarted at ... # Once mail per 10 minutes - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the SQL replication status" 600 + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please check the SQL replication status" 600 elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then log_msg "Dovecot replication is not working properly" # Define $2 to override message text, else print service was restarted at ... # Once mail per 10 minutes - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600 + [[ ! 
-z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600 elif [[ ${com_pipe_answer} == "certcheck" ]]; then log_msg "Certificates are about to expire" # Define $2 to override message text, else print service was restarted at ... # Only mail once a day - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please renew your certificate" 86400 + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please renew your certificate" 86400 elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then log_msg "acme-mailcow did not complete successfully" # Define $2 to override message text, else print service was restarted at ... - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" "Please check acme-mailcow for further information." + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please check acme-mailcow for further information." elif [[ ${com_pipe_answer} == "fail2ban" ]]; then F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null)) if [[ ! -z "${F2B_RES}" ]]; then @@ -1065,7 +1065,7 @@ while true; do log_msg "Banned ${host}" rm /tmp/fail2ban 2> /dev/null timeout 2s whois "${host}" > /tmp/fail2ban - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && mail_error "${com_pipe_answer}" "IP ban: ${host}" + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && notify_error "${com_pipe_answer}" "IP ban: ${host}" done fi elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then @@ -1085,7 +1085,7 @@ while true; do else log_msg "Sending restart command to ${CONTAINER_ID}..." curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && mail_error "${com_pipe_answer}" + [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" log_msg "Wait for restarted container to settle and continue watching..." sleep 35 fi From 9ef14a20d17ebdeaa49249e66068699827040fb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Fri, 6 Jan 2023 15:43:43 +0100 Subject: [PATCH 02/36] Centralized checking of enabled notifications --- data/Dockerfiles/watchdog/watchdog.sh | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index 517d160e..e6e70ba7 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -98,6 +98,8 @@ log_msg() { } function notify_error() { + # Check if one of the notification options is enabled + [[ -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ -z ${WATCHDOG_NOTIFY_WEBHOOK} ]] && return 0 THROTTLE= [[ -z ${1} ]] && return 1 # If exists, body will be the content of "/tmp/${1}", even if ${2} is set @@ -746,9 +748,7 @@ olefy_checks() { } # Notify about start -if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then - notify_error "watchdog-mailcow" "Watchdog started monitoring mailcow." -fi +notify_error "watchdog-mailcow" "Watchdog started monitoring mailcow." # Create watchdog agents @@ -1029,33 +1029,33 @@ while true; do fi if [[ ${com_pipe_answer} == "ratelimit" ]]; then log_msg "At least one ratelimit was applied" - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" + notify_error "${com_pipe_answer}" elif [[ ${com_pipe_answer} == "mail_queue_status" ]]; then log_msg "Mail queue status is critical" - [[ ! 
-z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" + notify_error "${com_pipe_answer}" elif [[ ${com_pipe_answer} == "external_checks" ]]; then log_msg "Your mailcow is an open relay!" # Define $2 to override message text, else print service was restarted at ... - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!" + notify_error "${com_pipe_answer}" "Please stop mailcow now and check your network configuration!" elif [[ ${com_pipe_answer} == "mysql_repl_checks" ]]; then log_msg "MySQL replication is not working properly" # Define $2 to override message text, else print service was restarted at ... # Once mail per 10 minutes - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please check the SQL replication status" 600 + notify_error "${com_pipe_answer}" "Please check the SQL replication status" 600 elif [[ ${com_pipe_answer} == "dovecot_repl_checks" ]]; then log_msg "Dovecot replication is not working properly" # Define $2 to override message text, else print service was restarted at ... # Once mail per 10 minutes - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600 + notify_error "${com_pipe_answer}" "Please check the Dovecot replicator status" 600 elif [[ ${com_pipe_answer} == "certcheck" ]]; then log_msg "Certificates are about to expire" # Define $2 to override message text, else print service was restarted at ... # Only mail once a day - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please renew your certificate" 86400 + notify_error "${com_pipe_answer}" "Please renew your certificate" 86400 elif [[ ${com_pipe_answer} == "acme-mailcow" ]]; then log_msg "acme-mailcow did not complete successfully" # Define $2 to override message text, else print service was restarted at ... - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" "Please check acme-mailcow for further information." + notify_error "${com_pipe_answer}" "Please check acme-mailcow for further information." elif [[ ${com_pipe_answer} == "fail2ban" ]]; then F2B_RES=($(timeout 4s ${REDIS_CMDLINE} --raw GET F2B_RES 2> /dev/null)) if [[ ! -z "${F2B_RES}" ]]; then @@ -1065,7 +1065,7 @@ while true; do log_msg "Banned ${host}" rm /tmp/fail2ban 2> /dev/null timeout 2s whois "${host}" > /tmp/fail2ban - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && notify_error "${com_pipe_answer}" "IP ban: ${host}" + [[ ${WATCHDOG_NOTIFY_BAN} =~ ^([yY][eE][sS]|[yY])+$ ]] && notify_error "${com_pipe_answer}" "IP ban: ${host}" done fi elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then @@ -1085,7 +1085,7 @@ while true; do else log_msg "Sending restart command to ${CONTAINER_ID}..." curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart - [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && notify_error "${com_pipe_answer}" + notify_error "${com_pipe_answer}" log_msg "Wait for restarted container to settle and continue watching..." 
sleep 35 fi @@ -1095,3 +1095,4 @@ while true; do kill -USR1 ${BACKGROUND_TASKS[*]} fi done + From fe8131f7430ec3d2b97793587bcbf88bd058a67a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Fri, 6 Jan 2023 15:52:36 +0100 Subject: [PATCH 03/36] Only sent mail if enabled --- data/Dockerfiles/watchdog/watchdog.sh | 63 ++++++++++++++------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index e6e70ba7..5d962309 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -124,37 +124,40 @@ function notify_error() { else SUBJECT="${WATCHDOG_SUBJECT}: ${1}" fi - IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}" - for rcpt in "${MAIL_RCPTS[@]}"; do - RCPT_DOMAIN= - RCPT_MX= - RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'}) - CHECK_FOR_VALID_MX=$(dig +short ${RCPT_DOMAIN} mx) - if [[ -z ${CHECK_FOR_VALID_MX} ]]; then - log_msg "Cannot determine MX for ${rcpt}, skipping email notification..." - return 1 - fi - [ -f "/tmp/${1}" ] && BODY="/tmp/${1}" - timeout 10s ./smtp-cli --missing-modules-ok \ - "${SMTP_VERBOSE}" \ - --charset=UTF-8 \ - --subject="${SUBJECT}" \ - --body-plain="${BODY}" \ - --add-header="X-Priority: 1" \ - --to=${rcpt} \ - --from="watchdog@${MAILCOW_HOSTNAME}" \ - --hello-host=${MAILCOW_HOSTNAME} \ - --ipv4 - if [[ $? -eq 1 ]]; then # exit code 1 is fine - log_msg "Sent notification email to ${rcpt}" - else - if [[ "${SMTP_VERBOSE}" == "" ]]; then - log_msg "Error while sending notification email to ${rcpt}. You can enable verbose logging by setting 'WATCHDOG_VERBOSE=y' in mailcow.conf." - else - log_msg "Error while sending notification email to ${rcpt}." + + if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then + IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}" + for rcpt in "${MAIL_RCPTS[@]}"; do + RCPT_DOMAIN= + RCPT_MX= + RCPT_DOMAIN=$(echo ${rcpt} | awk -F @ {'print $NF'}) + CHECK_FOR_VALID_MX=$(dig +short ${RCPT_DOMAIN} mx) + if [[ -z ${CHECK_FOR_VALID_MX} ]]; then + log_msg "Cannot determine MX for ${rcpt}, skipping email notification..." + return 1 fi - fi - done + [ -f "/tmp/${1}" ] && BODY="/tmp/${1}" + timeout 10s ./smtp-cli --missing-modules-ok \ + "${SMTP_VERBOSE}" \ + --charset=UTF-8 \ + --subject="${SUBJECT}" \ + --body-plain="${BODY}" \ + --add-header="X-Priority: 1" \ + --to=${rcpt} \ + --from="watchdog@${MAILCOW_HOSTNAME}" \ + --hello-host=${MAILCOW_HOSTNAME} \ + --ipv4 + if [[ $? -eq 1 ]]; then # exit code 1 is fine + log_msg "Sent notification email to ${rcpt}" + else + if [[ "${SMTP_VERBOSE}" == "" ]]; then + log_msg "Error while sending notification email to ${rcpt}. You can enable verbose logging by setting 'WATCHDOG_VERBOSE=y' in mailcow.conf." + else + log_msg "Error while sending notification email to ${rcpt}." + fi + fi + done + fi } get_container_ip() { From 3e69304f0f9c22abd8b777d66c8a1099a3d2ef12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Fri, 6 Jan 2023 16:25:18 +0100 Subject: [PATCH 04/36] Send webhook --- data/Dockerfiles/watchdog/watchdog.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index 5d962309..741417c5 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -125,6 +125,7 @@ function notify_error() { SUBJECT="${WATCHDOG_SUBJECT}: ${1}" fi + # Send mail notification if enabled if [[ ! 
-z ${WATCHDOG_NOTIFY_EMAIL} ]]; then IFS=',' read -r -a MAIL_RCPTS <<< "${WATCHDOG_NOTIFY_EMAIL}" for rcpt in "${MAIL_RCPTS[@]}"; do @@ -158,6 +159,23 @@ function notify_error() { fi done fi + + # Send webhook notification if enabled + if [[ ! -z ${WATCHDOG_NOTIFY_WEBHOOK} ]]; then + if [[ -z ${WATCHDOG_NOTIFY_WEBHOOK_BODY} ]]; then + log_msg "No webhook body set, skipping webhook notification..." + return 1 + fi + + WEBHOOK_BODY=$(echo "${WATCHDOG_NOTIFY_WEBHOOK_BODY}" | envsubst '$SUBJECT,$BODY') + + curl -X POST \ + -H "Content-Type: application/json" \ + -d ${WEBHOOK_BODY} \ + ${WATCHDOG_NOTIFY_WEBHOOK} + + log_msg "Posted notification to webhook" + fi } get_container_ip() { From b83841d2533a62e44300ee5b36c3b1a4121e7b02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Sat, 7 Jan 2023 15:44:29 +0100 Subject: [PATCH 05/36] Replace placeholders with sed --- data/Dockerfiles/watchdog/watchdog.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index 741417c5..ada402ba 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -167,7 +167,7 @@ function notify_error() { return 1 fi - WEBHOOK_BODY=$(echo "${WATCHDOG_NOTIFY_WEBHOOK_BODY}" | envsubst '$SUBJECT,$BODY') + WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed "s/\$SUBJECT\|\${SUBJECT}/$SUBJECT/g" | sed "s/\$BODY\|\${BODY}/$BODY/" | sed "s/\"/\\\\\"/g") curl -X POST \ -H "Content-Type: application/json" \ From b6b399a590dc3da14ac51d976ed59d54be530ad1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Sat, 7 Jan 2023 16:00:17 +0100 Subject: [PATCH 06/36] Fixed POST to webhook --- data/Dockerfiles/watchdog/watchdog.sh | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index ada402ba..2b8ff78f 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -167,14 +167,13 @@ function notify_error() { return 1 fi - WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed "s/\$SUBJECT\|\${SUBJECT}/$SUBJECT/g" | sed "s/\$BODY\|\${BODY}/$BODY/" | sed "s/\"/\\\\\"/g") + # Replace subject and body placeholders + WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed "s/\$SUBJECT\|\${SUBJECT}/$SUBJECT/g" | sed "s/\$BODY\|\${BODY}/$BODY/") + + # POST to webhook + curl -X POST -H "Content-Type: application/json" -d "${WEBHOOK_BODY}" ${WATCHDOG_NOTIFY_WEBHOOK} - curl -X POST \ - -H "Content-Type: application/json" \ - -d ${WEBHOOK_BODY} \ - ${WATCHDOG_NOTIFY_WEBHOOK} - - log_msg "Posted notification to webhook" + log_msg "Sent notification using webhook" fi } From 38f5e293b0770db1dcd86564a89357a46fb708d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Sat, 7 Jan 2023 16:21:11 +0100 Subject: [PATCH 07/36] Webhook variables in config generation --- generate_config.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/generate_config.sh b/generate_config.sh index 70dc5887..da11f60a 100755 --- a/generate_config.sh +++ b/generate_config.sh @@ -338,6 +338,13 @@ USE_WATCHDOG=y #WATCHDOG_NOTIFY_EMAIL=a@example.com,b@example.com,c@example.com #WATCHDOG_NOTIFY_EMAIL= +# Send notifications to a webhook URL that receives a POST request with the content type "application/json". +# You can use this to send notifications to services like Discord, Slack and others. 
+#WATCHDOG_NOTIFY_WEBHOOK=https://discord.com/api/webhooks/XXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +# JSON body included in the webhook POST request. Needs to be in single quotes. +# Following variables are available: SUBJECT, BODY +#WATCHDOG_NOTIFY_WEBHOOK_BODY='{"username": "Mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}' + # Notify about banned IP (includes whois lookup) WATCHDOG_NOTIFY_BAN=n From 50fde60899c42a4a3e5bc2dcf8c5a6f00a00a2e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Sat, 7 Jan 2023 16:29:43 +0100 Subject: [PATCH 08/36] Added webhook variables to update script --- update.sh | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/update.sh b/update.sh index 34d17354..4ada78d8 100755 --- a/update.sh +++ b/update.sh @@ -367,6 +367,8 @@ CONFIG_ARRAY=( "SKIP_SOGO" "USE_WATCHDOG" "WATCHDOG_NOTIFY_EMAIL" + "WATCHDOG_NOTIFY_WEBHOOK" + "WATCHDOG_NOTIFY_WEBHOOK_BODY" "WATCHDOG_NOTIFY_BAN" "WATCHDOG_EXTERNAL_CHECKS" "WATCHDOG_SUBJECT" @@ -546,6 +548,20 @@ for option in ${CONFIG_ARRAY[@]}; do echo "#MAILDIR_SUB=Maildir" >> mailcow.conf echo "MAILDIR_SUB=" >> mailcow.conf fi + elif [[ ${option} == "WATCHDOG_NOTIFY_WEBHOOK" ]]; then + if ! grep -q ${option} mailcow.conf; then + echo "Adding new option \"${option}\" to mailcow.conf" + echo '# Send notifications to a webhook URL that receives a POST request with the content type "application/json".' >> mailcow.conf + echo '# You can use this to send notifications to services like Discord, Slack and others.' >> mailcow.conf + echo '#WATCHDOG_NOTIFY_WEBHOOK=https://discord.com/api/webhooks/XXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' >> mailcow.conf + fi + elif [[ ${option} == "WATCHDOG_NOTIFY_WEBHOOK_BODY" ]]; then + if ! grep -q ${option} mailcow.conf; then + echo "Adding new option \"${option}\" to mailcow.conf" + echo '# JSON body included in the webhook POST request. Needs to be in single quotes.' >> mailcow.conf + echo '# Following variables are available: SUBJECT, BODY' >> mailcow.conf + echo '#WATCHDOG_NOTIFY_WEBHOOK_BODY=\'{"username": "Mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}\'' >> mailcow.conf + fi elif [[ ${option} == "WATCHDOG_NOTIFY_BAN" ]]; then if ! grep -q ${option} mailcow.conf; then echo "Adding new option \"${option}\" to mailcow.conf" @@ -925,4 +941,4 @@ fi # echo # git reflog --color=always | grep "Before update on " # echo -# echo "Use \"git reset --hard hash-on-the-left\" and run $COMPOSE_COMMAND up -d afterwards." \ No newline at end of file +# echo "Use \"git reset --hard hash-on-the-left\" and run $COMPOSE_COMMAND up -d afterwards." From e4347792b87e6537b02e885c223f4e5b95957400 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Sun, 8 Jan 2023 20:02:18 +0100 Subject: [PATCH 09/36] mailcow should be llow --- generate_config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generate_config.sh b/generate_config.sh index da11f60a..3181eac7 100755 --- a/generate_config.sh +++ b/generate_config.sh @@ -343,7 +343,7 @@ USE_WATCHDOG=y #WATCHDOG_NOTIFY_WEBHOOK=https://discord.com/api/webhooks/XXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX # JSON body included in the webhook POST request. Needs to be in single quotes. 
# Following variables are available: SUBJECT, BODY -#WATCHDOG_NOTIFY_WEBHOOK_BODY='{"username": "Mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}' +#WATCHDOG_NOTIFY_WEBHOOK_BODY='{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}' # Notify about banned IP (includes whois lookup) WATCHDOG_NOTIFY_BAN=n From 7877215d597e42f1f761a4ea4695e6888eac4238 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Sun, 8 Jan 2023 20:02:46 +0100 Subject: [PATCH 10/36] mailcow should be lowercase --- update.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/update.sh b/update.sh index 4ada78d8..8e23f414 100755 --- a/update.sh +++ b/update.sh @@ -560,7 +560,7 @@ for option in ${CONFIG_ARRAY[@]}; do echo "Adding new option \"${option}\" to mailcow.conf" echo '# JSON body included in the webhook POST request. Needs to be in single quotes.' >> mailcow.conf echo '# Following variables are available: SUBJECT, BODY' >> mailcow.conf - echo '#WATCHDOG_NOTIFY_WEBHOOK_BODY=\'{"username": "Mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}\'' >> mailcow.conf + echo '#WATCHDOG_NOTIFY_WEBHOOK_BODY=\'{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}\'' >> mailcow.conf fi elif [[ ${option} == "WATCHDOG_NOTIFY_BAN" ]]; then if ! grep -q ${option} mailcow.conf; then From a3c5f785e9dd8412cb2807de71246ce12f91ebf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Kleinekath=C3=B6fer?= Date: Mon, 20 Feb 2023 22:34:53 +0100 Subject: [PATCH 11/36] Added new env vars to docker compose --- docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker-compose.yml b/docker-compose.yml index b940b336..61e7a78e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -471,6 +471,8 @@ services: - WATCHDOG_NOTIFY_EMAIL=${WATCHDOG_NOTIFY_EMAIL:-} - WATCHDOG_NOTIFY_BAN=${WATCHDOG_NOTIFY_BAN:-y} - WATCHDOG_SUBJECT=${WATCHDOG_SUBJECT:-Watchdog ALERT} + - WATCHDOG_NOTIFY_WEBHOOK=${WATCHDOG_NOTIFY_WEBHOOK} + - WATCHDOG_NOTIFY_WEBHOOK_BODY=${WATCHDOG_NOTIFY_WEBHOOK_BODY} - WATCHDOG_EXTERNAL_CHECKS=${WATCHDOG_EXTERNAL_CHECKS:-n} - WATCHDOG_MYSQL_REPLICATION_CHECKS=${WATCHDOG_MYSQL_REPLICATION_CHECKS:-n} - WATCHDOG_VERBOSE=${WATCHDOG_VERBOSE:-n} From e2e8fbe3131327eb65f22e31fb200d55c59512dd Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 10 Jul 2023 13:54:23 +0200 Subject: [PATCH 12/36] [Web] add f2b_banlist endpoint --- data/Dockerfiles/netfilter/server.py | 2 + data/web/admin.php | 6 +- data/web/inc/functions.fail2ban.inc.php | 65 +++++++++++++++++++- data/web/inc/functions.inc.php | 15 +++++ data/web/js/build/013-mailcow.js | 8 +++ data/web/json_api.php | 21 ++++++- data/web/lang/lang.de-de.json | 2 + data/web/lang/lang.en-gb.json | 2 + data/web/templates/admin/tab-config-f2b.twig | 9 +++ 9 files changed, 127 insertions(+), 3 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 698137bf..9f3cacb3 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -16,6 +16,7 @@ import json import iptc import dns.resolver import dns.exception +import uuid while True: try: @@ -94,6 +95,7 @@ def verifyF2boptions(f2boptions): verifyF2boption(f2boptions,'retry_window', 600) verifyF2boption(f2boptions,'netban_ipv4', 32) verifyF2boption(f2boptions,'netban_ipv6', 128) + verifyF2boption(f2boptions,'banlist_id', str(uuid.uuid4())) def verifyF2boption(f2boptions, f2boption, f2bdefault): f2boptions[f2boption] = f2boptions[f2boption] if f2boption in 
f2boptions and f2boptions[f2boption] is not None else f2bdefault diff --git a/data/web/admin.php b/data/web/admin.php index 14cb89f5..8a96ee51 100644 --- a/data/web/admin.php +++ b/data/web/admin.php @@ -85,6 +85,8 @@ $cors_settings = cors('get'); $cors_settings['allowed_origins'] = str_replace(", ", "\n", $cors_settings['allowed_origins']); $cors_settings['allowed_methods'] = explode(", ", $cors_settings['allowed_methods']); +$f2b_data = fail2ban('get'); + $template = 'admin.twig'; $template_data = [ 'tfa_data' => $tfa_data, @@ -101,7 +103,8 @@ $template_data = [ 'domains' => $domains, 'all_domains' => $all_domains, 'mailboxes' => $mailboxes, - 'f2b_data' => fail2ban('get'), + 'f2b_data' => $f2b_data, + 'f2b_banlist_url' => getBaseUrl() . "/api/v1/get/fail2ban/banlist/" . $f2b_data['banlist_id'], 'q_data' => quarantine('settings'), 'qn_data' => quota_notification('get'), 'rsettings_map' => file_get_contents('http://nginx:8081/settings.php'), @@ -112,6 +115,7 @@ $template_data = [ 'password_complexity' => password_complexity('get'), 'show_rspamd_global_filters' => @$_SESSION['show_rspamd_global_filters'], 'cors_settings' => $cors_settings, + 'is_https' => isset($_SERVER['HTTPS']) && $_SERVER['HTTPS'] === 'on', 'lang_admin' => json_encode($lang['admin']), 'lang_datatables' => json_encode($lang['datatables']) ]; diff --git a/data/web/inc/functions.fail2ban.inc.php b/data/web/inc/functions.fail2ban.inc.php index 2c4aa41d..3e0c75c4 100644 --- a/data/web/inc/functions.fail2ban.inc.php +++ b/data/web/inc/functions.fail2ban.inc.php @@ -1,5 +1,5 @@ 'f2b_modified' ); break; + case 'banlist': + try { + $f2b_options = json_decode($redis->Get('F2B_OPTIONS'), true); + } + catch (RedisException $e) { + $_SESSION['return'][] = array( + 'type' => 'danger', + 'log' => array(__FUNCTION__, $_action, $_data_log, $_extra), + 'msg' => array('redis_error', $e) + ); + return false; + } + if (is_array($_extra)) { + $_extra = $_extra[0]; + } + if ($_extra != $f2b_options['banlist_id']){ + return false; + } + + switch ($_data) { + case 'get': + try { + $bl = $redis->hGetAll('F2B_BLACKLIST'); + $active_bans = $redis->hGetAll('F2B_ACTIVE_BANS'); + } + catch (RedisException $e) { + $_SESSION['return'][] = array( + 'type' => 'danger', + 'log' => array(__FUNCTION__, $_action, $_data_log, $_extra), + 'msg' => array('redis_error', $e) + ); + return false; + } + $banlist = implode("\n", array_merge(array_keys($bl), array_keys($active_bans))); + return $banlist; + break; + case 'refresh': + if ($_SESSION['mailcow_cc_role'] != "admin") { + return false; + } + + $f2b_options['banlist_id'] = uuid4(); + try { + $redis->Set('F2B_OPTIONS', json_encode($f2b_options)); + } + catch (RedisException $e) { + $_SESSION['return'][] = array( + 'type' => 'danger', + 'log' => array(__FUNCTION__, $_action, $_data_log, $_extra), + 'msg' => array('redis_error', $e) + ); + return false; + } + + $_SESSION['return'][] = array( + 'type' => 'success', + 'log' => array(__FUNCTION__, $_action, $_data_log, $_extra), + 'msg' => 'f2b_banlist_refreshed' + ); + return true; + break; + } + break; } } diff --git a/data/web/inc/functions.inc.php b/data/web/inc/functions.inc.php index 6418945c..3cff09b9 100644 --- a/data/web/inc/functions.inc.php +++ b/data/web/inc/functions.inc.php @@ -2246,6 +2246,21 @@ function cors($action, $data = null) { break; } } +function getBaseURL() { + $protocol = isset($_SERVER['HTTPS']) && $_SERVER['HTTPS'] === 'on' ? 'https' : 'http'; + $host = $_SERVER['HTTP_HOST']; + $base_url = $protocol . '://' . 
$host; + + return $base_url; +} +function uuid4() { + $data = openssl_random_pseudo_bytes(16); + + $data[6] = chr(ord($data[6]) & 0x0f | 0x40); + $data[8] = chr(ord($data[8]) & 0x3f | 0x80); + + return vsprintf('%s%s-%s-%s-%s-%s%s%s', str_split(bin2hex($data), 4)); +} function get_logs($application, $lines = false) { if ($lines === false) { diff --git a/data/web/js/build/013-mailcow.js b/data/web/js/build/013-mailcow.js index e659915b..cc54fafb 100644 --- a/data/web/js/build/013-mailcow.js +++ b/data/web/js/build/013-mailcow.js @@ -371,3 +371,11 @@ function addTag(tagAddElem, tag = null){ $(tagValuesElem).val(JSON.stringify(value_tags)); $(tagInputElem).val(''); } +function copyToClipboard(id) { + var copyText = document.getElementById(id); + copyText.select(); + copyText.setSelectionRange(0, 99999); + // only works with https connections + navigator.clipboard.writeText(copyText.value); + mailcow_alert_box(lang.copy_to_clipboard, "success"); +} \ No newline at end of file diff --git a/data/web/json_api.php b/data/web/json_api.php index 16c78baf..50a45b56 100644 --- a/data/web/json_api.php +++ b/data/web/json_api.php @@ -503,6 +503,15 @@ if (isset($_GET['query'])) { print(json_encode($getArgs)); $_SESSION['challenge'] = $WebAuthn->getChallenge(); return; + break; + case "fail2ban": + if (!isset($_SESSION['mailcow_cc_role'])){ + switch ($object) { + case 'banlist': + echo fail2ban('banlist', 'get', $extra); + break; + } + } break; } if (isset($_SESSION['mailcow_cc_role'])) { @@ -1324,6 +1333,9 @@ if (isset($_GET['query'])) { break; case "fail2ban": switch ($object) { + case 'banlist': + echo fail2ban('banlist', 'get', $extra); + break; default: $data = fail2ban('get'); process_get_return($data); @@ -1930,7 +1942,14 @@ if (isset($_GET['query'])) { process_edit_return(fwdhost('edit', array_merge(array('fwdhost' => $items), $attr))); break; case "fail2ban": - process_edit_return(fail2ban('edit', array_merge(array('network' => $items), $attr))); + switch ($object) { + case 'banlist': + process_edit_return(fail2ban('banlist', 'refresh', $items)); + break; + default: + process_edit_return(fail2ban('edit', array_merge(array('network' => $items), $attr))); + break; + } break; case "ui_texts": process_edit_return(customize('edit', 'ui_texts', $attr)); diff --git a/data/web/lang/lang.de-de.json b/data/web/lang/lang.de-de.json index d6f79dc5..2091e670 100644 --- a/data/web/lang/lang.de-de.json +++ b/data/web/lang/lang.de-de.json @@ -147,6 +147,7 @@ "change_logo": "Logo ändern", "configuration": "Konfiguration", "convert_html_to_text": "Konvertiere HTML zu reinem Text", + "copy_to_clipboard": "Text wurde in die Zwischenablage kopiert!", "cors_settings": "CORS Einstellungen", "credentials_transport_warning": "Warnung: Das Hinzufügen einer neuen Regel bewirkt die Aktualisierung der Authentifizierungsdaten aller vorhandenen Einträge mit identischem Next Hop.", "customer_id": "Kunde", @@ -1019,6 +1020,7 @@ "domain_removed": "Domain %s wurde entfernt", "dovecot_restart_success": "Dovecot wurde erfolgreich neu gestartet", "eas_reset": "ActiveSync Gerät des Benutzers %s wurde zurückgesetzt", + "f2b_banlist_refreshed": "Banlist ID wurde erfolgreich erneuert.", "f2b_modified": "Änderungen an Fail2ban-Parametern wurden gespeichert", "forwarding_host_added": "Weiterleitungs-Host %s wurde hinzugefügt", "forwarding_host_removed": "Weiterleitungs-Host %s wurde entfernt", diff --git a/data/web/lang/lang.en-gb.json b/data/web/lang/lang.en-gb.json index 28ff19b8..b176bc28 100644 --- a/data/web/lang/lang.en-gb.json +++ 
b/data/web/lang/lang.en-gb.json @@ -151,6 +151,7 @@ "change_logo": "Change logo", "configuration": "Configuration", "convert_html_to_text": "Convert HTML to plain text", + "copy_to_clipboard": "Text copied to clipboard!", "cors_settings": "CORS Settings", "credentials_transport_warning": "Warning: Adding a new transport map entry will update the credentials for all entries with a matching next hop column.", "customer_id": "Customer ID", @@ -1028,6 +1029,7 @@ "domain_removed": "Domain %s has been removed", "dovecot_restart_success": "Dovecot was restarted successfully", "eas_reset": "ActiveSync devices for user %s were reset", + "f2b_banlist_refreshed": "Banlist ID has been successfully refreshed.", "f2b_modified": "Changes to Fail2ban parameters have been saved", "forwarding_host_added": "Forwarding host %s has been added", "forwarding_host_removed": "Forwarding host %s has been removed", diff --git a/data/web/templates/admin/tab-config-f2b.twig b/data/web/templates/admin/tab-config-f2b.twig index c15fb72f..68aa57a4 100644 --- a/data/web/templates/admin/tab-config-f2b.twig +++ b/data/web/templates/admin/tab-config-f2b.twig @@ -90,6 +90,15 @@ {% if not f2b_data.active_bans and not f2b_data.perm_bans %} {{ lang.admin.no_active_bans }} {% endif %} +
+
+ + {% if is_https %} + + {% endif %} + +
+
{% for active_ban in f2b_data.active_bans %}

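The banlist URL wired into the template above resolves to the new GET endpoint /api/v1/get/fail2ban/banlist/<banlist_id> (see f2b_banlist_url in admin.php), which returns the merged blacklist and active bans as plain text, one host or network per line, and is served without a session. A minimal check from any shell — the hostname and the banlist ID below are placeholders for illustration, not values from this patch:

  # Prints the current blacklist entries and active bans, one per line.
  # Replace host and ID with the f2b_banlist_url shown in the mailcow UI.
  curl -s https://mail.example.org/api/v1/get/fail2ban/banlist/00000000-0000-4000-8000-000000000000
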
From 65cbc478b8ac644c826bbb5153bd557f29cda10f Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Tue, 11 Jul 2023 10:13:00 +0200 Subject: [PATCH 13/36] [Web] add manage f2b external option --- data/Dockerfiles/netfilter/server.py | 79 ++++++++++++-------- data/web/inc/functions.fail2ban.inc.php | 9 ++- data/web/lang/lang.de-de.json | 2 + data/web/lang/lang.en-gb.json | 2 + data/web/templates/admin/tab-config-f2b.twig | 7 ++ 5 files changed, 65 insertions(+), 34 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 9f3cacb3..428ddb96 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -96,6 +96,7 @@ def verifyF2boptions(f2boptions): verifyF2boption(f2boptions,'netban_ipv4', 32) verifyF2boption(f2boptions,'netban_ipv6', 128) verifyF2boption(f2boptions,'banlist_id', str(uuid.uuid4())) + verifyF2boption(f2boptions,'manage_external', 0) def verifyF2boption(f2boptions, f2boption, f2bdefault): f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault @@ -158,6 +159,7 @@ def mailcowChainOrder(): exit_code = 2 def ban(address): + global f2boptions global lock refreshF2boptions() BAN_TIME = int(f2boptions['ban_time']) @@ -199,7 +201,7 @@ def ban(address): cur_time = int(round(time.time())) NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 )) - if type(ip) is ipaddress.IPv4Address: + if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1: with lock: chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') rule = iptc.Rule() @@ -208,7 +210,7 @@ def ban(address): rule.target = target if rule not in chain.rules: chain.insert_rule(rule) - else: + elif int(f2boptions['manage_external']) != 1: with lock: chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') rule = iptc.Rule6() @@ -253,37 +255,52 @@ def unban(net): bans[net]['ban_counter'] += 1 def permBan(net, unban=False): + global f2boptions global lock if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: - with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) + if int(f2boptions['manage_external']) != 1: + with lock: + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') + rule = iptc.Rule() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules and not unban: + logCrit('Add host/network %s to blacklist' % net) + chain.insert_rule(rule) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif rule in chain.rules and unban: + logCrit('Remove host/network %s from blacklist' % net) + chain.delete_rule(rule) + r.hdel('F2B_PERM_BANS', '%s' % net) + elif not unban: + logCrit('Add host/network %s to blacklist' % net) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif unban: + logCrit('Remove host/network %s from blacklist' % net) + r.hdel('F2B_PERM_BANS', '%s' % net) else: - with 
lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) + if int(f2boptions['manage_external']) != 1: + with lock: + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') + rule = iptc.Rule6() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules and not unban: + logCrit('Add host/network %s to blacklist' % net) + chain.insert_rule(rule) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif rule in chain.rules and unban: + logCrit('Remove host/network %s from blacklist' % net) + chain.delete_rule(rule) + r.hdel('F2B_PERM_BANS', '%s' % net) + elif not unban: + logCrit('Add host/network %s to blacklist' % net) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif unban: + logCrit('Remove host/network %s from blacklist' % net) + r.hdel('F2B_PERM_BANS', '%s' % net) def quit(signum, frame): global quit_now @@ -555,7 +572,7 @@ def initChain(): chain.insert_rule(rule) if __name__ == '__main__': - + refreshF2boptions() # In case a previous session was killed without cleanup clear() # Reinit MAILCOW chain diff --git a/data/web/inc/functions.fail2ban.inc.php b/data/web/inc/functions.fail2ban.inc.php index 3e0c75c4..abc12cc3 100644 --- a/data/web/inc/functions.fail2ban.inc.php +++ b/data/web/inc/functions.fail2ban.inc.php @@ -247,6 +247,7 @@ function fail2ban($_action, $_data = null, $_extra = null) { $netban_ipv6 = intval((isset($_data['netban_ipv6'])) ? $_data['netban_ipv6'] : $is_now['netban_ipv6']); $wl = (isset($_data['whitelist'])) ? $_data['whitelist'] : $is_now['whitelist']; $bl = (isset($_data['blacklist'])) ? $_data['blacklist'] : $is_now['blacklist']; + $manage_external = (isset($_data['manage_external'])) ? intval($_data['manage_external']) : 0; } else { $_SESSION['return'][] = array( @@ -266,6 +267,8 @@ function fail2ban($_action, $_data = null, $_extra = null) { $f2b_options['netban_ipv6'] = ($netban_ipv6 > 128) ? 128 : $netban_ipv6; $f2b_options['max_attempts'] = ($max_attempts < 1) ? 1 : $max_attempts; $f2b_options['retry_window'] = ($retry_window < 1) ? 1 : $retry_window; + $f2b_options['banlist_id'] = $is_now['banlist_id']; + $f2b_options['manage_external'] = ($manage_external > 0) ? 
1 : 0; try { $redis->Set('F2B_OPTIONS', json_encode($f2b_options)); $redis->Del('F2B_WHITELIST'); @@ -351,8 +354,8 @@ function fail2ban($_action, $_data = null, $_extra = null) { switch ($_data) { case 'get': try { - $bl = $redis->hGetAll('F2B_BLACKLIST'); - $active_bans = $redis->hGetAll('F2B_ACTIVE_BANS'); + $bl = $redis->hKeys('F2B_BLACKLIST'); + $active_bans = $redis->hKeys('F2B_ACTIVE_BANS'); } catch (RedisException $e) { $_SESSION['return'][] = array( @@ -362,7 +365,7 @@ function fail2ban($_action, $_data = null, $_extra = null) { ); return false; } - $banlist = implode("\n", array_merge(array_keys($bl), array_keys($active_bans))); + $banlist = implode("\n", array_merge($bl, $active_bans)); return $banlist; break; case 'refresh': diff --git a/data/web/lang/lang.de-de.json b/data/web/lang/lang.de-de.json index 2091e670..7c2171aa 100644 --- a/data/web/lang/lang.de-de.json +++ b/data/web/lang/lang.de-de.json @@ -181,6 +181,8 @@ "f2b_blacklist": "Blacklist für Netzwerke und Hosts", "f2b_filter": "Regex-Filter", "f2b_list_info": "Ein Host oder Netzwerk auf der Blacklist wird immer eine Whitelist-Einheit überwiegen. Die Aktualisierung der Liste dauert einige Sekunden.", + "f2b_manage_external": "Fail2Ban extern verwalten", + "f2b_manage_external_info": "Fail2ban wird die Banlist weiterhin pflegen, jedoch werden keine aktiven Regeln zum blockieren gesetzt. Die unten generierte Banlist, kann verwendet werden, um den Datenverkehr extern zu blockieren.", "f2b_max_attempts": "Max. Versuche", "f2b_max_ban_time": "Maximale Bannzeit in Sekunden", "f2b_netban_ipv4": "Netzbereich für IPv4-Banns (8-32)", diff --git a/data/web/lang/lang.en-gb.json b/data/web/lang/lang.en-gb.json index b176bc28..e7c82cda 100644 --- a/data/web/lang/lang.en-gb.json +++ b/data/web/lang/lang.en-gb.json @@ -185,6 +185,8 @@ "f2b_blacklist": "Blacklisted networks/hosts", "f2b_filter": "Regex filters", "f2b_list_info": "A blacklisted host or network will always outweigh a whitelist entity. List updates will take a few seconds to be applied.", + "f2b_manage_external": "Manage Fail2Ban externally", + "f2b_manage_external_info": "Fail2ban will still maintain the banlist, but it will not actively set rules to block traffic. Use the generated banlist below to externally block the traffic.", "f2b_max_attempts": "Max. attempts", "f2b_max_ban_time": "Max. ban time (s)", "f2b_netban_ipv4": "IPv4 subnet size to apply ban on (8-32)", diff --git a/data/web/templates/admin/tab-config-f2b.twig b/data/web/templates/admin/tab-config-f2b.twig index 68aa57a4..dac69516 100644 --- a/data/web/templates/admin/tab-config-f2b.twig +++ b/data/web/templates/admin/tab-config-f2b.twig @@ -42,6 +42,13 @@ +

+
+ + +
+

{{ lang.admin.f2b_manage_external_info }}

+

{{ lang.admin.f2b_list_info|raw }}

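The f2b_manage_external_info text added here describes the intended split: with the option enabled, netfilter only maintains the ban data in Redis, and an external device is expected to do the actual blocking, typically by polling the banlist endpoint from the previous patch. A rough sketch of such a consumer, assuming an external IPv4 firewall host with curl, ipset and iptables available; the set name, URL and the idea of running this from cron are assumptions for illustration, not part of mailcow:

  #!/bin/sh
  # Mirror the mailcow banlist into an ipset and drop traffic from it.
  # IPv6 entries (containing ':') are skipped here; they would need a second
  # set created with "family inet6" and an equivalent ip6tables rule.
  BANLIST_URL="https://mail.example.org/api/v1/get/fail2ban/banlist/00000000-0000-4000-8000-000000000000"
  ipset -exist create mailcow_banlist hash:net
  ipset flush mailcow_banlist
  curl -fsS "$BANLIST_URL" | grep -v ':' | while read -r net; do
    [ -n "$net" ] && ipset -exist add mailcow_banlist "$net"
  done
  # Insert the DROP rule once; -C only succeeds if an identical rule exists.
  iptables -C INPUT -m set --match-set mailcow_banlist src -j DROP 2>/dev/null || \
    iptables -I INPUT -m set --match-set mailcow_banlist src -j DROP

For anything beyond a sketch, the flush-and-refill step would normally be replaced by filling a temporary set and using ipset swap, so the firewall is never briefly empty while the list is being refreshed.
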
From 1537fb39c0c8c996a05ae677b5fa7e20775b4851 Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Tue, 11 Jul 2023 10:19:32 +0200 Subject: [PATCH 14/36] [Web] add manage f2b external option --- data/Dockerfiles/netfilter/server.py | 70 +++++++++++----------------- 1 file changed, 28 insertions(+), 42 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 428ddb96..982fa97c 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -258,49 +258,35 @@ def permBan(net, unban=False): global f2boptions global lock if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: - if int(f2boptions['manage_external']) != 1: - with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) - elif not unban: - logCrit('Add host/network %s to blacklist' % net) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif unban: - logCrit('Remove host/network %s from blacklist' % net) - r.hdel('F2B_PERM_BANS', '%s' % net) + with lock: + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') + rule = iptc.Rule() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules and not unban and int(f2boptions['manage_external']) != 1: + logCrit('Add host/network %s to blacklist' % net) + chain.insert_rule(rule) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif rule in chain.rules and unban: + logCrit('Remove host/network %s from blacklist' % net) + chain.delete_rule(rule) + r.hdel('F2B_PERM_BANS', '%s' % net) else: - if int(f2boptions['manage_external']) != 1: - with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) - elif not unban: - logCrit('Add host/network %s to blacklist' % net) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif unban: - logCrit('Remove host/network %s from blacklist' % net) - r.hdel('F2B_PERM_BANS', '%s' % net) + with lock: + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') + rule = iptc.Rule6() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules and not unban and int(f2boptions['manage_external']) != 1: + logCrit('Add host/network %s to blacklist' % net) + chain.insert_rule(rule) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif rule in chain.rules and unban: + logCrit('Remove host/network %s from blacklist' % net) + chain.delete_rule(rule) + r.hdel('F2B_PERM_BANS', '%s' % net) def quit(signum, frame): global quit_now From 987cfd5dae4014b35c183ce4be0e1f8856950116 Mon Sep 17 00:00:00 2001 From: 
FreddleSpl0it Date: Tue, 11 Jul 2023 10:31:25 +0200 Subject: [PATCH 15/36] [Web] f2b banlist - add http status codes --- data/web/inc/functions.fail2ban.inc.php | 3 +++ data/web/inc/prerequisites.inc.php | 1 + 2 files changed, 4 insertions(+) diff --git a/data/web/inc/functions.fail2ban.inc.php b/data/web/inc/functions.fail2ban.inc.php index abc12cc3..5962237f 100644 --- a/data/web/inc/functions.fail2ban.inc.php +++ b/data/web/inc/functions.fail2ban.inc.php @@ -342,12 +342,14 @@ function fail2ban($_action, $_data = null, $_extra = null) { 'log' => array(__FUNCTION__, $_action, $_data_log, $_extra), 'msg' => array('redis_error', $e) ); + http_response_code(500); return false; } if (is_array($_extra)) { $_extra = $_extra[0]; } if ($_extra != $f2b_options['banlist_id']){ + http_response_code(404); return false; } @@ -363,6 +365,7 @@ function fail2ban($_action, $_data = null, $_extra = null) { 'log' => array(__FUNCTION__, $_action, $_data_log, $_extra), 'msg' => array('redis_error', $e) ); + http_response_code(500); return false; } $banlist = implode("\n", array_merge($bl, $active_bans)); diff --git a/data/web/inc/prerequisites.inc.php b/data/web/inc/prerequisites.inc.php index b3b1cc13..f7fd80b4 100644 --- a/data/web/inc/prerequisites.inc.php +++ b/data/web/inc/prerequisites.inc.php @@ -70,6 +70,7 @@ try { } } catch (Exception $e) { +http_response_code(500); ?>
Connection to Redis failed.

The following error was reported:
getMessage();?>
Date: Wed, 12 Jul 2023 09:42:17 +0200 Subject: [PATCH 16/36] [Netfilter] Update Compose File to 1.53 --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4c854aeb..b68a97fa 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -426,7 +426,7 @@ services: - acme netfilter-mailcow: - image: mailcow/netfilter:1.52 + image: mailcow/netfilter:1.53 stop_grace_period: 30s depends_on: - dovecot-mailcow From db2759b7d184e68713bf0441f84df9f624ce3c6d Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Wed, 12 Jul 2023 16:46:32 +0200 Subject: [PATCH 17/36] [Web] fix wrong content type + add more http 500 responses --- data/web/inc/prerequisites.inc.php | 3 +++ data/web/json_api.php | 2 ++ 2 files changed, 5 insertions(+) diff --git a/data/web/inc/prerequisites.inc.php b/data/web/inc/prerequisites.inc.php index f7fd80b4..9c5203e7 100644 --- a/data/web/inc/prerequisites.inc.php +++ b/data/web/inc/prerequisites.inc.php @@ -70,6 +70,7 @@ try { } } catch (Exception $e) { +// Stop when redis is not available http_response_code(500); ?>
Connection to Redis failed.

The following error was reported:
getMessage();?>
@@ -99,6 +100,7 @@ try { } catch (PDOException $e) { // Stop when SQL connection fails +http_response_code(500); ?>
Connection to database failed.

The following error was reported:
getMessage();?>
Connection to dockerapi container failed.

The following error was reported:
-
Date: Thu, 12 Oct 2023 12:46:02 +0200 Subject: [PATCH 18/36] Allow suppressing watchdog start notification. The default behavior is still the old one (send a notifcation when the watchdog is started), but this notification can now be suppressed by setting WATCHDOG_NOTIFY_START=n. --- data/Dockerfiles/watchdog/watchdog.sh | 2 +- docker-compose.yml | 1 + generate_config.sh | 3 +++ update.sh | 7 +++++++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh index 231d0ecd..77281b71 100755 --- a/data/Dockerfiles/watchdog/watchdog.sh +++ b/data/Dockerfiles/watchdog/watchdog.sh @@ -746,7 +746,7 @@ olefy_checks() { } # Notify about start -if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]]; then +if [[ ! -z ${WATCHDOG_NOTIFY_EMAIL} ]] && [[ ${WATCHDOG_NOTIFY_START} =~ ^([yY][eE][sS]|[yY])+$ ]]; then mail_error "watchdog-mailcow" "Watchdog started monitoring mailcow." fi diff --git a/docker-compose.yml b/docker-compose.yml index c8cfd589..f208de0c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -488,6 +488,7 @@ services: - USE_WATCHDOG=${USE_WATCHDOG:-n} - WATCHDOG_NOTIFY_EMAIL=${WATCHDOG_NOTIFY_EMAIL:-} - WATCHDOG_NOTIFY_BAN=${WATCHDOG_NOTIFY_BAN:-y} + - WATCHDOG_NOTIFY_START=${WATCHDOG_NOTIFY_START:-y} - WATCHDOG_SUBJECT=${WATCHDOG_SUBJECT:-Watchdog ALERT} - WATCHDOG_EXTERNAL_CHECKS=${WATCHDOG_EXTERNAL_CHECKS:-n} - WATCHDOG_MYSQL_REPLICATION_CHECKS=${WATCHDOG_MYSQL_REPLICATION_CHECKS:-n} diff --git a/generate_config.sh b/generate_config.sh index f25309ea..30af54e3 100755 --- a/generate_config.sh +++ b/generate_config.sh @@ -401,6 +401,9 @@ USE_WATCHDOG=y # Notify about banned IP (includes whois lookup) WATCHDOG_NOTIFY_BAN=n +# Send a notification when the watchdog is started. +WATCHDOG_NOTIFY_START=y + # Subject for watchdog mails. Defaults to "Watchdog ALERT" followed by the error message. #WATCHDOG_SUBJECT= diff --git a/update.sh b/update.sh index 5204659e..533612ed 100755 --- a/update.sh +++ b/update.sh @@ -449,6 +449,7 @@ CONFIG_ARRAY=( "USE_WATCHDOG" "WATCHDOG_NOTIFY_EMAIL" "WATCHDOG_NOTIFY_BAN" + "WATCHDOG_NOTIFY_START" "WATCHDOG_EXTERNAL_CHECKS" "WATCHDOG_SUBJECT" "SKIP_CLAMD" @@ -636,6 +637,12 @@ for option in ${CONFIG_ARRAY[@]}; do echo '# Notify about banned IP. Includes whois lookup.' >> mailcow.conf echo "WATCHDOG_NOTIFY_BAN=y" >> mailcow.conf fi + elif [[ ${option} == "WATCHDOG_NOTIFY_START" ]]; then + if ! grep -q ${option} mailcow.conf; then + echo "Adding new option \"${option}\" to mailcow.conf" + echo '# Send a notification when the watchdog is started.' >> mailcow.conf + echo "WATCHDOG_NOTIFY_START=y" >> mailcow.conf + fi elif [[ ${option} == "WATCHDOG_SUBJECT" ]]; then if ! 
grep -q ${option} mailcow.conf; then echo "Adding new option \"${option}\" to mailcow.conf" From f39005b72ddd41662855e9f83d2021e6a23bffa6 Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 30 Oct 2023 11:54:14 +0100 Subject: [PATCH 19/36] [Netfilter] add nftables support --- data/Dockerfiles/netfilter/Dockerfile | 14 +- .../netfilter/docker-entrypoint.sh | 29 + data/Dockerfiles/netfilter/main.py | 459 ++++++++++++++++ .../Dockerfiles/netfilter/modules/IPTables.py | 213 ++++++++ data/Dockerfiles/netfilter/modules/Logger.py | 23 + .../Dockerfiles/netfilter/modules/NFTables.py | 495 ++++++++++++++++++ .../Dockerfiles/netfilter/modules/__init__.py | 0 docker-compose.yml | 2 +- 8 files changed, 1232 insertions(+), 3 deletions(-) create mode 100755 data/Dockerfiles/netfilter/docker-entrypoint.sh create mode 100644 data/Dockerfiles/netfilter/main.py create mode 100644 data/Dockerfiles/netfilter/modules/IPTables.py create mode 100644 data/Dockerfiles/netfilter/modules/Logger.py create mode 100644 data/Dockerfiles/netfilter/modules/NFTables.py create mode 100644 data/Dockerfiles/netfilter/modules/__init__.py diff --git a/data/Dockerfiles/netfilter/Dockerfile b/data/Dockerfiles/netfilter/Dockerfile index 4fcb5eef..8f76ec63 100644 --- a/data/Dockerfiles/netfilter/Dockerfile +++ b/data/Dockerfiles/netfilter/Dockerfile @@ -1,6 +1,8 @@ FROM alpine:3.17 LABEL maintainer "The Infrastructure Company GmbH " +WORKDIR /app + ENV XTABLES_LIBDIR /usr/lib/xtables ENV PYTHON_IPTABLES_XTABLES_VERSION 12 ENV IPTABLES_LIBDIR /usr/lib @@ -14,10 +16,13 @@ RUN apk add --virtual .build-deps \ iptables \ ip6tables \ xtables-addons \ + nftables \ tzdata \ py3-pip \ + py3-nftables \ musl-dev \ && pip3 install --ignore-installed --upgrade pip \ + jsonschema \ python-iptables \ redis \ ipaddress \ @@ -26,5 +31,10 @@ RUN apk add --virtual .build-deps \ # && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \ -COPY server.py / -CMD ["python3", "-u", "/server.py"] +COPY modules /app/modules +COPY main.py /app/ +COPY ./docker-entrypoint.sh /app/ + +RUN chmod +x /app/docker-entrypoint.sh + +CMD ["/bin/sh", "-c", "/app/docker-entrypoint.sh"] \ No newline at end of file diff --git a/data/Dockerfiles/netfilter/docker-entrypoint.sh b/data/Dockerfiles/netfilter/docker-entrypoint.sh new file mode 100755 index 00000000..47370a1f --- /dev/null +++ b/data/Dockerfiles/netfilter/docker-entrypoint.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +backend=iptables + +nft list table ip filter &>/dev/null +nftables_found=$? + +iptables -L -n &>/dev/null +iptables_found=$? 
+ +if [ $nftables_found -lt $iptables_found ]; then + backend=nftables +fi + +if [ $nftables_found -gt $iptables_found ]; then + backend=iptables +fi + +if [ $nftables_found -eq 0 ] && [ $nftables_found -eq $iptables_found ]; then + nftables_lines=$(nft list ruleset | wc -l) + iptables_lines=$(iptables-save | wc -l) + if [ $nftables_lines -gt $iptables_lines ]; then + backend=nftables + else + backend=iptables + fi +fi + +exec python -u /app/main.py $backend diff --git a/data/Dockerfiles/netfilter/main.py b/data/Dockerfiles/netfilter/main.py new file mode 100644 index 00000000..a6859c95 --- /dev/null +++ b/data/Dockerfiles/netfilter/main.py @@ -0,0 +1,459 @@ +#!/usr/bin/env python3 + +import re +import os +import sys +import time +import atexit +import signal +import ipaddress +from collections import Counter +from random import randint +from threading import Thread +from threading import Lock +import redis +import json +import dns.resolver +import dns.exception +from modules.Logger import Logger +from modules.IPTables import IPTables +from modules.NFTables import NFTables + + +# connect to redis +while True: + try: + redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '') + redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '') + if "".__eq__(redis_slaveof_ip): + r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0) + else: + r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0) + r.ping() + except Exception as ex: + print('%s - trying again in 3 seconds' % (ex)) + time.sleep(3) + else: + break +pubsub = r.pubsub() + +# rename fail2ban to netfilter +if r.exists('F2B_LOG'): + r.rename('F2B_LOG', 'NETFILTER_LOG') + + +# globals +WHITELIST = [] +BLACKLIST= [] +bans = {} +quit_now = False +exit_code = 0 +lock = Lock() + + +# init Logger +logger = Logger(r) +# init backend +backend = sys.argv[1] +if backend == "nftables": + logger.logInfo('Using NFTables backend') + tables = NFTables("MAILCOW", logger) +else: + logger.logInfo('Using IPTables backend') + tables = IPTables("MAILCOW", logger) + + +def refreshF2boptions(): + global f2boptions + global quit_now + global exit_code + + f2boptions = {} + + if not r.get('F2B_OPTIONS'): + f2boptions['ban_time'] = r.get('F2B_BAN_TIME') + f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME') + f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT') + f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') + f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') + f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') + f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') + else: + try: + f2boptions = json.loads(r.get('F2B_OPTIONS')) + except ValueError: + logger.logCrit('Error loading F2B options: F2B_OPTIONS is not json') + quit_now = True + exit_code = 2 + + verifyF2boptions(f2boptions) + r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False)) + +def verifyF2boptions(f2boptions): + verifyF2boption(f2boptions,'ban_time', 1800) + verifyF2boption(f2boptions,'max_ban_time', 10000) + verifyF2boption(f2boptions,'ban_time_increment', True) + verifyF2boption(f2boptions,'max_attempts', 10) + verifyF2boption(f2boptions,'retry_window', 600) + verifyF2boption(f2boptions,'netban_ipv4', 32) + verifyF2boption(f2boptions,'netban_ipv6', 128) + +def verifyF2boption(f2boptions, f2boption, f2bdefault): + f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault + +def 
refreshF2bregex(): + global f2bregex + global quit_now + global exit_code + if not r.get('F2B_REGEX'): + f2bregex = {} + f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)' + f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)' + f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+' + f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+' + f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+' + f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),' + f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' + f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' + f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked' + f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+' + r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False)) + else: + try: + f2bregex = {} + f2bregex = json.loads(r.get('F2B_REGEX')) + except ValueError: + logger.logCrit('Error loading F2B options: F2B_REGEX is not json') + quit_now = True + exit_code = 2 + +def get_ip(address): + ip = ipaddress.ip_address(address) + if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped: + ip = ip.ipv4_mapped + if ip.is_private or ip.is_loopback: + return False + + return ip + +def ban(address): + global lock + + refreshF2boptions() + BAN_TIME = int(f2boptions['ban_time']) + BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment']) + MAX_ATTEMPTS = int(f2boptions['max_attempts']) + RETRY_WINDOW = int(f2boptions['retry_window']) + NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4']) + NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6']) + + ip = get_ip(address) + if not ip: return + address = str(ip) + self_network = ipaddress.ip_network(address) + + with lock: + temp_whitelist = set(WHITELIST) + if temp_whitelist: + for wl_key in temp_whitelist: + wl_net = ipaddress.ip_network(wl_key, False) + if wl_net.overlaps(self_network): + logger.logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net)) + return + + net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False) + net = str(net) + + if not net in bans: + bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0} + + bans[net]['attempts'] += 1 + bans[net]['last_attempt'] = time.time() + + if bans[net]['attempts'] >= MAX_ATTEMPTS: + cur_time = int(round(time.time())) + NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] + logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 )) + if type(ip) is ipaddress.IPv4Address: + with lock: + tables.banIPv4(net) + else: + with lock: + tables.banIPv6(net) + + r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME) + else: + logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net)) + +def unban(net): + global lock + + if not net in bans: + logger.logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net) + r.hdel('F2B_QUEUE_UNBAN', '%s' % net) + return + + logger.logInfo('Unbanning %s' % net) + if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network: + with lock: + tables.unbanIPv4(net) + else: + with lock: + tables.unbanIPv6(net) + + r.hdel('F2B_ACTIVE_BANS', '%s' % net) + 
r.hdel('F2B_QUEUE_UNBAN', '%s' % net) + if net in bans: + bans[net]['attempts'] = 0 + bans[net]['ban_counter'] += 1 + +def permBan(net, unban=False): + global lock + + is_unbanned = False + is_banned = False + if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: + with lock: + if unban: + is_unbanned = tables.unbanIPv4(net) + else: + is_banned = tables.banIPv4(net) + else: + with lock: + if unban: + is_unbanned = tables.unbanIPv6(net) + else: + is_banned = tables.banIPv6(net) + + + if is_unbanned: + r.hdel('F2B_PERM_BANS', '%s' % net) + logger.logCrit('Removed host/network %s from blacklist' % net) + elif is_banned: + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + logger.logCrit('Added host/network %s to blacklist' % net) + +def clear(): + global lock + logger.logInfo('Clearing all bans') + for net in bans.copy(): + unban(net) + with lock: + tables.clearIPv4Table() + tables.clearIPv6Table() + r.delete('F2B_ACTIVE_BANS') + r.delete('F2B_PERM_BANS') + pubsub.unsubscribe() + +def watch(): + logger.logInfo('Watching Redis channel F2B_CHANNEL') + pubsub.subscribe('F2B_CHANNEL') + + global quit_now + global exit_code + + while not quit_now: + try: + for item in pubsub.listen(): + refreshF2bregex() + for rule_id, rule_regex in f2bregex.items(): + if item['data'] and item['type'] == 'message': + try: + result = re.search(rule_regex, item['data']) + except re.error: + result = False + if result: + addr = result.group(1) + ip = ipaddress.ip_address(addr) + if ip.is_private or ip.is_loopback: + continue + logger.logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data'])) + ban(addr) + except Exception as ex: + logger.logWarn('Error reading log line from pubsub: %s' % ex) + quit_now = True + exit_code = 2 + +def snat4(snat_target): + global lock + global quit_now + + while not quit_now: + time.sleep(10) + with lock: + tables.snat4(snat_target, os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24') + +def snat6(snat_target): + global lock + global quit_now + + while not quit_now: + time.sleep(10) + with lock: + tables.snat6(snat_target, os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64')) + +def autopurge(): + while not quit_now: + time.sleep(10) + refreshF2boptions() + BAN_TIME = int(f2boptions['ban_time']) + MAX_BAN_TIME = int(f2boptions['max_ban_time']) + BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment']) + MAX_ATTEMPTS = int(f2boptions['max_attempts']) + QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN') + if QUEUE_UNBAN: + for net in QUEUE_UNBAN: + unban(str(net)) + for net in bans.copy(): + if bans[net]['attempts'] >= MAX_ATTEMPTS: + NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] + TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt'] + if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME: + unban(net) + +def mailcowChainOrder(): + global lock + global quit_now + global exit_code + while not quit_now: + time.sleep(10) + with lock: + quit_now, exit_code = tables.checkIPv4ChainOrder() + if quit_now: return + quit_now, exit_code = tables.checkIPv6ChainOrder() + +def isIpNetwork(address): + try: + ipaddress.ip_network(address, False) + except ValueError: + return False + return True + +def genNetworkList(list): + resolver = dns.resolver.Resolver() + hostnames = [] + networks = [] + for key in list: + if isIpNetwork(key): + networks.append(key) + else: + hostnames.append(key) + for hostname in hostnames: + hostname_ips = [] + for rdtype in ['A', 'AAAA']: + try: + 
answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3) + except dns.exception.Timeout: + logger.logInfo('Hostname %s timedout on resolve' % hostname) + break + except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): + continue + except dns.exception.DNSException as dnsexception: + logger.logInfo('%s' % dnsexception) + continue + for rdata in answer: + hostname_ips.append(rdata.to_text()) + networks.extend(hostname_ips) + return set(networks) + +def whitelistUpdate(): + global lock + global quit_now + global WHITELIST + while not quit_now: + start_time = time.time() + list = r.hgetall('F2B_WHITELIST') + new_whitelist = [] + if list: + new_whitelist = genNetworkList(list) + with lock: + if Counter(new_whitelist) != Counter(WHITELIST): + WHITELIST = new_whitelist + logger.logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST)) + time.sleep(60.0 - ((time.time() - start_time) % 60.0)) + +def blacklistUpdate(): + global quit_now + global BLACKLIST + while not quit_now: + start_time = time.time() + list = r.hgetall('F2B_BLACKLIST') + new_blacklist = [] + if list: + new_blacklist = genNetworkList(list) + if Counter(new_blacklist) != Counter(BLACKLIST): + addban = set(new_blacklist).difference(BLACKLIST) + delban = set(BLACKLIST).difference(new_blacklist) + BLACKLIST = new_blacklist + logger.logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST)) + if addban: + for net in addban: + permBan(net=net) + if delban: + for net in delban: + permBan(net=net, unban=True) + time.sleep(60.0 - ((time.time() - start_time) % 60.0)) + +def quit(signum, frame): + global quit_now + quit_now = True + + +if __name__ == '__main__': + # In case a previous session was killed without cleanup + clear() + # Reinit MAILCOW chain + # Is called before threads start, no locking + logger.logInfo("Initializing mailcow netfilter chain") + tables.initChainIPv4() + tables.initChainIPv6() + + watch_thread = Thread(target=watch) + watch_thread.daemon = True + watch_thread.start() + + if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n': + try: + snat_ip = os.getenv('SNAT_TO_SOURCE') + snat_ipo = ipaddress.ip_address(snat_ip) + if type(snat_ipo) is ipaddress.IPv4Address: + snat4_thread = Thread(target=snat4,args=(snat_ip,)) + snat4_thread.daemon = True + snat4_thread.start() + except ValueError: + print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address') + + if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n': + try: + snat_ip = os.getenv('SNAT6_TO_SOURCE') + snat_ipo = ipaddress.ip_address(snat_ip) + if type(snat_ipo) is ipaddress.IPv6Address: + snat6_thread = Thread(target=snat6,args=(snat_ip,)) + snat6_thread.daemon = True + snat6_thread.start() + except ValueError: + print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address') + + autopurge_thread = Thread(target=autopurge) + autopurge_thread.daemon = True + autopurge_thread.start() + + mailcowchainwatch_thread = Thread(target=mailcowChainOrder) + mailcowchainwatch_thread.daemon = True + mailcowchainwatch_thread.start() + + blacklistupdate_thread = Thread(target=blacklistUpdate) + blacklistupdate_thread.daemon = True + blacklistupdate_thread.start() + + whitelistupdate_thread = Thread(target=whitelistUpdate) + whitelistupdate_thread.daemon = True + whitelistupdate_thread.start() + + signal.signal(signal.SIGTERM, quit) + atexit.register(clear) + + while not quit_now: + time.sleep(0.5) + + sys.exit(exit_code) diff --git a/data/Dockerfiles/netfilter/modules/IPTables.py 
b/data/Dockerfiles/netfilter/modules/IPTables.py new file mode 100644 index 00000000..c60ecc61 --- /dev/null +++ b/data/Dockerfiles/netfilter/modules/IPTables.py @@ -0,0 +1,213 @@ +import iptc +import time + +class IPTables: + def __init__(self, chain_name, logger): + self.chain_name = chain_name + self.logger = logger + + def initChainIPv4(self): + if not iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name) in iptc.Table(iptc.Table.FILTER).chains: + iptc.Table(iptc.Table.FILTER).create_chain(self.chain_name) + for c in ['FORWARD', 'INPUT']: + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c) + rule = iptc.Rule() + rule.src = '0.0.0.0/0' + rule.dst = '0.0.0.0/0' + target = iptc.Target(rule, self.chain_name) + rule.target = target + if rule not in chain.rules: + chain.insert_rule(rule) + + def initChainIPv6(self): + if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name) in iptc.Table6(iptc.Table6.FILTER).chains: + iptc.Table6(iptc.Table6.FILTER).create_chain(self.chain_name) + for c in ['FORWARD', 'INPUT']: + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c) + rule = iptc.Rule6() + rule.src = '::/0' + rule.dst = '::/0' + target = iptc.Target(rule, self.chain_name) + rule.target = target + if rule not in chain.rules: + chain.insert_rule(rule) + + def checkIPv4ChainOrder(self): + filter_table = iptc.Table(iptc.Table.FILTER) + filter_table.refresh() + return self.checkChainOrder(filter_table) + + def checkIPv6ChainOrder(self): + filter_table = iptc.Table6(iptc.Table6.FILTER) + filter_table.refresh() + return self.checkChainOrder(filter_table) + + def checkChainOrder(self, filter_table): + err = False + exit_code = None + + forward_chain = iptc.Chain(filter_table, 'FORWARD') + input_chain = iptc.Chain(filter_table, 'INPUT') + for chain in [forward_chain, input_chain]: + target_found = False + for position, item in enumerate(chain.rules): + if item.target.name == self.chain_name: + target_found = True + if position > 2: + self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name)) + err = True + exit_code = 2 + if not target_found: + self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name)) + err = True + exit_code = 2 + + return err, exit_code + + def clearIPv4Table(self): + self.clearTable(iptc.Table(iptc.Table.FILTER)) + + def clearIPv6Table(self): + self.clearTable(iptc.Table6(iptc.Table6.FILTER)) + + def clearTable(self, filter_table): + filter_table.autocommit = False + forward_chain = iptc.Chain(filter_table, "FORWARD") + input_chain = iptc.Chain(filter_table, "INPUT") + mailcow_chain = iptc.Chain(filter_table, self.chain_name) + if mailcow_chain in filter_table.chains: + for rule in mailcow_chain.rules: + mailcow_chain.delete_rule(rule) + for rule in forward_chain.rules: + if rule.target.name == self.chain_name: + forward_chain.delete_rule(rule) + for rule in input_chain.rules: + if rule.target.name == self.chain_name: + input_chain.delete_rule(rule) + filter_table.delete_chain(self.chain_name) + filter_table.commit() + filter_table.refresh() + filter_table.autocommit = True + + def banIPv4(self, source): + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name) + rule = iptc.Rule() + rule.src = source + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule in chain.rules: + return False + chain.insert_rule(rule) + return True + + def banIPv6(self, source): + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 
self.chain_name) + rule = iptc.Rule6() + rule.src = source + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule in chain.rules: + return False + chain.insert_rule(rule) + return True + + def unbanIPv4(self, source): + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name) + rule = iptc.Rule() + rule.src = source + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules: + return False + chain.delete_rule(rule) + return True + + def unbanIPv6(self, source): + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name) + rule = iptc.Rule6() + rule.src = source + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules: + return False + chain.delete_rule(rule) + return True + + def snat4(self, snat_target, source): + try: + table = iptc.Table('nat') + table.refresh() + chain = iptc.Chain(table, 'POSTROUTING') + table.autocommit = False + new_rule = self.getSnat4Rule(snat_target, source) + + if not chain.rules: + # if there are no rules in the chain, insert the new rule directly + self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') + chain.insert_rule(new_rule) + else: + for position, rule in enumerate(chain.rules): + if not hasattr(rule.target, 'parameter'): + continue + match = all(( + new_rule.get_src() == rule.get_src(), + new_rule.get_dst() == rule.get_dst(), + new_rule.target.parameters == rule.target.parameters, + new_rule.target.name == rule.target.name + )) + if position == 0: + if not match: + self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') + chain.insert_rule(new_rule) + else: + if match: + self.logger.logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') + chain.delete_rule(rule) + + table.commit() + table.autocommit = True + return True + except: + self.logger.logCrit('Error running SNAT4, retrying...') + return False + + def snat6(self, snat_target, source): + try: + table = iptc.Table6('nat') + table.refresh() + chain = iptc.Chain(table, 'POSTROUTING') + table.autocommit = False + new_rule = self.getSnat6Rule(snat_target, source) + + if new_rule not in chain.rules: + self.logger.logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (new_rule.src, snat_target)) + chain.insert_rule(new_rule) + else: + for position, item in enumerate(chain.rules): + if item == new_rule: + if position != 0: + chain.delete_rule(new_rule) + + table.commit() + table.autocommit = True + except: + self.logger.logCrit('Error running SNAT6, retrying...') + + + def getSnat4Rule(self, snat_target, source): + rule = iptc.Rule() + rule.src = source + rule.dst = '!' + rule.src + target = rule.create_target("SNAT") + target.to_source = snat_target + match = rule.create_match("comment") + match.comment = f'{int(round(time.time()))}' + return rule + + def getSnat6Rule(self, snat_target, source): + rule = iptc.Rule6() + rule.src = source + rule.dst = '!' 
+ rule.src + target = rule.create_target("SNAT") + target.to_source = snat_target + return rule diff --git a/data/Dockerfiles/netfilter/modules/Logger.py b/data/Dockerfiles/netfilter/modules/Logger.py new file mode 100644 index 00000000..d60d52fa --- /dev/null +++ b/data/Dockerfiles/netfilter/modules/Logger.py @@ -0,0 +1,23 @@ +import time +import json + +class Logger: + def __init__(self, redis): + self.r = redis + + def log(self, priority, message): + tolog = {} + tolog['time'] = int(round(time.time())) + tolog['priority'] = priority + tolog['message'] = message + self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False)) + print(message) + + def logWarn(self, message): + self.log('warn', message) + + def logCrit(self, message): + self.log('crit', message) + + def logInfo(self, message): + self.log('info', message) diff --git a/data/Dockerfiles/netfilter/modules/NFTables.py b/data/Dockerfiles/netfilter/modules/NFTables.py new file mode 100644 index 00000000..d341dc36 --- /dev/null +++ b/data/Dockerfiles/netfilter/modules/NFTables.py @@ -0,0 +1,495 @@ +import nftables +import ipaddress + +class NFTables: + def __init__(self, chain_name, logger): + self.chain_name = chain_name + self.logger = logger + + self.nft = nftables.Nftables() + self.nft.set_json_output(True) + self.nft.set_handle_output(True) + self.nft_chain_names = {'ip': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} }, + 'ip6': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} } } + + self.search_current_chains() + + def initChainIPv4(self): + self.insert_mailcow_chains("ip") + + def initChainIPv6(self): + self.insert_mailcow_chains("ip6") + + def checkIPv4ChainOrder(self): + return self.checkChainOrder("ip") + + def checkIPv6ChainOrder(self): + return self.checkChainOrder("ip6") + + def checkChainOrder(self, filter_table): + err = False + exit_code = None + + for chain in ['input', 'forward']: + chain_position = self.check_mailcow_chains(filter_table, chain) + if chain_position is None: continue + + if chain_position is False: + self.logger.logCrit(f'MAILCOW target not found in {filter_table} {chain} table, restarting container to fix it...') + err = True + exit_code = 2 + + if chain_position > 0: + self.logger.logCrit(f'MAILCOW target is in position {chain_position} in the {filter_table} {chain} table, restarting container to fix it...') + err = True + exit_code = 2 + + return err, exit_code + + def clearIPv4Table(self): + self.clearTable("ip") + + def clearIPv6Table(self): + self.clearTable("ip6") + + def clearTable(self, _family): + is_empty_dict = True + json_command = self.get_base_dict() + chain_handle = self.get_chain_handle(_family, "filter", self.chain_name) + # if no handle, the chain doesn't exists + if chain_handle is not None: + is_empty_dict = False + # flush chain + mailcow_chain = {'family': _family, 'table': 'filter', 'name': self.chain_name} + flush_chain = {'flush': {'chain': mailcow_chain}} + json_command["nftables"].append(flush_chain) + + # remove rule in forward chain + # remove rule in input chain + chains_family = [self.nft_chain_names[_family]['filter']['input'], + self.nft_chain_names[_family]['filter']['forward'] ] + + for chain_base in chains_family: + if not chain_base: continue + + rules_handle = self.get_rules_handle(_family, "filter", chain_base) + if rules_handle is not None: + for r_handle in rules_handle: + is_empty_dict = False + mailcow_rule = {'family':_family, + 'table': 'filter', + 'chain': chain_base, + 'handle': r_handle } + 
delete_rules = {'delete': {'rule': mailcow_rule} } + json_command["nftables"].append(delete_rules) + + # remove chain + # after delete all rules referencing this chain + if chain_handle is not None: + mc_chain_handle = {'family':_family, + 'table': 'filter', + 'name': self.chain_name, + 'handle': chain_handle } + delete_chain = {'delete': {'chain': mc_chain_handle} } + json_command["nftables"].append(delete_chain) + + if is_empty_dict == False: + if self.nft_exec_dict(json_command): + self.logger.logInfo(f"Clear completed: {_family}") + + def banIPv4(self, source): + ban_dict = self.get_ban_ip_dict(source, "ip") + return self.nft_exec_dict(ban_dict) + + def banIPv6(self, source): + ban_dict = self.get_ban_ip_dict(source, "ip6") + return self.nft_exec_dict(ban_dict) + + def unbanIPv4(self, source): + unban_dict = self.get_unban_ip_dict(source, "ip") + if not unban_dict: + return False + return self.nft_exec_dict(unban_dict) + + def unbanIPv6(self, source): + unban_dict = self.get_unban_ip_dict(source, "ip6") + if not unban_dict: + return False + return self.nft_exec_dict(unban_dict) + + def snat4(self, snat_target, source): + self.snat_rule("ip", snat_target, source) + + def snat6(self, snat_target, source): + self.snat_rule("ip6", snat_target, source) + + + def nft_exec_dict(self, query: dict): + if not query: return False + + rc, output, error = self.nft.json_cmd(query) + if rc != 0: + #self.logger.logCrit(f"Nftables Error: {error}") + return False + + # Prevent returning False or empty string on commands that do not produce output + if rc == 0 and len(output) == 0: + return True + + return output + + def get_base_dict(self): + return {'nftables': [{ 'metainfo': { 'json_schema_version': 1} } ] } + + def search_current_chains(self): + nft_chain_priority = {'ip': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} }, + 'ip6': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} } } + + # Command: 'nft list chains' + _list = {'list' : {'chains': 'null'} } + command = self.get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = self.nft_exec_dict(command) + if kernel_ruleset: + for _object in kernel_ruleset['nftables']: + chain = _object.get("chain") + if not chain: continue + + _family = chain['family'] + _table = chain['table'] + _hook = chain.get("hook") + _priority = chain.get("prio") + _name = chain['name'] + + if _family not in self.nft_chain_names: continue + if _table not in self.nft_chain_names[_family]: continue + if _hook not in self.nft_chain_names[_family][_table]: continue + if _priority is None: continue + + _saved_priority = nft_chain_priority[_family][_table][_hook] + if _saved_priority is None or _priority < _saved_priority: + # at this point, we know the chain has: + # hook and priority set + # and it has the lowest priority + nft_chain_priority[_family][_table][_hook] = _priority + self.nft_chain_names[_family][_table][_hook] = _name + + def search_for_chain(self, kernel_ruleset: dict, chain_name: str): + found = False + for _object in kernel_ruleset["nftables"]: + chain = _object.get("chain") + if not chain: + continue + ch_name = chain.get("name") + if ch_name == chain_name: + found = True + break + return found + + def get_chain_dict(self, _family: str, _name: str): + # nft (add | create) chain [] + _chain_opts = {'family': _family, 'table': 'filter', 'name': _name } + _add = {'add': {'chain': _chain_opts} } + final_chain = self.get_base_dict() + final_chain["nftables"].append(_add) + return final_chain + + def 
get_mailcow_jump_rule_dict(self, _family: str, _chain: str): + _jump_rule = self.get_base_dict() + _expr_opt=[] + _expr_counter = {'family': _family, 'table': 'filter', 'packets': 0, 'bytes': 0} + _counter_dict = {'counter': _expr_counter} + _expr_opt.append(_counter_dict) + + _jump_opts = {'jump': {'target': self.chain_name} } + + _expr_opt.append(_jump_opts) + + _rule_params = {'family': _family, + 'table': 'filter', + 'chain': _chain, + 'expr': _expr_opt, + 'comment': "mailcow" } + + _add_rule = {'insert': {'rule': _rule_params} } + + _jump_rule["nftables"].append(_add_rule) + + return _jump_rule + + def insert_mailcow_chains(self, _family: str): + nft_input_chain = self.nft_chain_names[_family]['filter']['input'] + nft_forward_chain = self.nft_chain_names[_family]['filter']['forward'] + # Command: 'nft list table filter' + _table_opts = {'family': _family, 'name': 'filter'} + _list = {'list': {'table': _table_opts} } + command = self.get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = self.nft_exec_dict(command) + if kernel_ruleset: + # chain + if not self.search_for_chain(kernel_ruleset, self.chain_name): + cadena = self.get_chain_dict(_family, self.chain_name) + if self.nft_exec_dict(cadena): + self.logger.logInfo(f"MAILCOW {_family} chain created successfully.") + + input_jump_found, forward_jump_found = False, False + + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): + continue + + rule = _object["rule"] + if nft_input_chain and rule["chain"] == nft_input_chain: + if rule.get("comment") and rule["comment"] == "mailcow": + input_jump_found = True + if nft_forward_chain and rule["chain"] == nft_forward_chain: + if rule.get("comment") and rule["comment"] == "mailcow": + forward_jump_found = True + + if not input_jump_found: + command = self.get_mailcow_jump_rule_dict(_family, nft_input_chain) + self.nft_exec_dict(command) + + if not forward_jump_found: + command = self.get_mailcow_jump_rule_dict(_family, nft_forward_chain) + self.nft_exec_dict(command) + + def delete_nat_rule(self, _family:str, _chain: str, _handle:str): + delete_command = self.get_base_dict() + _rule_opts = {'family': _family, + 'table': 'nat', + 'chain': _chain, + 'handle': _handle } + _delete = {'delete': {'rule': _rule_opts} } + delete_command["nftables"].append(_delete) + + return self.nft_exec_dict(delete_command) + + def snat_rule(self, _family: str, snat_target: str, source_address: str): + chain_name = self.nft_chain_names[_family]['nat']['postrouting'] + + # no postrouting chain, may occur if docker has ipv6 disabled. 
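+    # Descriptive note on the logic below: the postrouting chain is listed via the
+    # nftables JSON API and scanned for a rule carrying the "mailcow" comment. A
+    # found rule is deleted again when it sits below position 0, or when it is at
+    # position 0 but its source/destination prefixes or SNAT target no longer match
+    # the configured networks; when no such rule exists, a new rule
+    # (saddr == source, daddr != source, counter, snat to target) is inserted at
+    # the top of the chain.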
+ if not chain_name: return + + # Command: nft list chain nat + _chain_opts = {'family': _family, 'table': 'nat', 'name': chain_name} + _list = {'list':{'chain': _chain_opts} } + command = self.get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = self.nft_exec_dict(command) + if not kernel_ruleset: + return + + rule_position = 0 + rule_handle = None + rule_found = False + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): + continue + + rule = _object["rule"] + if not rule.get("comment") or not rule["comment"] == "mailcow": + rule_position +=1 + continue + + rule_found = True + rule_handle = rule["handle"] + break + + dest_net = ipaddress.ip_network(source_address) + target_net = ipaddress.ip_network(snat_target) + + if rule_found: + saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"] + saddr_len = int(rule["expr"][0]["match"]["right"]["prefix"]["len"]) + + daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"] + daddr_len = int(rule["expr"][1]["match"]["right"]["prefix"]["len"]) + + target_ip = rule["expr"][3]["snat"]["addr"] + + saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len)) + daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len)) + current_target_net = ipaddress.ip_network(target_ip) + + match = all(( + dest_net == saddr_net, + dest_net == daddr_net, + target_net == current_target_net + )) + try: + if rule_position == 0: + if not match: + # Position 0 , it is a mailcow rule , but it does not have the same parameters + if self.delete_nat_rule(_family, chain_name, rule_handle): + self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule does not match configured parameters') + else: + # Position > 0 and is mailcow rule + if self.delete_nat_rule(_family, chain_name, rule_handle): + self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule is at position {rule_position}') + except: + self.logger.logCrit(f"Error running SNAT on {_family}, retrying..." 
) + else: + # rule not found + json_command = self.get_base_dict() + try: + snat_dict = {'snat': {'addr': str(target_net.network_address)} } + + expr_counter = {'family': _family, 'table': 'nat', 'packets': 0, 'bytes': 0} + counter_dict = {'counter': expr_counter} + + prefix_dict = {'prefix': {'addr': str(dest_net.network_address), 'len': int(dest_net.prefixlen)} } + payload_dict = {'payload': {'protocol': _family, 'field': "saddr"} } + match_dict1 = {'match': {'op': '==', 'left': payload_dict, 'right': prefix_dict} } + + payload_dict2 = {'payload': {'protocol': _family, 'field': "daddr"} } + match_dict2 = {'match': {'op': '!=', 'left': payload_dict2, 'right': prefix_dict } } + expr_list = [ + match_dict1, + match_dict2, + counter_dict, + snat_dict + ] + rule_fields = {'family': _family, + 'table': 'nat', + 'chain': chain_name, + 'comment': "mailcow", + 'expr': expr_list } + + insert_dict = {'insert': {'rule': rule_fields} } + json_command["nftables"].append(insert_dict) + if self.nft_exec_dict(json_command): + self.logger.logInfo(f'Added {_family} nat {chain_name} rule for source network {dest_net} to {target_net}') + except: + self.logger.logCrit(f"Error running SNAT on {_family}, retrying...") + + def get_chain_handle(self, _family: str, _table: str, chain_name: str): + chain_handle = None + # Command: 'nft list chains {family}' + _list = {'list': {'chains': {'family': _family} } } + command = self.get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = self.nft_exec_dict(command) + if kernel_ruleset: + for _object in kernel_ruleset["nftables"]: + if not _object.get("chain"): + continue + chain = _object["chain"] + if chain["family"] == _family and chain["table"] == _table and chain["name"] == chain_name: + chain_handle = chain["handle"] + break + return chain_handle + + def get_rules_handle(self, _family: str, _table: str, chain_name: str): + rule_handle = [] + # Command: 'nft list chain {family} {table} {chain_name}' + _chain_opts = {'family': _family, 'table': _table, 'name': chain_name} + _list = {'list': {'chain': _chain_opts} } + command = self.get_base_dict() + command['nftables'].append(_list) + + kernel_ruleset = self.nft_exec_dict(command) + if kernel_ruleset: + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): + continue + + rule = _object["rule"] + if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name: + if rule.get("comment") and rule["comment"] == "mailcow": + rule_handle.append(rule["handle"]) + return rule_handle + + def get_ban_ip_dict(self, ipaddr: str, _family: str): + json_command = self.get_base_dict() + + expr_opt = [] + ipaddr_net = ipaddress.ip_network(ipaddr) + right_dict = {'prefix': {'addr': str(ipaddr_net.network_address), 'len': int(ipaddr_net.prefixlen) } } + + left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} } + match_dict = {'op': '==', 'left': left_dict, 'right': right_dict } + expr_opt.append({'match': match_dict}) + + counter_dict = {'counter': {'family': _family, 'table': "filter", 'packets': 0, 'bytes': 0} } + expr_opt.append(counter_dict) + + expr_opt.append({'drop': "null"}) + + rule_dict = {'family': _family, 'table': "filter", 'chain': self.chain_name, 'expr': expr_opt} + + base_dict = {'insert': {'rule': rule_dict} } + json_command["nftables"].append(base_dict) + + return json_command + + def get_unban_ip_dict(self, ipaddr:str, _family: str): + json_command = self.get_base_dict() + # Command: 'nft list chain {s_family} filter MAILCOW' + _chain_opts = 
{'family': _family, 'table': 'filter', 'name': self.chain_name} + _list = {'list': {'chain': _chain_opts} } + command = self.get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = self.nft_exec_dict(command) + rule_handle = None + if kernel_ruleset: + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): + continue + + rule = _object["rule"]["expr"][0]["match"] + left_opt = rule["left"]["payload"] + if not left_opt["protocol"] == _family: + continue + if not left_opt["field"] =="saddr": + continue + + # ip currently banned + rule_right = rule["right"] + if isinstance(rule_right, dict): + current_rule_ip = rule_right["prefix"]["addr"] + '/' + str(rule_right["prefix"]["len"]) + else: + current_rule_ip = rule_right + current_rule_net = ipaddress.ip_network(current_rule_ip) + + # ip to ban + candidate_net = ipaddress.ip_network(ipaddr) + + if current_rule_net == candidate_net: + rule_handle = _object["rule"]["handle"] + break + + if rule_handle is not None: + mailcow_rule = {'family': _family, 'table': 'filter', 'chain': self.chain_name, 'handle': rule_handle} + delete_rule = {'delete': {'rule': mailcow_rule} } + json_command["nftables"].append(delete_rule) + else: + return False + + return json_command + + def check_mailcow_chains(self, family: str, chain: str): + position = 0 + rule_found = False + chain_name = self.nft_chain_names[family]['filter'][chain] + + if not chain_name: return None + + _chain_opts = {'family': family, 'table': 'filter', 'name': chain_name} + _list = {'list': {'chain': _chain_opts}} + command = self.get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = self.nft_exec_dict(command) + if kernel_ruleset: + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): + continue + rule = _object["rule"] + if rule.get("comment") and rule["comment"] == "mailcow": + rule_found = True + break + + position+=1 + + return position if rule_found else False diff --git a/data/Dockerfiles/netfilter/modules/__init__.py b/data/Dockerfiles/netfilter/modules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/docker-compose.yml b/docker-compose.yml index 8d84e3a7..bba0610f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -434,7 +434,7 @@ services: - acme netfilter-mailcow: - image: mailcow/netfilter:1.52 + image: mailcow/netfilter:1.53 stop_grace_period: 30s depends_on: - dovecot-mailcow From 340980bdd0b3e44c13bd8b4b3ef37dbea823d108 Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 27 Nov 2023 17:32:41 +0100 Subject: [PATCH 20/36] [Netfilter] set image back to mailcow/netfilter:1.52 --- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index bba0610f..8d84e3a7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -434,7 +434,7 @@ services: - acme netfilter-mailcow: - image: mailcow/netfilter:1.53 + image: mailcow/netfilter:1.52 stop_grace_period: 30s depends_on: - dovecot-mailcow From 0257736c64374f44b69dd5be4ab9bbd8320dce18 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 7 Dec 2023 15:57:53 +0100 Subject: [PATCH 21/36] Update actions/stale action to v9 (#5579) Signed-off-by: milkmaker Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/workflows/close_old_issues_and_prs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/close_old_issues_and_prs.yml 
b/.github/workflows/close_old_issues_and_prs.yml index 21ab3a8e..391de66d 100644 --- a/.github/workflows/close_old_issues_and_prs.yml +++ b/.github/workflows/close_old_issues_and_prs.yml @@ -14,7 +14,7 @@ jobs: pull-requests: write steps: - name: Mark/Close Stale Issues and Pull Requests 🗑️ - uses: actions/stale@v8.0.0 + uses: actions/stale@v9.0.0 with: repo-token: ${{ secrets.STALE_ACTION_PAT }} days-before-stale: 60 From 888200670043cc1ef21448fbe83df9604c165574 Mon Sep 17 00:00:00 2001 From: milkmaker Date: Sun, 10 Dec 2023 18:07:28 +0100 Subject: [PATCH 22/36] Translations update from Weblate (#5583) * [Web] Updated lang.cs-cz.json Co-authored-by: Kristian Feldsam * [Web] Updated lang.de-de.json Co-authored-by: Peter * [Web] Updated lang.sk-sk.json Co-authored-by: Kristian Feldsam * [Web] Updated lang.pt-br.json [Web] Updated lang.pt-br.json Co-authored-by: Abner Santana Co-authored-by: xmacaba --------- Co-authored-by: Kristian Feldsam Co-authored-by: Peter Co-authored-by: Abner Santana Co-authored-by: xmacaba --- data/web/lang/lang.cs-cz.json | 75 ++++++++++++++++++++++++++++++----- data/web/lang/lang.de-de.json | 10 ++++- data/web/lang/lang.pt-br.json | 9 +++-- data/web/lang/lang.sk-sk.json | 37 ++++++++++++++--- 4 files changed, 109 insertions(+), 22 deletions(-) diff --git a/data/web/lang/lang.cs-cz.json b/data/web/lang/lang.cs-cz.json index 25d4c67c..2d0b1014 100644 --- a/data/web/lang/lang.cs-cz.json +++ b/data/web/lang/lang.cs-cz.json @@ -107,7 +107,8 @@ "username": "Uživatelské jméno", "validate": "Ověřit", "validation_success": "Úspěšně ověřeno", - "tags": "Štítky" + "tags": "Štítky", + "dry": "Simulovat synchronizaci" }, "admin": { "access": "Přístupy", @@ -218,7 +219,7 @@ "loading": "Prosím čekejte...", "login_time": "Čas přihlášení", "logo_info": "Obrázek bude zmenšen na výšku 40 pixelů pro horní navigační lištu a na max. 
šířku 250 pixelů pro úvodní stránku.", - "lookup_mx": "Ověřit cíl proti MX záznamu (.outlook.com bude směrovat všechnu poštu pro MX *.outlook.com přes tento uzel)", + "lookup_mx": "Cíl je regulární výraz, který se porovná s názvem MX (.*\\.google\\.com pro směrování veškeré pošty cílené na MX, který končí na google.com přes tento skok)", "main_name": "Název webu (\"mailcow UI\")", "merged_vars_hint": "Šedé řádky byly přidány z vars.(local.)inc.php a zde je nelze upravit.", "message": "Zpráva", @@ -343,7 +344,9 @@ "verify": "Ověřit", "yes": "✓", "f2b_ban_time_increment": "Délka banu je prodlužována s každým dalším banem", - "f2b_max_ban_time": "Maximální délka banu (s)" + "f2b_max_ban_time": "Maximální délka banu (s)", + "cors_settings": "Nastavení CORS", + "queue_unban": "zrušit ban" }, "danger": { "access_denied": "Přístup odepřen nebo jsou neplatná data ve formuláři", @@ -465,7 +468,14 @@ "username_invalid": "Uživatelské jméno %s nelze použít", "validity_missing": "Zdejte dobu platnosti", "value_missing": "Prosím, uveďte všechny hodnoty", - "yotp_verification_failed": "Yubico OTP ověření selhalo: %s" + "yotp_verification_failed": "Yubico OTP ověření selhalo: %s", + "webauthn_authenticator_failed": "Zvolený ověřovací prostředek nebyl nalezen", + "cors_invalid_method": "Zadaná neplatná metoda Allow-Method", + "cors_invalid_origin": "Zadán neplatný Allow-Origin", + "webauthn_publickey_failed": "Pro vybraný ověřovací prostředek nebyl uložen žádný veřejný klíč", + "webauthn_username_failed": "Zvolený ověřovací prostředek patří k jinému účtu", + "extended_sender_acl_denied": "chybějící ACL pro nastavení externích adres odesílatele", + "demo_mode_enabled": "Demo režim je zapnutý" }, "datatables": { "emptyTable": "Tabulka neobsahuje žádná data", @@ -488,7 +498,9 @@ "processing": "Zpracovávání...", "search": "Vyhledávání:", "decimal": ",", - "thousands": " " + "thousands": " ", + "collapse_all": "Sbalit vše", + "expand_all": "Rozbalit vše" }, "debug": { "chart_this_server": "Graf (tento server)", @@ -515,7 +527,20 @@ "success": "Úspěch", "system_containers": "Systém a kontejnery", "uptime": "Doba běhu", - "username": "Uživatelské meno" + "username": "Uživatelské meno", + "architecture": "Architektura", + "error_show_ip": "Nepodařilo se přeložit veřejné IP adresy", + "show_ip": "Zobrazit veřejné IP adresy", + "container_running": "Běží", + "container_stopped": "Zastaven", + "current_time": "Systémový čas", + "timezone": "Časové pásmo", + "update_available": "K dispozici je aktualizace", + "no_update_available": "Systém je na nejnovější verzi", + "update_failed": "Nepodařilo se zkontrolovat aktualizace", + "wip": "Nedokončená vývojová verze", + "memory": "Paměť", + "container_disabled": "Kontejner je zastaven nebo zakázán" }, "diagnostics": { "cname_from_a": "Hodnota odvozena z A/AAAA záznamu. Lze použít, pokud záznam ukazuje na správný zdroj.", @@ -640,7 +665,19 @@ "title": "Úprava objektu", "unchanged_if_empty": "Pokud se nemění, ponechte prázdné", "username": "Uživatelské jméno", - "validate_save": "Ověřit a uložit" + "validate_save": "Ověřit a uložit", + "domain_footer_info": "Patičky pro celou doménu se přidávají ke všem odchozím e-mailům spojeným s adresou v rámci této domény.
Pro patičku lze použít následující proměnné:", + "domain_footer_info_vars": { + "from_name": "{= from_name =} - Jméno odesílatele, např. pro \"Mailcow <moo@mailcow.tld>\" vrátí \"Mailcow\"", + "auth_user": "{= auth_user =} - Ověřené uživatelské jméno zadané MTA", + "from_user": "{= from_user =} - uživatelská část odesílatele, např. pro \"moo@mailcow.tld\" vrátí \"moo\"", + "from_domain": "{= from_domain =} - Doména odesílatele", + "from_addr": "{= from_addr =} - E-mailová adresa odesílatele" + }, + "domain_footer": "Patička pro celou doménu", + "domain_footer_html": "HTML text", + "domain_footer_plain": "Prostý text", + "pushover_sound": "Zvukové upozornění" }, "fido2": { "confirm": "Potvrdit", @@ -870,7 +907,8 @@ "username": "Uživatelské jméno", "waiting": "Čekání", "weekly": "Každý týden", - "yes": "✓" + "yes": "✓", + "relay_unknown": "Předávání neexistujících schránek" }, "oauth2": { "access_denied": "K udělení přístupu se přihlašte jako vlastník mailové schránky.", @@ -935,7 +973,19 @@ "type": "Typ" }, "queue": { - "queue_manager": "Správce fronty" + "queue_manager": "Správce fronty", + "delete": "Vymazat vše", + "info": "Poštovní fronta obsahuje všechny e-maily, které čekají na doručení. Pokud e-mail uvízne v poštovní frontě na delší dobu, systém jej automaticky odstraní.
Chybové hlášení příslušného e-mailu poskytuje informace o tom, proč se e-mail nepodařilo doručit.", + "flush": "Vyprázdnit frontu", + "legend": "Funkce operací poštovní fronty:", + "ays": "Potvrďte, že chcete opravdu odstranit všechny položky z aktuální fronty.", + "deliver_mail": "Doručit", + "deliver_mail_legend": "Opětovný pokus o doručení vybraných e-mailů.", + "hold_mail": "Podržet", + "hold_mail_legend": "Podrží vybrané e-maily. (Zabrání dalším pokusům o doručení)", + "show_message": "Zobrazit zprávu", + "unhold_mail": "Uvolnit", + "unhold_mail_legend": "Uvolnit vybrané e-maily k doručení. (Pouze v případě předchozího podržení)" }, "ratelimit": { "disabled": "Vypnuto", @@ -1029,7 +1079,9 @@ "verified_fido2_login": "Ověřené FIDO2 přihlášení", "verified_totp_login": "TOTP přihlášení ověřeno", "verified_webauthn_login": "WebAuthn přihlášení ověřeno", - "verified_yotp_login": "Yubico OTP přihlášení ověřeno" + "verified_yotp_login": "Yubico OTP přihlášení ověřeno", + "cors_headers_edited": "Nastavení CORS byla uložena", + "domain_footer_modified": "Změny patičky domény %s byly uloženy" }, "tfa": { "api_register": "%s používá Yubico Cloud API. Prosím získejte API klíč pro své Yubico ZDE", @@ -1215,7 +1267,8 @@ "weeks": "týdny", "with_app_password": "s heslem aplikace", "year": "rok", - "years": "let" + "years": "let", + "pushover_sound": "Zvukové upozornění" }, "warning": { "cannot_delete_self": "Nelze smazat právě přihlášeného uživatele", diff --git a/data/web/lang/lang.de-de.json b/data/web/lang/lang.de-de.json index 5737a664..31cc07b9 100644 --- a/data/web/lang/lang.de-de.json +++ b/data/web/lang/lang.de-de.json @@ -346,7 +346,9 @@ "oauth2_apps": "OAuth2 Apps", "queue_unban": "entsperren", "allowed_methods": "Access-Control-Allow-Methods", - "allowed_origins": "Access-Control-Allow-Origin" + "allowed_origins": "Access-Control-Allow-Origin", + "logo_dark_label": "Invertiert für den Darkmode", + "logo_normal_label": "Normal" }, "danger": { "access_denied": "Zugriff verweigert oder unvollständige/ungültige Daten", @@ -675,7 +677,11 @@ "unchanged_if_empty": "Unverändert, wenn leer", "username": "Benutzername", "validate_save": "Validieren und speichern", - "pushover_sound": "Ton" + "pushover_sound": "Ton", + "domain_footer_info_vars": { + "auth_user": "{= auth_user =} - Angemeldeter Benutzername vom MTA", + "from_user": "{= from_user =} - Von Teil des Benutzers z.B. \"moo@mailcow.tld\" wird \"moo\" zurückgeben." 
+ } }, "fido2": { "confirm": "Bestätigen", diff --git a/data/web/lang/lang.pt-br.json b/data/web/lang/lang.pt-br.json index 4361b607..0c7cb083 100644 --- a/data/web/lang/lang.pt-br.json +++ b/data/web/lang/lang.pt-br.json @@ -509,7 +509,7 @@ "architecture": "Arquitetura", "chart_this_server": "Gráfico (este servidor)", "containers_info": "Informações do contêiner", - "container_running": "Correndo", + "container_running": "Executando", "container_disabled": "Contêiner parado ou desativado", "container_stopped": "Parado", "cores": "Núcleos", @@ -592,7 +592,8 @@ "from_user": "{= from_user =} - Da parte do envelope do usuário, por exemplo, para \"moo@mailcow.tld\", ele retorna “moo”", "from_name": "{= from_name =} - Do nome do envelope, por exemplo, para “Mailcow < moo@mailcow.tld >”, ele retorna “Mailcow”", "from_addr": "{= from_addr =} - Do endereço, parte do envelope", - "from_domain": "{= from_domain =} - Da parte do domínio do envelope" + "from_domain": "{= from_domain =} - Da parte do domínio do envelope", + "custom": "{= foo =} - Se o mailbox tiver o atributo personalizado \"foo\" com valor \"bar\", retornará \"bar\"" }, "domain_footer_plain": "Rodapé simples", "domain_quota": "Cota de domínio", @@ -864,7 +865,7 @@ "relay_unknown": "Retransmitir caixas de correio desconhecidas", "remove": "Remover", "resources": "Recursos", - "running": "Correndo", + "running": "Executando", "sender": "Remetente", "set_postfilter": "Marcar como postfilter", "set_prefilter": "Marcar como pré-filtro", @@ -1219,7 +1220,7 @@ "quarantine_notification_info": "Depois que uma notificação for enviada, os itens serão marcados como “notificados” e nenhuma outra notificação será enviada para esse item específico.", "recent_successful_connections": "Conexões bem-sucedidas vistas", "remove": "Remover", - "running": "Correndo", + "running": "Executando", "save": "Salvar alterações", "save_changes": "Salvar alterações", "sender_acl_disabled": "A verificação do remetente está desativada", diff --git a/data/web/lang/lang.sk-sk.json b/data/web/lang/lang.sk-sk.json index 79490261..d656a2cd 100644 --- a/data/web/lang/lang.sk-sk.json +++ b/data/web/lang/lang.sk-sk.json @@ -107,7 +107,8 @@ "username": "Používateľské meno", "validate": "Overiť", "validation_success": "Úspešne overené", - "tags": "Štítky" + "tags": "Štítky", + "dry": "Simulovať synchronizáciu" }, "admin": { "access": "Prístup", @@ -486,7 +487,9 @@ }, "emptyTable": "Nie sú k dispozícii žiadne dáta.", "decimal": ",", - "thousands": " " + "thousands": " ", + "collapse_all": "Zbaliť všetko", + "expand_all": "Rozbaliť všetko" }, "debug": { "chart_this_server": "Graf (tento server)", @@ -639,7 +642,18 @@ "title": "Upraviť objekt", "unchanged_if_empty": "Ak nemeníte, nechajte prázdne", "username": "Používateľské meno", - "validate_save": "Validovať a uložiť" + "validate_save": "Validovať a uložiť", + "domain_footer_info_vars": { + "from_addr": "{= from_addr =} - E-mailová adresa odosielateľa", + "from_domain": "{= from_domain =} - Doména odosielateľa", + "auth_user": "{= auth_user =} - Prihlasovacie meno odosielateľa", + "from_user": "{= from_user =} - Používateľská časť e-mailovej adresy odosielateľa, napr. pre \"moo@mailcow.tld\" vráti \"moo\"", + "from_name": "{= from_name =} - Meno odosielateľa, napr. 
pre \"Mailcow <moo@mailcow.tld>\" vráti \"Mailcow\"" + }, + "domain_footer": "Pätička pre celú doménu", + "domain_footer_html": "HTML text", + "domain_footer_info": "Pätička pre celú doménu sa pridáva do všetkých odchádzajúcich e-mailov spojených s adresou v rámci tejto domény.
Pre pätičku je možné použiť nasledujúce premenné:", + "domain_footer_plain": "Obyčajný text" }, "fido2": { "confirm": "Potvrdiť", @@ -934,7 +948,19 @@ "type": "Typ" }, "queue": { - "queue_manager": "Správca fronty" + "queue_manager": "Správca fronty", + "delete": "Vymazať všetko", + "flush": "Vyprázdnit frontu", + "info": "Poštová fronta obsahuje všetky e-maily, ktoré čakajú na doručenie. Ak e-mail uviazne v poštovej fronte na dlhší čas, systém ho automaticky vymaže.
Chybové hlásenie príslušného e-mailu poskytuje informácie o tom, prečo sa e-mail nepodarilo doručiť.", + "legend": "Možnosti akcií nad poštovou frontou:", + "ays": "Potvrďte, že chcete naozaj odstrániť všetky položky z aktuálnej fronty.", + "deliver_mail": "Doručiť", + "deliver_mail_legend": "Pokus o opätovné doručenie vybraných e-mailov.", + "show_message": "Zobraziť správu", + "unhold_mail": "Uvoľniť", + "unhold_mail_legend": "Uvoľniť vybrané e-maily na doručenie. (Len v prípade predchádzajúceho podržania)", + "hold_mail": "Podržať", + "hold_mail_legend": "Podržať vybrané e-maily. (Zabráni ďalším pokusom o doručenie)" }, "ratelimit": { "disabled": "Vypnuté", @@ -1028,7 +1054,8 @@ "verified_fido2_login": "Overené FIDO2 prihlásenie", "verified_totp_login": "Overené TOTP prihlásenie", "verified_webauthn_login": "Overené WebAuthn prihlásenie", - "verified_yotp_login": "Overené Yubico OTP prihlásenie" + "verified_yotp_login": "Overené Yubico OTP prihlásenie", + "domain_footer_modified": "Zmeny v pätičke domény %s boli uložené" }, "tfa": { "api_register": "%s využíva Yubico Cloud API. Prosím, zaobstarajte si API kľúč pre váš kľúč tu", From 86fa8634eeed0ccbf1ba89f67be6194586c3842b Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 11 Dec 2023 11:38:48 +0100 Subject: [PATCH 23/36] [Netfilter] do not ignore RETRY_WINDOW --- data/Dockerfiles/netfilter/main.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/data/Dockerfiles/netfilter/main.py b/data/Dockerfiles/netfilter/main.py index a6859c95..75768009 100644 --- a/data/Dockerfiles/netfilter/main.py +++ b/data/Dockerfiles/netfilter/main.py @@ -167,8 +167,12 @@ def ban(address): if not net in bans: bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0} + current_attempt = time.time() + if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW: + bans[net]['attempts'] = 0 + bans[net]['attempts'] += 1 - bans[net]['last_attempt'] = time.time() + bans[net]['last_attempt'] = current_attempt if bans[net]['attempts'] >= MAX_ATTEMPTS: cur_time = int(round(time.time())) From f4b838cad8f376bea4522b822f1edd77076f64f7 Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 11 Dec 2023 11:51:28 +0100 Subject: [PATCH 24/36] [Netfilter] update image & delete old server.py --- data/Dockerfiles/netfilter/server.py | 610 --------------------------- docker-compose.yml | 2 +- 2 files changed, 1 insertion(+), 611 deletions(-) delete mode 100644 data/Dockerfiles/netfilter/server.py diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py deleted file mode 100644 index 698137bf..00000000 --- a/data/Dockerfiles/netfilter/server.py +++ /dev/null @@ -1,610 +0,0 @@ -#!/usr/bin/env python3 - -import re -import os -import sys -import time -import atexit -import signal -import ipaddress -from collections import Counter -from random import randint -from threading import Thread -from threading import Lock -import redis -import json -import iptc -import dns.resolver -import dns.exception - -while True: - try: - redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '') - redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '') - if "".__eq__(redis_slaveof_ip): - r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0) - else: - r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0) - r.ping() - except Exception as ex: - print('%s - trying again in 3 seconds' % (ex)) - time.sleep(3) - else: - break - -pubsub = 
r.pubsub() - -WHITELIST = [] -BLACKLIST= [] - -bans = {} - -quit_now = False -exit_code = 0 -lock = Lock() - -def log(priority, message): - tolog = {} - tolog['time'] = int(round(time.time())) - tolog['priority'] = priority - tolog['message'] = message - r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False)) - print(message) - -def logWarn(message): - log('warn', message) - -def logCrit(message): - log('crit', message) - -def logInfo(message): - log('info', message) - -def refreshF2boptions(): - global f2boptions - global quit_now - global exit_code - - f2boptions = {} - - if not r.get('F2B_OPTIONS'): - f2boptions['ban_time'] = r.get('F2B_BAN_TIME') - f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME') - f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT') - f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') - f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') - f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') - f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') - else: - try: - f2boptions = json.loads(r.get('F2B_OPTIONS')) - except ValueError: - print('Error loading F2B options: F2B_OPTIONS is not json') - quit_now = True - exit_code = 2 - - verifyF2boptions(f2boptions) - r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False)) - -def verifyF2boptions(f2boptions): - verifyF2boption(f2boptions,'ban_time', 1800) - verifyF2boption(f2boptions,'max_ban_time', 10000) - verifyF2boption(f2boptions,'ban_time_increment', True) - verifyF2boption(f2boptions,'max_attempts', 10) - verifyF2boption(f2boptions,'retry_window', 600) - verifyF2boption(f2boptions,'netban_ipv4', 32) - verifyF2boption(f2boptions,'netban_ipv6', 128) - -def verifyF2boption(f2boptions, f2boption, f2bdefault): - f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault - -def refreshF2bregex(): - global f2bregex - global quit_now - global exit_code - if not r.get('F2B_REGEX'): - f2bregex = {} - f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)' - f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)' - f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+' - f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+' - f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+' - f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),' - f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' - f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' - f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked' - f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+' - r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False)) - else: - try: - f2bregex = {} - f2bregex = json.loads(r.get('F2B_REGEX')) - except ValueError: - print('Error loading F2B options: F2B_REGEX is not json') - quit_now = True - exit_code = 2 - -if r.exists('F2B_LOG'): - r.rename('F2B_LOG', 'NETFILTER_LOG') - -def mailcowChainOrder(): - global lock - global quit_now - global exit_code - while not quit_now: - time.sleep(10) - with lock: - filter4_table = iptc.Table(iptc.Table.FILTER) - filter6_table = iptc.Table6(iptc.Table6.FILTER) - filter4_table.refresh() - filter6_table.refresh() - for f in [filter4_table, filter6_table]: - forward_chain = 
iptc.Chain(f, 'FORWARD') - input_chain = iptc.Chain(f, 'INPUT') - for chain in [forward_chain, input_chain]: - target_found = False - for position, item in enumerate(chain.rules): - if item.target.name == 'MAILCOW': - target_found = True - if position > 2: - logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position)) - quit_now = True - exit_code = 2 - if not target_found: - logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name)) - quit_now = True - exit_code = 2 - -def ban(address): - global lock - refreshF2boptions() - BAN_TIME = int(f2boptions['ban_time']) - BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment']) - MAX_ATTEMPTS = int(f2boptions['max_attempts']) - RETRY_WINDOW = int(f2boptions['retry_window']) - NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4']) - NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6']) - - ip = ipaddress.ip_address(address) - if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped: - ip = ip.ipv4_mapped - address = str(ip) - if ip.is_private or ip.is_loopback: - return - - self_network = ipaddress.ip_network(address) - - with lock: - temp_whitelist = set(WHITELIST) - - if temp_whitelist: - for wl_key in temp_whitelist: - wl_net = ipaddress.ip_network(wl_key, False) - if wl_net.overlaps(self_network): - logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net)) - return - - net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False) - net = str(net) - - if not net in bans: - bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0} - - bans[net]['attempts'] += 1 - bans[net]['last_attempt'] = time.time() - - if bans[net]['attempts'] >= MAX_ATTEMPTS: - cur_time = int(round(time.time())) - NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] - logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 )) - if type(ip) is ipaddress.IPv4Address: - with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) - else: - with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) - r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME) - else: - logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net)) - -def unban(net): - global lock - if not net in bans: - logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net) - r.hdel('F2B_QUEUE_UNBAN', '%s' % net) - return - logInfo('Unbanning %s' % net) - if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network: - with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule in chain.rules: - chain.delete_rule(rule) - else: - with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule in chain.rules: - chain.delete_rule(rule) - r.hdel('F2B_ACTIVE_BANS', '%s' % net) - r.hdel('F2B_QUEUE_UNBAN', '%s' % net) - if net in bans: - 
bans[net]['attempts'] = 0 - bans[net]['ban_counter'] += 1 - -def permBan(net, unban=False): - global lock - if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: - with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) - else: - with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) - -def quit(signum, frame): - global quit_now - quit_now = True - -def clear(): - global lock - logInfo('Clearing all bans') - for net in bans.copy(): - unban(net) - with lock: - filter4_table = iptc.Table(iptc.Table.FILTER) - filter6_table = iptc.Table6(iptc.Table6.FILTER) - for filter_table in [filter4_table, filter6_table]: - filter_table.autocommit = False - forward_chain = iptc.Chain(filter_table, "FORWARD") - input_chain = iptc.Chain(filter_table, "INPUT") - mailcow_chain = iptc.Chain(filter_table, "MAILCOW") - if mailcow_chain in filter_table.chains: - for rule in mailcow_chain.rules: - mailcow_chain.delete_rule(rule) - for rule in forward_chain.rules: - if rule.target.name == 'MAILCOW': - forward_chain.delete_rule(rule) - for rule in input_chain.rules: - if rule.target.name == 'MAILCOW': - input_chain.delete_rule(rule) - filter_table.delete_chain("MAILCOW") - filter_table.commit() - filter_table.refresh() - filter_table.autocommit = True - r.delete('F2B_ACTIVE_BANS') - r.delete('F2B_PERM_BANS') - pubsub.unsubscribe() - -def watch(): - logInfo('Watching Redis channel F2B_CHANNEL') - pubsub.subscribe('F2B_CHANNEL') - - global quit_now - global exit_code - - while not quit_now: - try: - for item in pubsub.listen(): - refreshF2bregex() - for rule_id, rule_regex in f2bregex.items(): - if item['data'] and item['type'] == 'message': - try: - result = re.search(rule_regex, item['data']) - except re.error: - result = False - if result: - addr = result.group(1) - ip = ipaddress.ip_address(addr) - if ip.is_private or ip.is_loopback: - continue - logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data'])) - ban(addr) - except Exception as ex: - logWarn('Error reading log line from pubsub: %s' % ex) - quit_now = True - exit_code = 2 - -def snat4(snat_target): - global lock - global quit_now - - def get_snat4_rule(): - rule = iptc.Rule() - rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24' - rule.dst = '!' 
+ rule.src - target = rule.create_target("SNAT") - target.to_source = snat_target - match = rule.create_match("comment") - match.comment = f'{int(round(time.time()))}' - return rule - - while not quit_now: - time.sleep(10) - with lock: - try: - table = iptc.Table('nat') - table.refresh() - chain = iptc.Chain(table, 'POSTROUTING') - table.autocommit = False - new_rule = get_snat4_rule() - - if not chain.rules: - # if there are no rules in the chain, insert the new rule directly - logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') - chain.insert_rule(new_rule) - else: - for position, rule in enumerate(chain.rules): - if not hasattr(rule.target, 'parameter'): - continue - match = all(( - new_rule.get_src() == rule.get_src(), - new_rule.get_dst() == rule.get_dst(), - new_rule.target.parameters == rule.target.parameters, - new_rule.target.name == rule.target.name - )) - if position == 0: - if not match: - logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') - chain.insert_rule(new_rule) - else: - if match: - logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') - chain.delete_rule(rule) - - table.commit() - table.autocommit = True - except: - print('Error running SNAT4, retrying...') - -def snat6(snat_target): - global lock - global quit_now - - def get_snat6_rule(): - rule = iptc.Rule6() - rule.src = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64') - rule.dst = '!' + rule.src - target = rule.create_target("SNAT") - target.to_source = snat_target - return rule - - while not quit_now: - time.sleep(10) - with lock: - try: - table = iptc.Table6('nat') - table.refresh() - chain = iptc.Chain(table, 'POSTROUTING') - table.autocommit = False - if get_snat6_rule() not in chain.rules: - logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat6_rule().src, snat_target)) - chain.insert_rule(get_snat6_rule()) - table.commit() - else: - for position, item in enumerate(chain.rules): - if item == get_snat6_rule(): - if position != 0: - chain.delete_rule(get_snat6_rule()) - table.commit() - table.autocommit = True - except: - print('Error running SNAT6, retrying...') - -def autopurge(): - while not quit_now: - time.sleep(10) - refreshF2boptions() - BAN_TIME = int(f2boptions['ban_time']) - MAX_BAN_TIME = int(f2boptions['max_ban_time']) - BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment']) - MAX_ATTEMPTS = int(f2boptions['max_attempts']) - QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN') - if QUEUE_UNBAN: - for net in QUEUE_UNBAN: - unban(str(net)) - for net in bans.copy(): - if bans[net]['attempts'] >= MAX_ATTEMPTS: - NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] - TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt'] - if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME: - unban(net) - -def isIpNetwork(address): - try: - ipaddress.ip_network(address, False) - except ValueError: - return False - return True - - -def genNetworkList(list): - resolver = dns.resolver.Resolver() - hostnames = [] - networks = [] - for key in list: - if isIpNetwork(key): - networks.append(key) - else: - hostnames.append(key) - for hostname in hostnames: - hostname_ips = [] - for rdtype in ['A', 'AAAA']: - try: - answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3) - except dns.exception.Timeout: - logInfo('Hostname %s 
timedout on resolve' % hostname) - break - except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): - continue - except dns.exception.DNSException as dnsexception: - logInfo('%s' % dnsexception) - continue - for rdata in answer: - hostname_ips.append(rdata.to_text()) - networks.extend(hostname_ips) - return set(networks) - -def whitelistUpdate(): - global lock - global quit_now - global WHITELIST - while not quit_now: - start_time = time.time() - list = r.hgetall('F2B_WHITELIST') - new_whitelist = [] - if list: - new_whitelist = genNetworkList(list) - with lock: - if Counter(new_whitelist) != Counter(WHITELIST): - WHITELIST = new_whitelist - logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST)) - time.sleep(60.0 - ((time.time() - start_time) % 60.0)) - -def blacklistUpdate(): - global quit_now - global BLACKLIST - while not quit_now: - start_time = time.time() - list = r.hgetall('F2B_BLACKLIST') - new_blacklist = [] - if list: - new_blacklist = genNetworkList(list) - if Counter(new_blacklist) != Counter(BLACKLIST): - addban = set(new_blacklist).difference(BLACKLIST) - delban = set(BLACKLIST).difference(new_blacklist) - BLACKLIST = new_blacklist - logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST)) - if addban: - for net in addban: - permBan(net=net) - if delban: - for net in delban: - permBan(net=net, unban=True) - time.sleep(60.0 - ((time.time() - start_time) % 60.0)) - -def initChain(): - # Is called before threads start, no locking - print("Initializing mailcow netfilter chain") - # IPv4 - if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains: - iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW") - for c in ['FORWARD', 'INPUT']: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c) - rule = iptc.Rule() - rule.src = '0.0.0.0/0' - rule.dst = '0.0.0.0/0' - target = iptc.Target(rule, "MAILCOW") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) - # IPv6 - if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains: - iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW") - for c in ['FORWARD', 'INPUT']: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c) - rule = iptc.Rule6() - rule.src = '::/0' - rule.dst = '::/0' - target = iptc.Target(rule, "MAILCOW") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) - -if __name__ == '__main__': - - # In case a previous session was killed without cleanup - clear() - # Reinit MAILCOW chain - initChain() - - watch_thread = Thread(target=watch) - watch_thread.daemon = True - watch_thread.start() - - if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n': - try: - snat_ip = os.getenv('SNAT_TO_SOURCE') - snat_ipo = ipaddress.ip_address(snat_ip) - if type(snat_ipo) is ipaddress.IPv4Address: - snat4_thread = Thread(target=snat4,args=(snat_ip,)) - snat4_thread.daemon = True - snat4_thread.start() - except ValueError: - print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address') - - if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n': - try: - snat_ip = os.getenv('SNAT6_TO_SOURCE') - snat_ipo = ipaddress.ip_address(snat_ip) - if type(snat_ipo) is ipaddress.IPv6Address: - snat6_thread = Thread(target=snat6,args=(snat_ip,)) - snat6_thread.daemon = True - snat6_thread.start() - except ValueError: - print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address') - - autopurge_thread = Thread(target=autopurge) - autopurge_thread.daemon = 
True
-  autopurge_thread.start()
-
-  mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
-  mailcowchainwatch_thread.daemon = True
-  mailcowchainwatch_thread.start()
-
-  blacklistupdate_thread = Thread(target=blacklistUpdate)
-  blacklistupdate_thread.daemon = True
-  blacklistupdate_thread.start()
-
-  whitelistupdate_thread = Thread(target=whitelistUpdate)
-  whitelistupdate_thread.daemon = True
-  whitelistupdate_thread.start()
-
-  signal.signal(signal.SIGTERM, quit)
-  atexit.register(clear)
-
-  while not quit_now:
-    time.sleep(0.5)
-
-  sys.exit(exit_code)
diff --git a/docker-compose.yml b/docker-compose.yml
index 8d84e3a7..bba0610f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -434,7 +434,7 @@ services:
         - acme
 
     netfilter-mailcow:
-      image: mailcow/netfilter:1.52
+      image: mailcow/netfilter:1.53
       stop_grace_period: 30s
       depends_on:
        - dovecot-mailcow

From 218ba6950187c2648769e8eeef0c3529023cecd7 Mon Sep 17 00:00:00 2001
From: FreddleSpl0it
Date: Mon, 11 Dec 2023 15:44:11 +0100
Subject: [PATCH 25/36] [Watchdog] add curl verbose & use | as sed delimiter

---
 data/Dockerfiles/watchdog/watchdog.sh | 6 ++++--
 docker-compose.yml                    | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/data/Dockerfiles/watchdog/watchdog.sh b/data/Dockerfiles/watchdog/watchdog.sh
index 2b8ff78f..1e0d6813 100755
--- a/data/Dockerfiles/watchdog/watchdog.sh
+++ b/data/Dockerfiles/watchdog/watchdog.sh
@@ -19,9 +19,11 @@ fi
 
 if [[ "${WATCHDOG_VERBOSE}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   SMTP_VERBOSE="--verbose"
+  CURL_VERBOSE="--verbose"
   set -xv
 else
   SMTP_VERBOSE=""
+  CURL_VERBOSE=""
   exec 2>/dev/null
 fi
 
@@ -168,10 +170,10 @@ function notify_error() {
   fi
 
   # Replace subject and body placeholders
-  WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed "s/\$SUBJECT\|\${SUBJECT}/$SUBJECT/g" | sed "s/\$BODY\|\${BODY}/$BODY/")
+  WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed "s|\$SUBJECT\|\${SUBJECT}|$SUBJECT|g" | sed "s|\$BODY\|\${BODY}|$BODY|")
 
   # POST to webhook
-  curl -X POST -H "Content-Type: application/json" -d "${WEBHOOK_BODY}" ${WATCHDOG_NOTIFY_WEBHOOK}
+  curl -X POST -H "Content-Type: application/json" ${CURL_VERBOSE} -d "${WEBHOOK_BODY}" ${WATCHDOG_NOTIFY_WEBHOOK}
 
   log_msg "Sent notification using webhook"
   fi

diff --git a/docker-compose.yml b/docker-compose.yml
index 61e7a78e..8ab7bb8c 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -448,7 +448,7 @@ services:
         - /lib/modules:/lib/modules:ro
 
     watchdog-mailcow:
-      image: mailcow/watchdog:1.97
+      image: mailcow/watchdog:1.99
       dns:
         - ${IPV4_NETWORK:-172.22.1}.254
       tmpfs:

From f38ec686959fd618b3a0e4f8ed71ce3457949eb4 Mon Sep 17 00:00:00 2001
From: DerLinkman
Date: Tue, 12 Dec 2023 11:00:16 +0100
Subject: [PATCH 26/36] [SOGo] Update to 5.9.1

---
 docker-compose.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index 05fc1f2d..c6244c4b 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -171,7 +171,7 @@ services:
         - phpfpm
 
     sogo-mailcow:
-      image: mailcow/sogo:1.119
+      image: mailcow/sogo:1.120
       environment:
         - DBNAME=${DBNAME}
         - DBUSER=${DBUSER}

From 66b9245b2870224fbc9f662856af20040dd431bf Mon Sep 17 00:00:00 2001
From: FreddleSpl0it
Date: Tue, 12 Dec 2023 11:10:10 +0100
Subject: [PATCH 27/36] fix WATCHDOG_NOTIFY_WEBHOOK env vars

---
 docker-compose.yml | 4 ++--
 update.sh          | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index c6244c4b..26bec79e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -487,8 +487,8 @@ services:
         - WATCHDOG_NOTIFY_EMAIL=${WATCHDOG_NOTIFY_EMAIL:-}
         - WATCHDOG_NOTIFY_BAN=${WATCHDOG_NOTIFY_BAN:-y}
         - WATCHDOG_SUBJECT=${WATCHDOG_SUBJECT:-Watchdog ALERT}
-        - WATCHDOG_NOTIFY_WEBHOOK=${WATCHDOG_NOTIFY_WEBHOOK}
-        - WATCHDOG_NOTIFY_WEBHOOK_BODY=${WATCHDOG_NOTIFY_WEBHOOK_BODY}
+        - WATCHDOG_NOTIFY_WEBHOOK=${WATCHDOG_NOTIFY_WEBHOOK:-}
+        - WATCHDOG_NOTIFY_WEBHOOK_BODY=${WATCHDOG_NOTIFY_WEBHOOK_BODY:-}
         - WATCHDOG_EXTERNAL_CHECKS=${WATCHDOG_EXTERNAL_CHECKS:-n}
         - WATCHDOG_MYSQL_REPLICATION_CHECKS=${WATCHDOG_MYSQL_REPLICATION_CHECKS:-n}
         - WATCHDOG_VERBOSE=${WATCHDOG_VERBOSE:-n}
diff --git a/update.sh b/update.sh
index cb018b4f..313d41fb 100755
--- a/update.sh
+++ b/update.sh
@@ -637,7 +637,8 @@ for option in ${CONFIG_ARRAY[@]}; do
       echo "Adding new option \"${option}\" to mailcow.conf"
       echo '# JSON body included in the webhook POST request. Needs to be in single quotes.' >> mailcow.conf
       echo '# Following variables are available: SUBJECT, BODY' >> mailcow.conf
-      echo '#WATCHDOG_NOTIFY_WEBHOOK_BODY=\'{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}\'' >> mailcow.conf
+      WEBHOOK_BODY='{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}'
+      echo "#WATCHDOG_NOTIFY_WEBHOOK_BODY='${WEBHOOK_BODY}'" >> mailcow.conf
     fi
   elif [[ ${option} == "WATCHDOG_NOTIFY_BAN" ]]; then
     if ! grep -q ${option} mailcow.conf; then

From c3b5474cbf0a820ea39c7b72bf6eb6cc88113398 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 12 Dec 2023 13:30:18 +0000
Subject: [PATCH 28/36] Update dependency nextcloud/server to v28

Signed-off-by: milkmaker
---
 helper-scripts/nextcloud.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/helper-scripts/nextcloud.sh b/helper-scripts/nextcloud.sh
index 92df1bde..7461e787 100755
--- a/helper-scripts/nextcloud.sh
+++ b/helper-scripts/nextcloud.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 # renovate: datasource=github-releases depName=nextcloud/server versioning=semver extractVersion=^v(?<version>.*)$
-NEXTCLOUD_VERSION=27.1.4
+NEXTCLOUD_VERSION=28.0.0
 
 echo -ne "Checking prerequisites..."
 sleep 1

From 06ad5f6652a5dda96a78d891bb905d52d0ddb3f3 Mon Sep 17 00:00:00 2001
From: milkmaker
Date: Tue, 12 Dec 2023 17:49:29 +0100
Subject: [PATCH 29/36] Translations update from Weblate (#5590)

* [Web] Updated lang.ru-ru.json

Co-authored-by: Oleksii Kruhlenko

* [Web] Updated lang.uk-ua.json

Co-authored-by: Oleksii Kruhlenko

---------

Co-authored-by: Oleksii Kruhlenko
---
 data/web/lang/lang.ru-ru.json | 8 ++++++--
 data/web/lang/lang.uk-ua.json | 8 ++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/data/web/lang/lang.ru-ru.json b/data/web/lang/lang.ru-ru.json
index 2a909f29..2a959ab3 100644
--- a/data/web/lang/lang.ru-ru.json
+++ b/data/web/lang/lang.ru-ru.json
@@ -345,7 +345,10 @@
     "allowed_methods": "Access-Control-Allow-Methods",
     "ip_check": "Проверить IP",
     "ip_check_disabled": "Проверка IP отключена. Вы можете включить его в разделе
Система > Конфигурация > Параметры > Настроить.", - "ip_check_opt_in": "Согласие на использование сторонних служб ipv4.mailcow.email и ipv6.mailcow.email для разрешения внешних IP-адресов." + "ip_check_opt_in": "Согласие на использование сторонних служб ipv4.mailcow.email и ipv6.mailcow.email для разрешения внешних IP-адресов.", + "f2b_manage_external": "Внешнее управление Fail2Ban", + "f2b_manage_external_info": "Fail2ban по-прежнему будет вести банлист, но не будет активно устанавливать правила для блокировки трафика. Используйте сгенерированный ниже банлист для внешнего блокирования трафика.", + "copy_to_clipboard": "Текст скопирован в буфер обмена!" }, "danger": { "access_denied": "Доступ запрещён, или указаны неверные данные", @@ -1015,7 +1018,8 @@ "verified_webauthn_login": "Авторизация WebAuthn пройдена", "verified_yotp_login": "Авторизация Yubico OTP пройдена", "cors_headers_edited": "Настройки CORS сохранены", - "domain_footer_modified": "Изменения в нижнем колонтитуле домена %s сохранены" + "domain_footer_modified": "Изменения в нижнем колонтитуле домена %s сохранены", + "f2b_banlist_refreshed": "Идентификатор банлиста был успешно обновлен." }, "tfa": { "api_register": "%s использует Yubico Cloud API. Пожалуйста, получите ключ API для вашего ключа здесь", diff --git a/data/web/lang/lang.uk-ua.json b/data/web/lang/lang.uk-ua.json index 67fcb655..e778f156 100644 --- a/data/web/lang/lang.uk-ua.json +++ b/data/web/lang/lang.uk-ua.json @@ -346,7 +346,10 @@ "ip_check_disabled": "Перевірка IP вимкнена. Ви можете ввімкнути його в меню
Система > Конфігурація > Параметри > Налаштувати", "ip_check_opt_in": "Згода на використання сторонніх служб ipv4.mailcow.email і ipv6.mailcow.email для визначення зовнішніх IP-адрес.", "options": "Параметри", - "queue_unban": "розблокувати" + "queue_unban": "розблокувати", + "f2b_manage_external": "Керування Fail2Ban ззовні", + "f2b_manage_external_info": "Fail2ban буде підтримувати список заборонених, але не буде активно встановлювати правила для блокування трафіку. Використовуйте згенерований список заборон нижче для зовнішнього блокування трафіку.", + "copy_to_clipboard": "Текст скопійовано в буфер обміну!" }, "danger": { "alias_domain_invalid": "Неприпустимий псевдонім домену: %s", @@ -1063,7 +1066,8 @@ "template_modified": "Зміни до шаблону %s збережено", "cors_headers_edited": "Налаштування CORS збережено", "ip_check_opt_in_modified": "Перевірка IP-адреси успішно збережено", - "template_removed": "Шаблону із ID %s видалено" + "template_removed": "Шаблону із ID %s видалено", + "f2b_banlist_refreshed": "Ідентифікатор списку заборонених успішно оновлено." }, "tfa": { "confirm": "Підтвердьте", From 4e252f8243b721c399482d4d182f16a5731743b5 Mon Sep 17 00:00:00 2001 From: milkmaker Date: Wed, 13 Dec 2023 17:50:13 +0100 Subject: [PATCH 30/36] [Web] Updated lang.pt-br.json (#5591) Co-authored-by: Abner Santana --- data/web/lang/lang.pt-br.json | 46 +++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/data/web/lang/lang.pt-br.json b/data/web/lang/lang.pt-br.json index 0c7cb083..54628989 100644 --- a/data/web/lang/lang.pt-br.json +++ b/data/web/lang/lang.pt-br.json @@ -37,7 +37,7 @@ "add_domain_only": "Adicionar somente domínio", "add_domain_restart": "Adicionar domínio e reiniciar o SoGo", "alias_address": "Endereço (s) de alias", - "alias_address_info": "Endereço de e-mail completo/es ou @example .com, para capturar todas as mensagens de um domínio (separadas por vírgula). somente domínios mailcow.", + "alias_address_info": "Endereço de e-mail completo/es ou @example .com, para capturar todas as mensagens de um domínio (separadas por vírgula). somente domínios mailcow.", "alias_domain": "Domínio de alias", "alias_domain_info": "Somente nomes de domínio válidos (separados por vírgula).", "app_name": "Nome do aplicativo", @@ -216,10 +216,10 @@ "includes": "Inclua esses destinatários", "ip_check": "Verificação de IP", "ip_check_disabled": "A verificação de IP está desativada. Você pode ativá-lo em
Sistema > Configuração > Opções > Personalizar", - "ip_check_opt_in": "Opte por usar o serviço de terceiros ipv4.mailcow.email e ipv6.mailcow.email para resolver endereços IP externos.", + "ip_check_opt_in": "Opte por usar o serviço de terceiros ipv4.mailcow.email. e ipv6.mailcow.email para resolver endereços IP externos.", "is_mx_based": "Baseado em MX", "last_applied": "Aplicado pela última vez", - "license_info": "Uma licença não é necessária, mas ajuda no desenvolvimento futuro.
Registre seu GUID aqui ou compre suporte para a instalação do mailcow.", + "license_info": "Uma licença não é necessária, mas ajuda no desenvolvimento.
Registre seu GUID aqui ou comprar suporte para sua instalação de mailcow.", "link": "Link", "loading": "Por favor, espere...", "login_time": "Hora do login", @@ -238,7 +238,7 @@ "oauth2_add_client": "Adicionar cliente OAuth2", "oauth2_client_id": "ID do cliente", "oauth2_client_secret": "Segredo do cliente", - "oauth2_info": "A implementação do OAuth2 suporta o tipo de concessão “Código de Autorização” e emite tokens de atualização.
\r\nO servidor também emite automaticamente novos tokens de atualização, após o uso de um token de atualização.

\r\n• O escopo padrão é perfil. Somente usuários de caixas de correio podem ser autenticados no OAuth2. Se o parâmetro do escopo for omitido, ele retornará ao perfil.
\r\n• O parâmetro state deve ser enviado pelo cliente como parte da solicitação de autorização.

\r\nCaminhos para solicitações para a API OAuth2:
\r\n
    \r\n
  • Ponto final de autorização: /oauth/authorize
  • \r\n
  • Ponto final do token: /oauth/token
  • \r\n
  • Página de recursos: /oauth/profile
\r\nA regeneração do segredo do cliente não expirará os códigos de autorização existentes, mas eles falharão na renovação do token.

\r\nA revogação dos tokens do cliente causará o encerramento imediato de todas as sessões ativas. Todos os clientes precisam se autenticar novamente.", + "oauth2_info": "A implementação OAuth2 suporta o tipo de concessão \"Código de Autorização\" e emite tokens de atualização.
\nO servidor também emite automaticamente novos tokens de atualização, depois que um token de atualização foi usado.

\n• O escopo padrão é perfil. Somente usuários com caixa de e-mail podem ser autenticados contra o OAuth2. Se o parâmetro de escopo for omitido, ele voltará para perfil.
\nCaminhos para solicitações OAuth2 API:
\n
    \n\n
  • Endpoint de autorização: /oauth/authorize
  • \n\n
  • Endpoint token: /oauth/token
  • \n\n
  • Página de recursos: /oauth/profile
  • \n\n
\nRegenerar o segredo do cliente não expirará os códigos de autorização existentes, mas eles não renovarão seu token.

\n\nA revogação dos tokens do cliente causará o término imediato de todas as sessões ativas. Todos os clientes precisam se autenticar novamente.", "oauth2_redirect_uri": "URI de redirecionamento", "oauth2_renew_secret": "Gere um novo segredo de cliente", "oauth2_revoke_tokens": "Revogar todos os tokens do cliente", @@ -256,15 +256,15 @@ "priority": "Prioridade", "private_key": "Chave privada", "quarantine": "Quarentena", - "quarantine_bcc": "Envie uma cópia de todas as notificações (BCC) para esse destinatário:
deixe em branco para desativar. Correio não assinado e não verificado. Deve ser entregue somente internamente.", + "quarantine_bcc": "Envie uma cópia de todas as notificações (BCC) para esse destinatário:
deixe em branco para desativar. Correio não assinado e não verificado. Deve ser entregue somente internamente.", "quarantine_exclude_domains": "Excluir domínios e domínios de alias", "quarantine_max_age": "Idade máxima em dias
O valor deve ser igual ou superior a 1 dia.", "quarantine_max_score": "Descarte a notificação se a pontuação de spam de um e-mail for maior que esse valor: O
padrão é 9999,0", - "quarantine_max_size": "Tamanho máximo em MiB (elementos maiores são descartados):
0 não indica ilimitado.", + "quarantine_max_size": "Tamanho máximo em MiB (elementos maiores são descartados):
0 não indica ilimitado.", "quarantine_notification_html": "Modelo de e-mail de notificação:
deixe em branco para restaurar o modelo padrão.", "quarantine_notification_sender": "Remetente do e-mail de notificação", "quarantine_notification_subject": "Assunto do e-mail de notificação", - "quarantine_redirect": "Redirecione todas as notificações para esse destinatário:
deixe em branco para desativar. Correio não assinado e não verificado. Deve ser entregue somente internamente.", + "quarantine_redirect": "Redirecione todas as notificações para esse destinatário:
deixe em branco para desativar. E-mail não assinado e não verificado. Deve ser entregue somente internamente.", "quarantine_release_format": "Formato dos itens lançados", "quarantine_release_format_att": "Como anexo", "quarantine_release_format_raw": "Original não modificado", @@ -348,7 +348,10 @@ "username": "Nome de usuário", "validate_license_now": "Valide o GUID em relação ao servidor de licenças", "verify": "Verificar", - "yes": "✓" + "yes": "✓", + "copy_to_clipboard": "Texto copiado para a área de transferência!", + "f2b_manage_external": "Gerenciar Fail2Ban externamente", + "f2b_manage_external_info": "O Fail2ban ainda manterá a lista de banimentos, mas não definirá ativamente regras para bloquear o tráfego. Use a lista de banimento gerada abaixo para bloquear externamente o tráfego." }, "danger": { "access_denied": "Acesso negado ou dados de formulário inválidos", @@ -489,20 +492,20 @@ "infoFiltered": "(filtrado do total de entradas _MAX_)", "infoPostFix": "", "thousands": ",", - "lengthMenu": "Show _MENU_ entries", - "loadingRecords": "Loading...", - "processing": "Please wait...", - "search": "Search:", - "zeroRecords": "No matching records found", + "lengthMenu": "Mostrar _ MENU_ entradas", + "loadingRecords": "Carregando...", + "processing": "Por favor, aguarde...", + "search": "Pesquisa:", + "zeroRecords": "Nenhum registro correspondente encontrado", "paginate": { - "first": "First", - "last": "Last", + "first": "Primeiro", + "last": "Última", "next": "Next", - "previous": "Previous" + "previous": "Anterior" }, "aria": { - "sortAscending": ": activate to sort column ascending", - "sortDescending": ": activate to sort column descending" + "sortAscending": ": Ative para classificar a coluna ascendente", + "sortDescending": ": Ative para classificar a coluna decrescente" } }, "debug": { @@ -572,7 +575,7 @@ "automap": "Tente mapear pastas automaticamente (“Itens enviados”, “Enviados” => “Enviados” etc.)", "backup_mx_options": "Opções de relé", "bcc_dest_format": "O destino do BCC deve ser um único endereço de e-mail válido.
Se precisar enviar uma cópia para vários endereços, crie um alias e use-o aqui.", - "client_id": "ID do cliente", + "client_id": "ID Cliente", "client_secret": "Segredo do cliente", "comment_info": "Um comentário privado não é visível para o usuário, enquanto um comentário público é mostrado como dica de ferramenta ao passar o mouse sobre ele na visão geral do usuário", "created_on": "Criado em", @@ -909,7 +912,7 @@ "tls_map_parameters_info": "Vazio ou parâmetros, por exemplo: protocols=! Cifras SSLv2 = média, exclusão = 3DES", "tls_map_policy": "Política", "tls_policy_maps": "Mapas de políticas de TLS", - "tls_policy_maps_enforced_tls": "Essas políticas também substituirão o comportamento dos usuários de caixas de correio que impõem conexões TLS de saída. Se nenhuma política existir abaixo, esses usuários aplicarão os valores padrão especificados como smtp_tls_mandatory_protocols e smtp_tls_mandatory_ciphers.", + "tls_policy_maps_enforced_tls": "Essas políticas também substituirão o comportamento das caixas de e-mail dos usuários, que impõem conexões TLS de saída. Se não houver nenhuma política abaixo, esses usuários aplicarão os valores padrão especificados como smtp_tls_mandatory_protocols e smtp_tls_mandatory_ciphers.", "tls_policy_maps_info": "Esse mapa de políticas substitui as regras de transporte TLS de saída, independentemente das configurações de política de TLS do usuário.
\r\n Consulte a documentação do “smtp_tls_policy_maps” para obter mais informações.", "tls_policy_maps_long": "Substituições do mapa de políticas de TLS de saída", "toggle_all": "Alternar tudo", @@ -1092,7 +1095,8 @@ "verified_fido2_login": "Login FIDO2 verificado", "verified_totp_login": "Login TOTP verificado", "verified_webauthn_login": "Login verificado do WebAuthn", - "verified_yotp_login": "Login OTP verificado do Yubico" + "verified_yotp_login": "Login OTP verificado do Yubico", + "f2b_banlist_refreshed": "O Banlist ID foi atualizado com sucesso." }, "tfa": { "api_register": "%s usa a API Yubico Cloud. Obtenha uma chave de API para sua chave aqui", From 904b37c4befae305e757b9d9e0687bcc36fc03f0 Mon Sep 17 00:00:00 2001 From: milkmaker Date: Sat, 16 Dec 2023 19:23:27 +0100 Subject: [PATCH 31/36] [Web] Updated lang.pt-br.json (#5598) Co-authored-by: Abner Santana --- data/web/lang/lang.pt-br.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data/web/lang/lang.pt-br.json b/data/web/lang/lang.pt-br.json index 54628989..1613b49b 100644 --- a/data/web/lang/lang.pt-br.json +++ b/data/web/lang/lang.pt-br.json @@ -37,7 +37,7 @@ "add_domain_only": "Adicionar somente domínio", "add_domain_restart": "Adicionar domínio e reiniciar o SoGo", "alias_address": "Endereço (s) de alias", - "alias_address_info": "Endereço de e-mail completo/es ou @example .com, para capturar todas as mensagens de um domínio (separadas por vírgula). somente domínios mailcow.", + "alias_address_info": "Endereço/s de e-mail completo ou @example .com, para capturar todas as mensagens de um domínio (separadas por vírgula). somente domínios mailcow.", "alias_domain": "Domínio de alias", "alias_domain_info": "Somente nomes de domínio válidos (separados por vírgula).", "app_name": "Nome do aplicativo", @@ -238,7 +238,7 @@ "oauth2_add_client": "Adicionar cliente OAuth2", "oauth2_client_id": "ID do cliente", "oauth2_client_secret": "Segredo do cliente", - "oauth2_info": "A implementação OAuth2 suporta o tipo de concessão \"Código de Autorização\" e emite tokens de atualização.
\nO servidor também emite automaticamente novos tokens de atualização, depois que um token de atualização foi usado.

\n• O escopo padrão é perfil. Somente usuários com caixa de e-mail podem ser autenticados contra o OAuth2. Se o parâmetro de escopo for omitido, ele voltará para perfil.
\nCaminhos para solicitações OAuth2 API:
\n
    \n\n
  • Endpoint de autorização: /oauth/authorize
  • \n\n
  • Endpoint token: /oauth/token
  • \n\n
  • Página de recursos: /oauth/profile
  • \n\n
\nRegenerar o segredo do cliente não expirará os códigos de autorização existentes, mas eles não renovarão seu token.

\n\nA revogação dos tokens do cliente causará o término imediato de todas as sessões ativas. Todos os clientes precisam se autenticar novamente.", + "oauth2_info": "A implementação OAuth2 suporta o tipo de concessão \"Código de Autorização\" e emite tokens de atualização.
\nO servidor também emite automaticamente novos tokens de atualização, depois que um token de atualização foi usado.

\n• O escopo padrão é perfil. Somente usuários com caixa de e-mail podem ser autenticados contra o OAuth2. Se o parâmetro de escopo for omitido, ele voltará para perfil.
\nCaminhos para solicitações OAuth2 API:
\n
    \n
  • Endpoint de autorização: /oauth/authorize
  • \n
  • Endpoint token: /oauth/token
  • \n
  • Página de recursos: /oauth/profile
  • \n
\nRegenerar o segredo do cliente não expirará os códigos de autorização existentes, mas eles não renovarão seu token.

\nA revogação dos tokens do cliente causará o término imediato de todas as sessões ativas. Todos os clientes precisam se autenticar novamente.", "oauth2_redirect_uri": "URI de redirecionamento", "oauth2_renew_secret": "Gere um novo segredo de cliente", "oauth2_revoke_tokens": "Revogar todos os tokens do cliente", @@ -256,7 +256,7 @@ "priority": "Prioridade", "private_key": "Chave privada", "quarantine": "Quarentena", - "quarantine_bcc": "Envie uma cópia de todas as notificações (BCC) para esse destinatário:
deixe em branco para desativar. Correio não assinado e não verificado. Deve ser entregue somente internamente.", + "quarantine_bcc": "Envie uma cópia de todas as notificações (BCC) para este destinatário:
Deixe em branco para desativar. E-mail não assinado e não verificado. Deve ser entregue apenas internamente.", "quarantine_exclude_domains": "Excluir domínios e domínios de alias", "quarantine_max_age": "Idade máxima em dias
O valor deve ser igual ou superior a 1 dia.", "quarantine_max_score": "Descarte a notificação se a pontuação de spam de um e-mail for maior que esse valor: O
padrão é 9999,0", From 5c35b42844df36f3d56c471817ac16bc0e506e30 Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 18 Dec 2023 11:53:30 +0100 Subject: [PATCH 32/36] Update Netfilter and Watchdog Image --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index d8f0db78..1156854e 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -434,7 +434,7 @@ services: - acme netfilter-mailcow: - image: mailcow/netfilter:1.53 + image: mailcow/netfilter:1.54 stop_grace_period: 30s depends_on: - dovecot-mailcow @@ -457,7 +457,7 @@ services: - /lib/modules:/lib/modules:ro watchdog-mailcow: - image: mailcow/watchdog:1.99 + image: mailcow/watchdog:2.00 dns: - ${IPV4_NETWORK:-172.22.1}.254 tmpfs: From 71defbf2f92f157faee8fed134a7ffbd74d18e17 Mon Sep 17 00:00:00 2001 From: FreddleSpl0it Date: Mon, 18 Dec 2023 14:02:05 +0100 Subject: [PATCH 33/36] escapeHtml in qhandler.js --- data/web/js/site/qhandler.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/web/js/site/qhandler.js b/data/web/js/site/qhandler.js index 8a8471f2..34f5d853 100644 --- a/data/web/js/site/qhandler.js +++ b/data/web/js/site/qhandler.js @@ -40,7 +40,7 @@ jQuery(function($){ if (value.score > 0) highlightClass = 'negative'; else if (value.score < 0) highlightClass = 'positive'; else highlightClass = 'neutral'; - $('#qid_detail_symbols').append('' + value.name + ' (' + value.score + ')'); + $('#qid_detail_symbols').append('' + value.name + ' (' + value.score + ')'); }); $('[data-bs-toggle="tooltip"]').tooltip(); } From 987ca68ca66a1f4fb01a7759273fc1d575a32230 Mon Sep 17 00:00:00 2001 From: DerLinkman Date: Mon, 18 Dec 2023 16:02:59 +0100 Subject: [PATCH 34/36] issue_templates: corrected links + added premium support link --- .github/ISSUE_TEMPLATE/config.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index dcda8f02..5c422bb4 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,11 @@ blank_issues_enabled: false contact_links: - - name: ❓ Community-driven support + - name: ❓ Community-driven support (Free) url: https://docs.mailcow.email/#get-support about: Please use the community forum for questions or assistance + - name: 🔥 Premium Support (Paid) + url: https://www.servercow.de/mailcow?lang=en#support + about: Buy a support subscription for any critical issues and get assisted by the mailcow Team. See conditions! - name: 🚨 Report a security vulnerability - url: https://www.servercow.de/anfrage?lang=en + url: "mailto:info@servercow.de?subject=mailcow: dockerized Security Vulnerability" about: Please give us appropriate time to verify, respond and fix before disclosure. From 89adaabb646a4941b1989ecccda0c9b349d8948d Mon Sep 17 00:00:00 2001 From: DerLinkman Date: Tue, 19 Dec 2023 09:47:12 +0100 Subject: [PATCH 35/36] contributing.md: Updated guidelines --- CONTRIBUTING.md | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 920dd4f3..d7a3d86d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,33 @@ -When a problem occurs, then always for a reason! 
What you want to do in such a case is:
+# Contribution Guidelines (Last modified on 18th December 2023)
+
+First of all, thank you for wanting to provide a bugfix or a new feature for the mailcow community, it's because of your help that the project can continue to grow!
+
+## Pull Requests (Last modified on 18th December 2023)
+
+However, please note the following regarding pull requests:
+
+1. **ALWAYS** create your PR using the staging branch of your locally cloned mailcow instance, as the pull request will end up in said staging branch of mailcow once approved. Ideally, you should simply create a new branch for your pull request that is named after the type of your PR (e.g. `feat/` for function updates or `fix/` for bug fixes) and the actual content (e.g. `sogo-6.0.0` for an update from SOGo to version 6 or `html-escape` for a fix that includes escaping HTML in mailcow).
+2. Please **keep** this pull request branch **clean** and free of commits that have nothing to do with the changes you have made (e.g. commits from other users from other branches). *If you make changes to the `update.sh` script or other scripts that trigger a commit, there is usually a developer mode for clean working in this case.*
+3. **Test your changes before you commit them as a pull request.** If possible, write a small **test log** or demonstrate the functionality with a **screenshot or GIF**. *We will of course also test your pull request ourselves, but proof from you will save us the question of whether you have tested your own changes yourself.*
+4. Please **ALWAYS** create the actual pull request against the staging branch and **NEVER** directly against the master branch. *If you forget to do this, our moobot will remind you to switch the branch to staging.*
+5. Wait for a merge commit: It may happen that we do not accept your pull request immediately or sometimes not at all for various reasons. Please do not be disappointed if this is the case. We always endeavor to incorporate any meaningful changes from the community into the mailcow project.
+6. If you are planning larger and therefore more complex pull requests, it would be advisable to first announce this in a separate issue and then start implementing it after the idea has been accepted in order to avoid unnecessary frustration and effort!
+
+---
+
+## Issue Reporting (Last modified on 18th December 2023)
+
+If you plan to report an issue within mailcow, please read and understand the following rules:
+
+1. **ONLY** use the issue tracker for bug reports or improvement requests and NOT for support questions. For support questions you can either contact the [mailcow community on Telegram](https://docs.mailcow.email/#community-support-and-chat) or the mailcow team directly in exchange for a [support fee](https://docs.mailcow.email/#commercial-support).
+2. **ONLY** report an error if you have the **necessary know-how (at least the basics)** for the administration of an e-mail server and the usage of Docker. mailcow is a complex and fully-fledged e-mail server including groupware components on a Docker basement and it requires a bit of technical know-how for debugging and operating.
+3. **ONLY** report bugs that are contained in the latest mailcow release series. *The definition of the latest release series includes the last major patch (e.g. 2023-12) and all minor patches (revisions) below it (e.g. 2023-12a, b, c etc.).* New issue reports published starting from January 1, 2024 must meet this criterion, as versions below the latest releases are no longer supported by us.
+4. When reporting a problem, please be as detailed as possible and include even the smallest changes to your mailcow installation. Simply fill out the corresponding bug report form in detail and accurately to minimize possible questions.
+5. **Before you open an issue/feature request**, please first check whether a similar request already exists in the mailcow tracker on GitHub. If so, please include yourself in this request.
+6. When you create an issue/feature request: Please note that the creation does **not guarantee an instant implementation or fix by the mailcow team or the community**.
+7. Please **ALWAYS** anonymize any sensitive information in your bug report or feature request before submitting it.
+
+### Quick guide to reporting problems:
 1. Read your logs; follow them to see what the reason for your problem is.
 2. Follow the leads given to you in your logfiles and start investigating.
 3. Restarting the troubled service or the whole stack to see if the problem persists.
 4. [Read the docs!](https://docs.mailcow.email/)
 5. Search our [issues](https://github.com/mailcow/mailcow-dockerized/issues) for your problem.
 6. [Create an issue](https://github.com/mailcow/mailcow-dockerized/issues/new/choose) over at our GitHub repository if you think your problem might be a bug or a missing feature you badly need. But please make sure, that you include **all the logs** and a full description to your problem.
 7. Ask your questions in our community-driven [support channels](https://docs.mailcow.email/#community-support-and-chat).
+
+## When creating an issue/feature request or a pull request, you will be asked to confirm these guidelines.
\ No newline at end of file

From f27e41d19cf97032a02a028c68be8980b7333ff4 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Tue, 19 Dec 2023 08:48:40 +0000
Subject: [PATCH 36/36] chore(deps): update alpine docker tag to v3.19

Signed-off-by: milkmaker
---
 .../EXTERNAL_MYSQL_SOCKET/docker-compose.override.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/helper-scripts/docker-compose.override.yml.d/EXTERNAL_MYSQL_SOCKET/docker-compose.override.yml b/helper-scripts/docker-compose.override.yml.d/EXTERNAL_MYSQL_SOCKET/docker-compose.override.yml
index f014ea67..53d9193b 100644
--- a/helper-scripts/docker-compose.override.yml.d/EXTERNAL_MYSQL_SOCKET/docker-compose.override.yml
+++ b/helper-scripts/docker-compose.override.yml.d/EXTERNAL_MYSQL_SOCKET/docker-compose.override.yml
@@ -26,6 +26,6 @@ services:
       - /var/run/mysqld/mysqld.sock:/var/run/mysqld/mysqld.sock
 
   mysql-mailcow:
-    image: alpine:3.18
+    image: alpine:3.19
     command: /bin/true
    restart: "no"
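
For context on the watchdog webhook change in [PATCH 25/36] above: the notification body template is rewritten with `sed` before it is POSTed, and switching the delimiter from `/` to `|` keeps the substitution from breaking when the subject or body contain slashes. Below is a minimal, self-contained sketch of that flow; the endpoint URL and template are illustrative placeholders rather than mailcow defaults, and the simplified `sed` expressions only match the braced `${SUBJECT}`/`${BODY}` spellings, whereas the shipped script also matches the unbraced forms.

```bash
#!/usr/bin/env bash
# Sketch only: WATCHDOG_NOTIFY_WEBHOOK and WATCHDOG_NOTIFY_WEBHOOK_BODY below are
# illustrative placeholders in the spirit of mailcow.conf, not shipped defaults.
WATCHDOG_NOTIFY_WEBHOOK="https://webhook.example.invalid/notify"
WATCHDOG_NOTIFY_WEBHOOK_BODY='{"username": "mailcow Watchdog", "content": "${SUBJECT} - ${BODY}"}'

SUBJECT="Watchdog ALERT"
BODY="Service was restarted on $(date '+%Y/%m/%d %H:%M')"   # deliberately contains slashes

# With "/" as the sed delimiter, the slashes in ${BODY} would terminate the
# expression early; using "|" as the delimiter sidesteps that class of failure.
WEBHOOK_BODY=$(echo "${WATCHDOG_NOTIFY_WEBHOOK_BODY}" \
  | sed "s|\${SUBJECT}|${SUBJECT}|g" \
  | sed "s|\${BODY}|${BODY}|g")

# POST the rendered JSON body to the webhook endpoint.
curl -X POST -H "Content-Type: application/json" -d "${WEBHOOK_BODY}" "${WATCHDOG_NOTIFY_WEBHOOK}"
```

If the body may itself contain characters that are special in `sed` replacements (`&`, `\`, or the chosen delimiter), building the JSON with a tool such as `jq` is the more robust option.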