From 231c9521fa970c81bdfe2da4d077124f030bfb70 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Mon, 13 Apr 2026 14:07:05 +0200 Subject: [PATCH 01/39] feat: added ns-stats --- packages/ns-api/files/ns.dpireport | 286 ++++++++++-------- packages/ns-monitoring/Makefile | 24 +- packages/ns-monitoring/files/99-ns-stats | 8 + .../files/netifyd/netify-ns-stats-proc.json | 14 + .../files/netifyd/netify-ns-stats-sink.json | 12 + .../files/netifyd/plugins.d/10-ns-stats.conf | 15 + packages/ns-monitoring/files/ns-stats.conf | 5 + packages/ns-monitoring/files/ns-stats.init | 47 +++ 8 files changed, 283 insertions(+), 128 deletions(-) create mode 100644 packages/ns-monitoring/files/99-ns-stats create mode 100644 packages/ns-monitoring/files/netifyd/netify-ns-stats-proc.json create mode 100644 packages/ns-monitoring/files/netifyd/netify-ns-stats-sink.json create mode 100644 packages/ns-monitoring/files/netifyd/plugins.d/10-ns-stats.conf create mode 100644 packages/ns-monitoring/files/ns-stats.conf create mode 100644 packages/ns-monitoring/files/ns-stats.init diff --git a/packages/ns-api/files/ns.dpireport b/packages/ns-api/files/ns.dpireport index 885cf45e6..6ccef0b2c 100755 --- a/packages/ns-api/files/ns.dpireport +++ b/packages/ns-api/files/ns.dpireport @@ -10,10 +10,12 @@ import sys import json import glob +import sqlite3 import socket -from datetime import date +from datetime import date, datetime, timedelta BASE_PATH = '/var/run/dpireport' +DB_PATH = '/var/run/dpireport/stats.db' def reverse_dns(ip): try: @@ -124,6 +126,59 @@ def _extract_data(dpi_file: str): return dict() +def _day_bounds(year, month, day): + start = date(int(year), int(month), int(day)) + end = start + timedelta(days=1) + return ( + int(datetime(start.year, start.month, start.day).timestamp()), + int(datetime(end.year, end.month, end.day).timestamp()), + ) + + +def _append_filter(filters, params, clause, value): + filters.append(clause) + params.append(value) + + +def _build_where(year, month, day, 
narrow_client=None, narrow_section=None, narrow_value=None): + start_ts, end_ts = _day_bounds(year, month, day) + filters = ["ts.log_time_end >= ?", "ts.log_time_end < ?"] + params = [start_ts, end_ts] + + if narrow_client is not None: + _append_filter(filters, params, "s.local_ip = ?", narrow_client) + + if narrow_section == 'application': + _append_filter(filters, params, "lower(s.detected_application_name) = lower(?)", narrow_value) + elif narrow_section == 'protocol': + _append_filter(filters, params, "lower(s.detected_protocol_name) = lower(?)", narrow_value) + elif narrow_section == 'host': + _append_filter(filters, params, "s.other_ip = ?", narrow_value) + + return " AND ".join(filters), params + + +def _query_rows(cursor, sql, params): + cursor.execute(sql, params) + return cursor.fetchall() + + +def _base_query(where_clause): + return f''' +FROM stats s +JOIN stats_timestamps ts ON ts.id = s.stats_timestamp_id +WHERE {where_clause} +''' + + +def _empty_summary_response(): + return { + 'total_traffic': 0, + 'hourly_traffic': [{'id': f'{i:02}', 'traffic': 0} for i in range(24)], + 'clients': [], + } + + def summary_v2(year=None, month=None, day=None, narrow_client=None, narrow_section=None, narrow_value=None, limit=20): if year is None: year = f'{date.today().year:02}' @@ -131,93 +186,108 @@ def summary_v2(year=None, month=None, day=None, narrow_client=None, narrow_secti month = f'{date.today().month:02}' if day is None: day = f'{date.today().day:02}' - data = _load_data(year, month, day) + where_clause, params = _build_where(year, month, day, narrow_client, narrow_section, narrow_value) total_traffic = 0 - raw_hourly_traffic = dict[str, int]() - for i in range(24): - raw_hourly_traffic[f'{i:02}'] = 0 - raw_applications = dict[str, int]() - raw_clients = list[dict]() - raw_remote_hosts = dict[str, int]() - raw_protocols = dict[str, int]() - - for client in data: - if narrow_client is not None and narrow_client != client: - continue - resolved_client = 
reverse_dns(client) - raw_client_total_traffic = 0 - - for time in data[client]: - for application in data[client][time]['application']: - if narrow_section == 'application' and application != narrow_value: - continue - elif narrow_section is not None and narrow_section != 'application': - break - if application not in raw_applications: - raw_applications[application] = 0 - raw_applications[application] += data[client][time]['application'][application] - for host in data[client][time]['host']: - if narrow_section == 'host' and host != narrow_value: - continue - elif narrow_section is not None and narrow_section != 'host': - break - if host not in raw_remote_hosts: - raw_remote_hosts[host] = 0 - raw_remote_hosts[host] += data[client][time]['host'][host] - for protocol in data[client][time]['protocol']: - if narrow_section == 'protocol' and protocol != narrow_value: - continue - elif narrow_section is not None and narrow_section != 'protocol': - break - if protocol not in raw_protocols: - raw_protocols[protocol] = 0 - raw_protocols[protocol] += data[client][time]['protocol'][protocol] - - match narrow_section: - case 'host': - if narrow_value not in data[client][time]['host']: - continue - total_traffic += data[client][time]['host'][narrow_value] - raw_hourly_traffic[time] += data[client][time]['host'][narrow_value] - raw_client_total_traffic += data[client][time]['host'][narrow_value] - case 'protocol': - if narrow_value not in data[client][time]['protocol']: - continue - total_traffic += data[client][time]['protocol'][narrow_value] - raw_hourly_traffic[time] += data[client][time]['protocol'][narrow_value] - raw_client_total_traffic += data[client][time]['protocol'][narrow_value] - case 'application': - if narrow_value not in data[client][time]['application']: - continue - total_traffic += data[client][time]['application'][narrow_value] - raw_hourly_traffic[time] += data[client][time]['application'][narrow_value] - raw_client_total_traffic += 
data[client][time]['application'][narrow_value] - case _: - total_traffic += data[client][time]['total'] - raw_hourly_traffic[time] += data[client][time]['total'] - raw_client_total_traffic += data[client][time]['total'] - - # append client - raw_clients.append({ - 'id': client, - 'label': resolved_client, - 'traffic': raw_client_total_traffic - }) - - # do not display empty values when seeing details - if narrow_section and narrow_value: - raw_clients = [client for client in raw_clients if client['traffic'] > 0] - raw_clients.sort(key=lambda x: x['traffic'], reverse=True) - final_clients = raw_clients[:limit] - - final_hourly_traffic = list() - for item in raw_hourly_traffic: - final_hourly_traffic.append({ - 'id': item, - 'traffic': raw_hourly_traffic[item] - }) - final_hourly_traffic.sort(key=lambda x: x['id']) + final_hourly_traffic = [] + final_clients = [] + final_applications = [] + final_remote_hosts = [] + final_protocols = [] + + try: + conn = sqlite3.connect(f'file:{DB_PATH}?mode=ro', uri=True) + except sqlite3.Error: + return _empty_summary_response() + + with conn: + cursor = conn.cursor() + + row = cursor.execute( + f'''SELECT COALESCE(SUM(s.local_bytes + s.other_bytes), 0) {_base_query(where_clause)}''', + params, + ).fetchone() + total_traffic = row[0] if row and row[0] is not None else 0 + + hourly_rows = _query_rows( + cursor, + f''' +SELECT strftime('%H', ts.log_time_end, 'unixepoch', 'localtime') AS hour, + COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS traffic +{_base_query(where_clause)} +GROUP BY hour +ORDER BY hour +''', + params, + ) + hourly_map = {f'{i:02}': 0 for i in range(24)} + for hour, traffic in hourly_rows: + if hour is not None: + hourly_map[hour] = traffic + final_hourly_traffic = [{'id': hour, 'traffic': hourly_map[hour]} for hour in hourly_map] + + client_rows = _query_rows( + cursor, + f''' +SELECT s.local_ip, COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS traffic +{_base_query(where_clause)} +GROUP BY s.local_ip 
+ORDER BY traffic DESC, s.local_ip ASC +LIMIT ? +''', + params + [limit], + ) + final_clients = [ + {'id': client, 'label': reverse_dns(client), 'traffic': traffic} + for client, traffic in client_rows + ] + + if narrow_section is None: + application_rows = _query_rows( + cursor, + f''' +SELECT lower(s.detected_application_name) AS application, + COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS traffic +{_base_query(where_clause)} +GROUP BY application +ORDER BY traffic DESC, application ASC +LIMIT ? +''', + params + [limit], + ) + for application, traffic in application_rows: + label = 'Unknown' if application == 'unknown' else application.removeprefix('netify.').capitalize() + final_applications.append({'id': application, 'label': label, 'traffic': traffic}) + + host_rows = _query_rows( + cursor, + f''' +SELECT s.other_ip, COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS traffic +{_base_query(where_clause)} +GROUP BY s.other_ip +ORDER BY traffic DESC, s.other_ip ASC +LIMIT ? +''', + params + [limit], + ) + final_remote_hosts = [{'id': host, 'traffic': traffic} for host, traffic in host_rows] + + protocol_rows = _query_rows( + cursor, + f''' +SELECT lower(s.detected_protocol_name) AS protocol, + COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS traffic +{_base_query(where_clause)} +GROUP BY protocol +ORDER BY traffic DESC, protocol ASC +LIMIT ? 
+''', + params + [limit], + ) + final_protocols = [ + {'id': protocol, 'label': protocol.upper(), 'traffic': traffic} + for protocol, traffic in protocol_rows + ] response = { 'total_traffic': total_traffic, @@ -225,42 +295,14 @@ def summary_v2(year=None, month=None, day=None, narrow_client=None, narrow_secti 'clients': final_clients, } - if len(raw_applications) > 0: - final_applications = list() - for item in raw_applications: - label = item - if item == 'unknown': - label = 'Unknown' - else: - label = label.removeprefix('netify.').capitalize() - final_applications.append({ - 'id': item, - 'label': label, - 'traffic': raw_applications[item] - }) - final_applications.sort(key=lambda x: x['traffic'], reverse=True) - response['applications'] = final_applications[:limit] - - if len(raw_remote_hosts) > 0: - final_remote_hosts = list() - for item in raw_remote_hosts: - final_remote_hosts.append({ - 'id': item, - 'traffic': raw_remote_hosts[item] - }) - final_remote_hosts.sort(key=lambda x: x['traffic'], reverse=True) - response['remote_hosts'] = final_remote_hosts[:limit] - - if len(raw_protocols) > 0: - final_protocols = list() - for item in raw_protocols: - final_protocols.append({ - 'id': item, - 'label': item.upper(), - 'traffic': raw_protocols[item] - }) - final_protocols.sort(key=lambda x: x['traffic'], reverse=True) - response['protocols'] = final_protocols[:limit] + if narrow_section is None and len(final_applications) > 0: + response['applications'] = final_applications + + if narrow_section is None and len(final_remote_hosts) > 0: + response['remote_hosts'] = final_remote_hosts + + if narrow_section is None and len(final_protocols) > 0: + response['protocols'] = final_protocols return response diff --git a/packages/ns-monitoring/Makefile b/packages/ns-monitoring/Makefile index 5fdd6dfd7..df77633d7 100644 --- a/packages/ns-monitoring/Makefile +++ b/packages/ns-monitoring/Makefile @@ -10,8 +10,9 @@ PKG_NAME:=ns-monitoring PKG_VERSION:=1.0.2 PKG_RELEASE:=1 
-PKG_SOURCE:=nethsecurity-monitoring-v$(PKG_VERSION).tar.gz -PKG_SOURCE_URL:=https://codeload.github.com/nethserver/nethsecurity-monitoring/tar.gz/v$(PKG_VERSION)? +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/NethServer/nethsecurity-monitoring.git +PKG_SOURCE_VERSION:=910167f116fe6862a1bd35a4d93d1faea385ce0f PKG_SOURCE_SUBDIR:=nethsecurity-monitoring-$(PKG_VERSION) PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_SOURCE_SUBDIR) @@ -24,6 +25,10 @@ PKG_BUILD_PARALLEL:=1 PKG_BUILD_FLAGS:=no-mips16 GO_PKG:=github.com/nethserver/nethsecurity-monitoring +GO_PKG_BUILD_PKG:= \ + $(GO_PKG)/cmd/ns-flows \ + $(GO_PKG)/cmd/ns-stats +GO_PKG_INSTALL_BIN_PATH:=/usr/sbin include $(INCLUDE_DIR)/package.mk include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk @@ -38,27 +43,34 @@ endef define Package/ns-monitoring/conffiles /etc/config/ns-flows +/etc/config/ns-stats endef define Package/ns-monitoring/install - $(INSTALL_DIR) $(1)/usr/sbin - $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/nethsecurity-monitoring $(1)/usr/sbin/ns-flows + $(call GoPackage/Package/Install/Bin,$(1)) $(INSTALL_DIR) $(1)/etc/config $(INSTALL_CONF) ./files/ns-flows.conf $(1)/etc/config/ns-flows + $(INSTALL_CONF) ./files/ns-stats.conf $(1)/etc/config/ns-stats $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/ns-flows.init $(1)/etc/init.d/ns-flows + $(INSTALL_BIN) ./files/ns-stats.init $(1)/etc/init.d/ns-stats + $(INSTALL_DIR) $(1)/etc/uci-defaults + $(INSTALL_BIN) ./files/99-ns-stats $(1)/etc/uci-defaults/99-ns-stats $(INSTALL_DIR) $(1)/etc/netifyd $(INSTALL_DIR) $(1)/etc/netifyd/plugins.d + # ns-flows $(INSTALL_CONF) ./files/netifyd/plugins.d/10-netify-flows.conf $(1)/etc/netifyd/plugins.d/10-netify-flows.conf $(INSTALL_CONF) ./files/netifyd/netify-sink-socket-flows.json $(1)/etc/netifyd/netify-sink-socket-flows.json $(INSTALL_CONF) ./files/netifyd/netify-proc-core-flows.json $(1)/etc/netifyd/netify-proc-core-flows.json + # ns-stats + $(INSTALL_CONF) ./files/netifyd/plugins.d/10-ns-stats.conf 
$(1)/etc/netifyd/plugins.d/10-ns-stats.conf + $(INSTALL_CONF) ./files/netifyd/netify-ns-stats-proc.json $(1)/etc/netifyd/netify-ns-stats-proc.json + $(INSTALL_CONF) ./files/netifyd/netify-ns-stats-sink.json $(1)/etc/netifyd/netify-ns-stats-sink.json endef define Package/ns-monitoring/postinst #!/bin/sh if [ -z "$${IPKG_INSTROOT}" ]; then - /etc/init.d/ns-flows enable - /etc/init.d/ns-flows restart /etc/init.d/netifyd restart fi exit 0 diff --git a/packages/ns-monitoring/files/99-ns-stats b/packages/ns-monitoring/files/99-ns-stats new file mode 100644 index 000000000..c2a2c982c --- /dev/null +++ b/packages/ns-monitoring/files/99-ns-stats @@ -0,0 +1,8 @@ +#!/bin/sh + +/etc/init.d/ns-flows enable +/etc/init.d/ns-flows start +/etc/init.d/ns-stats enable +/etc/init.d/ns-stats start + +exit 0 diff --git a/packages/ns-monitoring/files/netifyd/netify-ns-stats-proc.json b/packages/ns-monitoring/files/netifyd/netify-ns-stats-proc.json new file mode 100644 index 000000000..0f2ef045a --- /dev/null +++ b/packages/ns-monitoring/files/netifyd/netify-ns-stats-proc.json @@ -0,0 +1,14 @@ +{ + "aggregator": 3, + "batched_rows": 100, + "log_interval": 10, + "compressor": "none", + "format": "json", + "nested_mode": false, + "privacy_mode": false, + "sinks": { + "sink-ns-stats": { + "default": { } + } + } +} diff --git a/packages/ns-monitoring/files/netifyd/netify-ns-stats-sink.json b/packages/ns-monitoring/files/netifyd/netify-ns-stats-sink.json new file mode 100644 index 000000000..237035f6b --- /dev/null +++ b/packages/ns-monitoring/files/netifyd/netify-ns-stats-sink.json @@ -0,0 +1,12 @@ +{ + "timeout_connect": 30, + "timeout_transfer": 300, + "tls_verify": false, + "tls_version1": false, + "channels": { + "default": { + "enable": true, + "url": "http://127.0.0.1:8081/stats" + } + } +} diff --git a/packages/ns-monitoring/files/netifyd/plugins.d/10-ns-stats.conf b/packages/ns-monitoring/files/netifyd/plugins.d/10-ns-stats.conf new file mode 100644 index 000000000..6255c7164 --- 
/dev/null +++ b/packages/ns-monitoring/files/netifyd/plugins.d/10-ns-stats.conf @@ -0,0 +1,15 @@ +# ns-stats configuration +# +############################################################################## + +[proc-ns-stats] +enable = yes +plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-ns-stats-proc.json + +[sink-ns-stats] +enable = yes +plugin_library = ${path_plugin_libdir}/libnetify-sink-http.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-ns-stats-sink.json + +# vim: set ft=dosini : diff --git a/packages/ns-monitoring/files/ns-stats.conf b/packages/ns-monitoring/files/ns-stats.conf new file mode 100644 index 000000000..1e4277bfe --- /dev/null +++ b/packages/ns-monitoring/files/ns-stats.conf @@ -0,0 +1,5 @@ +config config 'daemon' + option addr '127.0.0.1:8081' + option db_path '/var/run/dpireport/stats.db' + option log_level 'info' + option retention '24h' diff --git a/packages/ns-monitoring/files/ns-stats.init b/packages/ns-monitoring/files/ns-stats.init new file mode 100644 index 000000000..4ee3e2b33 --- /dev/null +++ b/packages/ns-monitoring/files/ns-stats.init @@ -0,0 +1,47 @@ +#!/bin/sh /etc/rc.common + +# +# Copyright (C) 2026 Nethesis S.r.l. 
+# SPDX-License-Identifier: GPL-2.0-only +# + +USE_PROCD=1 +START=99 + +start_service() { + config_load ns-stats + + local addr db_path log_level retention + config_get addr daemon addr "127.0.0.1:8081" + config_get db_path daemon db_path "/var/run/dpireport/stats.db" + config_get log_level daemon log_level "info" + config_get retention daemon retention "24h" + + mkdir -p /var/run/dpireport + + procd_open_instance + procd_set_param command "/usr/sbin/ns-stats" + procd_append_param command "-addr" + procd_append_param command "$addr" + procd_append_param command "-db-path" + procd_append_param command "$db_path" + procd_append_param command "-log-level" + procd_append_param command "$log_level" + procd_append_param command "-retention" + procd_append_param command "$retention" + procd_set_param stdout 1 + procd_set_param stderr 1 + procd_set_param respawn 3600 5 0 + procd_close_instance +} + +service_triggers() +{ + procd_add_reload_trigger "ns-stats" +} + +reload_service() +{ + stop + start +} From 0e8276a5d2471d6f9c9a025501c7ba1eb3b190fb Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Tue, 14 Apr 2026 09:16:40 +0200 Subject: [PATCH 02/39] controller fixes --- packages/ns-api/files/ns.controller | 93 +++++++++++++++++++++++------ 1 file changed, 75 insertions(+), 18 deletions(-) diff --git a/packages/ns-api/files/ns.controller b/packages/ns-api/files/ns.controller index 6ca392511..d7ecd9e41 100755 --- a/packages/ns-api/files/ns.controller +++ b/packages/ns-api/files/ns.controller @@ -22,6 +22,7 @@ import glob import socket AUTHORIZED_KEYS = "/etc/dropbear/authorized_keys" +DPIREPORT_DB = "/var/run/dpireport/stats.db" dns_cache = {} @@ -237,27 +238,83 @@ def dump_mwan_events(): return {"data": ret} def dump_dpi_stats(): - # Parse /var/run/dpireport for the last 20 minutes - # filename example /var/run/dpireport/2024/08/20/172.25.5.17/15.json + # Read the last 20 minutes from the SQLite stats database. 
+ # Host is mapped to the remote IP to preserve the output schema. ret = [] - for file, timestamp in find_recent_files("/var/run/dpireport", 20): - # parse the filename to get the date client - file_split = file.removeprefix('/var/run/dpireport/').split('/') - year = int(file_split[0]) - month = int(file_split[1]) - day = int(file_split[2]) - client_address = file_split[3] - hour = int(file_split[4].replace('.json', '')) - - with open(file, "r") as f: - data = json.load(f) - for key in ("protocol", "host", "application"): - for el in data.get(key, {}): + cutoff = int(time.time()) - 1200 + + def _load_rows(cursor, sql, metric): + try: + cursor.execute(sql, (cutoff,)) + for timestamp, client_address, value, bytes_count in cursor.fetchall(): + if not value: + continue ret.append({ - "timestamp": int(datetime(year, month, day, hour, 0, 0).timestamp()), - "client_address": client_address, "client_name": reverse_dns(client_address), key: el, - "bytes": data[key][el] + "timestamp": int(timestamp), + "client_address": client_address, + "client_name": reverse_dns(client_address), + metric: value, + "bytes": bytes_count, }) + except sqlite3.Error: + pass + + try: + conn = sqlite3.connect(f'file:{DPIREPORT_DB}?mode=ro', uri=True) + except sqlite3.Error: + return {"data": ret} + + with conn: + cursor = conn.cursor() + + _load_rows( + cursor, + ''' +SELECT ts.log_time_end, + s.local_ip, + lower(s.detected_protocol_name) AS protocol, + COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS bytes +FROM stats s +JOIN stats_timestamps ts ON ts.id = s.stats_timestamp_id +WHERE ts.log_time_end >= ? 
AND s.detected_protocol_name IS NOT NULL AND s.detected_protocol_name != '' +GROUP BY ts.log_time_end, s.local_ip, protocol +ORDER BY ts.log_time_end DESC +''', + "protocol", + ) + + _load_rows( + cursor, + ''' +SELECT ts.log_time_end, + s.local_ip, + lower(s.detected_application_name) AS application, + COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS bytes +FROM stats s +JOIN stats_timestamps ts ON ts.id = s.stats_timestamp_id +WHERE ts.log_time_end >= ? AND s.detected_application_name IS NOT NULL AND s.detected_application_name != '' +GROUP BY ts.log_time_end, s.local_ip, application +ORDER BY ts.log_time_end DESC +''', + "application", + ) + + _load_rows( + cursor, + ''' +SELECT ts.log_time_end, + s.local_ip, + s.other_ip AS host, + COALESCE(SUM(s.local_bytes + s.other_bytes), 0) AS bytes +FROM stats s +JOIN stats_timestamps ts ON ts.id = s.stats_timestamp_id +WHERE ts.log_time_end >= ? AND s.other_ip IS NOT NULL AND s.other_ip != '' +GROUP BY ts.log_time_end, s.local_ip, s.other_ip +ORDER BY ts.log_time_end DESC +''', + "host", + ) + return {"data": ret} def dump_openvpn_connections(): From 96007a58a4551e9388fb19eb0f2f7a1d53601259 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 15 Apr 2026 10:31:36 +0200 Subject: [PATCH 03/39] removed deprecations --- packages/ns-api/files/ns.dpireport | 131 ++++------------------------- 1 file changed, 15 insertions(+), 116 deletions(-) diff --git a/packages/ns-api/files/ns.dpireport b/packages/ns-api/files/ns.dpireport index 6ccef0b2c..a6f0aaf95 100755 --- a/packages/ns-api/files/ns.dpireport +++ b/packages/ns-api/files/ns.dpireport @@ -9,12 +9,10 @@ import sys import json -import glob import sqlite3 import socket from datetime import date, datetime, timedelta -BASE_PATH = '/var/run/dpireport' DB_PATH = '/var/run/dpireport/stats.db' def reverse_dns(ip): @@ -24,106 +22,22 @@ def reverse_dns(ip): return ip def list_days(): - ret = list() - for day in glob.glob('/var/run/dpireport/*/*/*'): - tmp = 
day.removeprefix('/var/run/dpireport/').split('/') - ret.append((tmp[0], tmp[1], tmp[2])) - return {"days": ret} - -def summary(year, month, day, client="*", limit=10): - """ - Deprecated, please use summary_v2 - """ - hours = dict() - for i in range(24): - hours[f'{i:02}'] = 0 - ret = {"total": 0, "clients": {}, "hours": hours, "names": {}, "protocol": {}, "host": {}, "application": {}} - # prepenad leading zero, if needed - month = f'{int(month):02}' - day = f'{int(day):02}' - for client_f in glob.glob(f'/var/run/dpireport/{year}/{month}/{day}/{client}'): - client = client_f.removeprefix(f'/var/run/dpireport/{year}/{month}/{day}/') - cdetails = _details(year, month, day, client) - ret["clients"][client] = cdetails["total"] - ret["names"][client] = cdetails["name"] - for hour in cdetails["hours"]: - try: - ret["hours"][hour] += cdetails["hours"][hour]["total"] - except: - pass - ret["total"] += cdetails["total"] - - for key in ("protocol", "host", "application"): - for el in cdetails[key]: - if el not in ret[key]: - ret[key][el] = 0 - ret[key][el] += cdetails[key][el] try: - tmp_c = sorted(ret['clients'].items(), key=lambda x: x[1], reverse=True) - except: - tmp_c = list() - try: - tmp_h = sorted(ret['hours'].items()) - except: - tmp_h = list() - - ret['clients'] = tmp_c - ret['hours'] = tmp_h - - for key in ("protocol", "host", "application"): - try: - tmp = sorted(ret[key].items(), key=lambda x: x[1], reverse=True) - except: - tmp = list() - ret[key] = tmp[0:10] - - return ret - -def _details(year, month, day, client): - """ - Deprecated, please use summary_v2 - """ - hours = dict() - if not client: - return ret - for i in range(24): - hours[f'{i:02}'] = dict() - ret = {"hours": hours, "total": 0, "name": reverse_dns(client), "protocol": {}, "host": {}, "application": {}} - ddir = f'/var/run/dpireport/{year}/{month}/{day}/{client}/' - for hour_f in glob.glob(f'{ddir}??.json'): - hour = hour_f.removesuffix(".json").removeprefix(ddir) - with open(hour_f, 'r') as 
fp: - ret["hours"][hour] = json.load(fp) - for key in ("protocol", "host", "application"): - for el in ret["hours"][hour][key]: - if el not in ret[key]: - ret[key][el] = 0 - ret[key][el] += ret["hours"][hour][key][el] - ret["total"] += ret["hours"][hour]["total"] - return ret - - -def _load_data(year, month, day): - search_path = f'{BASE_PATH}/{year}/{month}/{day}/' - clients = dict() - for client_file in glob.glob(f'{search_path}/*'): - client_name = client_file.removeprefix(search_path) - client_data = dict() - client_hourly = glob.glob(f'{client_file}/*.json') - for data_file in client_hourly: - time = data_file.removeprefix(f'{client_file}/').removesuffix('.json') - client_data[time] = _extract_data(data_file) - clients[client_name] = client_data - - return clients - - -def _extract_data(dpi_file: str): - try: - with open(dpi_file, 'r') as file: - return json.load(file) - except: - return dict() + with sqlite3.connect(f'file:{DB_PATH}?mode=ro', uri=True) as conn: + cursor = conn.cursor() + cursor.execute( + ''' +SELECT DISTINCT + strftime('%Y', log_time_end, 'unixepoch', 'localtime') AS year, + strftime('%m', log_time_end, 'unixepoch', 'localtime') AS month, + strftime('%d', log_time_end, 'unixepoch', 'localtime') AS day +FROM stats_timestamps +ORDER BY year DESC, month DESC, day DESC +''' + ) + return {"days": [tuple(row) for row in cursor.fetchall()]} + except sqlite3.Error: + return {"days": []} def _day_bounds(year, month, day): @@ -311,9 +225,6 @@ cmd = sys.argv[1] if cmd == 'list': print(json.dumps({ - "summary": {"year": "2023", "month": "06", "day": "02", "limit": 10}, - "summary-by-client": {"year": "2023", "month": "06", "day": "02", "client": "192.168.1.1", "limit": 10}, - "details": {"year": "2023", "month": "06", "day": "16", "client": "192.168.100.22"}, "days": {}, "summary-v2": {"year": "2024", "month": "06", "day": "02", "client": "127.0.0.1", "section": "application", "value": "netify.http", "limit": 20} @@ -326,15 +237,3 @@ else: args = 
json.loads(sys.stdin.read()) print(json.dumps(summary_v2(args.get('year'), args.get('month'), args.get('day'), args.get('client'), args.get('section'), args.get('value'), args.get('limit', 20)))) - else: - args = json.loads(sys.stdin.read()) - year = args.get('year', f'{date.today().year:02}') - month = args.get('month', f'{date.today().month:02}') - day = args.get('day', f'{date.today().day:02}') - if action == "summary": - limit = args.get('limit', 10) - print(json.dumps(summary(year, month, day, limit=limit))) - elif action == "summary-by-client": - client = args.get('client', '*') - limit = args.get('limit', 10) - print(json.dumps(summary(year, month, day, client, limit=limit))) From 2ade2d35cdd7b4ee2e599576b307010f7ef24ed7 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Thu, 27 Nov 2025 15:43:29 +0100 Subject: [PATCH 04/39] added complete management of metrics and data --- config/monitoring.conf | 2 + grafana-dashboards/aggregator-netifyd.json | 922 ++++++++++++++++++ packages/netifyd/Makefile | 1 + .../netify-proc-aggregator-telegraf.json | 14 + .../files/etc/netifyd/netify-sink-log.json | 5 + .../plugins.d/10-netify-proc-aggregator.conf | 5 + packages/telegraf/Makefile | 70 ++ packages/telegraf/files/telegraf.conf | 39 + packages/telegraf/files/telegraf.initd | 30 + packages/victoria-metrics/Makefile | 74 ++ .../files/victoria-metrics.conf | 3 + .../files/victoria-metrics.initd | 40 + 12 files changed, 1205 insertions(+) create mode 100644 config/monitoring.conf create mode 100644 grafana-dashboards/aggregator-netifyd.json create mode 100644 packages/netifyd/files/etc/netifyd/netify-proc-aggregator-telegraf.json create mode 100644 packages/telegraf/Makefile create mode 100644 packages/telegraf/files/telegraf.conf create mode 100644 packages/telegraf/files/telegraf.initd create mode 100644 packages/victoria-metrics/Makefile create mode 100644 packages/victoria-metrics/files/victoria-metrics.conf create mode 100644 
packages/victoria-metrics/files/victoria-metrics.initd diff --git a/config/monitoring.conf b/config/monitoring.conf new file mode 100644 index 000000000..5994a2760 --- /dev/null +++ b/config/monitoring.conf @@ -0,0 +1,2 @@ +CONFIG_PACKAGE_victoria-metrics=y +CONFIG_PACKAGE_telegraf=y diff --git a/grafana-dashboards/aggregator-netifyd.json b/grafana-dashboards/aggregator-netifyd.json new file mode 100644 index 000000000..57a8c63f9 --- /dev/null +++ b/grafana-dashboards/aggregator-netifyd.json @@ -0,0 +1,922 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 1, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "repeat": "interfaces", + "title": "Generic", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + 
{ + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(local_ip) (netifyd_other_bytes)", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Download", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } 
+ }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(local_ip) (netifyd_local_bytes)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Upload", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 11, + "panels": [], + "title": "Applications", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_application_name) (netifyd_other_bytes)", + 
"hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Application download", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_application_name) (netifyd_local_bytes)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Application upload", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 13, + "panels": [], + "title": "Remote IPs", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 7, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(other_ip) (netifyd_other_bytes)", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Remote IPs downloaded from", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, 
+ "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(other_ip) (netifyd_local_bytes)", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Remote IP uploaded to", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 12, + "panels": [], + "title": "Protocols", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + 
"lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 31 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_protocol_name) (netifyd_other_bytes)", + "hide": false, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Protocol Download", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic-by-name" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ 
+ { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 31 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_protocol_name) (netifyd_local_bytes)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Protocol uploads", + "type": "timeseries" + } + ], + "preload": false, + "refresh": "auto", + "schemaVersion": 42, + "tags": [], + "templating": { + "list": [ + { + "allowCustomValue": false, + "current": { + "text": "VictoriaMetrics", + "value": "P4169E866C3094E38" + }, + "label": "Datasource", + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allowCustomValue": true, + "baseFilters": [], + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "filters": [], + "label": "Filter", + "name": "filter", + "type": "adhoc" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "nowDelay": "" + }, + "timezone": "browser", + "title": "Netifyd", + "uid": "duwghav", + "version": 1 +} \ No newline at end of file diff --git a/packages/netifyd/Makefile b/packages/netifyd/Makefile index 46574ae64..92e02054e 100644 --- a/packages/netifyd/Makefile +++ b/packages/netifyd/Makefile @@ -220,6 +220,7 @@ define Package/netifyd/install $(LN) /usr/lib/libnetify-plm.so.1.0.0 $(1)/usr/lib/libnetify-plm.so.1 # netify-proc-aggregator $(INSTALL_DATA) ./files/etc/netifyd/netify-proc-aggregator.json $(1)/etc/netifyd/netify-proc-aggregator.json + $(INSTALL_DATA) 
./files/etc/netifyd/netify-proc-aggregator-telegraf.json $(1)/etc/netifyd/netify-proc-aggregator-telegraf.json $(INSTALL_DATA) ./files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf $(1)/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf $(INSTALL_BIN) $(PKG_BUILD_DIR)/libnetify-proc-aggregator.so.0.0.0 $(1)/usr/lib/libnetify-proc-aggregator.so.0.0.0 $(LN) /usr/lib/libnetify-proc-aggregator.so.0.0.0 $(1)/usr/lib/libnetify-proc-aggregator.so diff --git a/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-telegraf.json b/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-telegraf.json new file mode 100644 index 000000000..28db2387c --- /dev/null +++ b/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-telegraf.json @@ -0,0 +1,14 @@ +{ + "aggregator": 3, + "batched_rows": 0, + "log_interval": 15, + "compressor": "none", + "format": "json", + "nested_mode": false, + "privacy_mode": false, + "sinks": { + "sink-log": { + "telegraf": { } + } + } +} \ No newline at end of file diff --git a/packages/netifyd/files/etc/netifyd/netify-sink-log.json b/packages/netifyd/files/etc/netifyd/netify-sink-log.json index 1bd2fdd58..878038c96 100644 --- a/packages/netifyd/files/etc/netifyd/netify-sink-log.json +++ b/packages/netifyd/files/etc/netifyd/netify-sink-log.json @@ -5,6 +5,11 @@ "log_path": "/var/run/netifyd", "log_name": "aggregator-stats", "overwrite": true + }, + "telegraf": { + "log_path": "/var/run/netifyd", + "log_name": "telegraf", + "overwrite": true } } } diff --git a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf index 783ec2de8..22eaa57d3 100644 --- a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf +++ b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf @@ -7,4 +7,9 @@ enable = yes plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 conf_filename = 
${path_state_persistent}/netify-proc-aggregator.json +[proc-aggregator-telegraf] +enable = yes +plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-proc-aggregator-telegraf.json + # vim: set ft=dosini : diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile new file mode 100644 index 000000000..90047e103 --- /dev/null +++ b/packages/telegraf/Makefile @@ -0,0 +1,70 @@ +# +# Copyright (C) 2025 Nethesis S.r.l. +# SPDX-License-Identifier: GPL-2.0-only +# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=telegraf +# renovate: datasource=github-tags depName=influxdata/telegraf +PKG_VERSION:=1.33.2 +PKG_RELEASE:=1 + +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=https://codeload.github.com/influxdata/telegraf/tar.gz/v$(PKG_VERSION)? +PKG_SOURCE_SUBDIR:=telegraf-$(PKG_VERSION) +PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_SOURCE_SUBDIR) + +PKG_HASH:=skip +PKG_MAINTAINER:=Tommaso Bailetti +PKG_LICENSE:=MIT + +PKG_BUILD_DEPENDS:=golang/host +PKG_BUILD_PARALLEL:=1 +PKG_BUILD_FLAGS:=no-mips16 + +GO_PKG:=github.com/influxdata/telegraf/cmd/$(PKG_NAME) +GO_BUILD_PKG:=github.com/influxdata/telegraf/cmd/$(PKG_NAME) +GO_PKG_LDFLAGS_X:=github.com/influxdata/telegraf/internal.Version=$(PKG_VERSION) +GO_PKG_TAGS:= \ + custom \ + inputs.file \ + outputs.influxdb \ + parsers.json_v2 + +include $(INCLUDE_DIR)/package.mk +include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk + +define Package/telegraf + SECTION:=base + CATEGORY:=NethServer + TITLE:=Telegraf + URL:=https://github.com/influxdata/telegraf + DEPENDS:=$(GO_ARCH_DEPENDS) +endef + +define Package/telegraf/description + Telegraf is an agent for collecting, processing, aggregating, and writing metrics. 
+endef + +define Package/telegraf/conffiles +/etc/telegraf.conf +endef + + +define Package/telegraf/install + $(call GoPackage/Package/Install/Bin,$(1)) + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) ./files/telegraf.initd $(1)/etc/init.d/telegraf + $(INSTALL_DIR) $(1)/etc/config + $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf.conf +endef + +define Package/telegraf/postinst +#!/bin/sh +[ -z "$${IPKG_INSTROOT}" ] && /etc/init.d/telegraf restart +exit 0 +endef + +$(eval $(call GoBinPackage,telegraf)) +$(eval $(call BuildPackage,telegraf)) \ No newline at end of file diff --git a/packages/telegraf/files/telegraf.conf b/packages/telegraf/files/telegraf.conf new file mode 100644 index 000000000..328cd095d --- /dev/null +++ b/packages/telegraf/files/telegraf.conf @@ -0,0 +1,39 @@ +[global_tags] + +[agent] + interval = "15s" + round_interval = true + metric_batch_size = 1000 + metric_buffer_limit = 10000 + collection_jitter = "0s" + flush_interval = "15s" + flush_jitter = "0s" + precision = "0s" + omit_hostname = true + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +[[outputs.influxdb]] + urls = ["http://127.0.0.1:8428"] + database = "netifyd" + skip_database_creation = true + exclude_retention_policy_tag = true + content_encoding = "gzip" + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +[[inputs.file]] + files = ["/var/run/netifyd/telegraf.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "netifyd" + timestamp_path = "log_time_end" + timestamp_format = "unix" + [[inputs.file.json_v2.object]] + path = "stats" + tags = ["detected_application", "detected_application_name", "detected_protocol", "detected_protocol_name", "interface", "internal", "ip_protocol", 
"ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] + excluded_keys = ["digests"] diff --git a/packages/telegraf/files/telegraf.initd b/packages/telegraf/files/telegraf.initd new file mode 100644 index 000000000..9071b62d2 --- /dev/null +++ b/packages/telegraf/files/telegraf.initd @@ -0,0 +1,30 @@ +#!/bin/sh /etc/rc.common + +# +# Copyright (C) 2025 Nethesis S.r.l. +# SPDX-License-Identifier: GPL-2.0-only +# + +# shellcheck disable=SC3043 + +START=99 +USE_PROCD=1 + +PROG="/usr/bin/telegraf" + +start_service() { + procd_open_instance + procd_set_param stdout 1 + procd_set_param stderr 1 + procd_set_param respawn 3600 5 0 + procd_set_param file /etc/telegraf.conf + procd_set_param command $PROG + procd_append_param command --config /etc/telegraf.conf + procd_close_instance +} + +reload_service() +{ + stop + start +} diff --git a/packages/victoria-metrics/Makefile b/packages/victoria-metrics/Makefile new file mode 100644 index 000000000..f41b238a7 --- /dev/null +++ b/packages/victoria-metrics/Makefile @@ -0,0 +1,74 @@ +# +# Copyright (C) 2025 Nethesis S.r.l. +# SPDX-License-Identifier: GPL-2.0-only +# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=victoria-metrics +# renovate: datasource=github-tags depName=VictoriaMetrics/VictoriaMetrics +PKG_VERSION:=1.110.1 +PKG_RELEASE:=1 + +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=https://codeload.github.com/VictoriaMetrics/VictoriaMetrics/tar.gz/v$(PKG_VERSION)? 
+PKG_SOURCE_SUBDIR:=VictoriaMetrics-$(PKG_VERSION) +PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_SOURCE_SUBDIR) + +PKG_HASH:=skip +PKG_MAINTAINER:=Tommaso Bailetti +PKG_LICENSE:=Apache-2.0 + +PKG_BUILD_DEPENDS:=golang/host +PKG_BUILD_PARALLEL:=1 +PKG_BUILD_FLAGS:=no-mips16 + +GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/$(PKG_NAME) +GO_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/$(PKG_NAME) +GO_PKG_GCFLAGS:= \ + -trimpath \ + -buildvcs=false +GO_PKG_LDFLAGS:= \ + -extldflags \ + -static +GO_PKG_LDFLAGS_X:=github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo.Version=$(PKG_NAME)-v$(PKG_VERSION) +GO_PKG_TAGS:= \ + netgo \ + osusergo \ + musl + +include $(INCLUDE_DIR)/package.mk +include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk + +define Package/victoria-metrics + SECTION:=base + CATEGORY:=NethServer + TITLE:=Victoria Metrics + URL:=https://github.com/VictoriaMetrics/VictoriaMetrics + DEPENDS:=$(GO_ARCH_DEPENDS) +endef + +define Package/victoria-metrics/description + VictoriaMetrics time series database / single-node server. 
+endef + +define Package/victoria-metrics/conffiles +/etc/config/victoria-metrics +endef + +define Package/victoria-metrics/install + $(call GoPackage/Package/Install/Bin,$(1)) + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) ./files/victoria-metrics.initd $(1)/etc/init.d/victoria-metrics + $(INSTALL_DIR) $(1)/etc/config + $(INSTALL_DATA) ./files/victoria-metrics.conf $(1)/etc/config/victoria-metrics +endef + +define Package/victoria-metrics/postinst +#!/bin/sh +[ -z "$${IPKG_INSTROOT}" ] && /etc/init.d/victoria-metrics restart +exit 0 +endef + +$(eval $(call GoBinPackage,victoria-metrics)) +$(eval $(call BuildPackage,victoria-metrics)) \ No newline at end of file diff --git a/packages/victoria-metrics/files/victoria-metrics.conf b/packages/victoria-metrics/files/victoria-metrics.conf new file mode 100644 index 000000000..fcceb6e96 --- /dev/null +++ b/packages/victoria-metrics/files/victoria-metrics.conf @@ -0,0 +1,3 @@ +config victoriametrics 'main' + option storage_path '/var/lib/victoriametrics' + option retention_period '1y' diff --git a/packages/victoria-metrics/files/victoria-metrics.initd b/packages/victoria-metrics/files/victoria-metrics.initd new file mode 100644 index 000000000..f2d2bcdaf --- /dev/null +++ b/packages/victoria-metrics/files/victoria-metrics.initd @@ -0,0 +1,40 @@ +#!/bin/sh /etc/rc.common + +# +# Copyright (C) 2023 Nethesis S.r.l. 
+# SPDX-License-Identifier: GPL-2.0-only +# + +# shellcheck disable=SC3043 + +START=99 +USE_PROCD=1 + +PROG="/usr/bin/victoria-metrics" + +start_service() { + config_load victoria-metrics + local storage_path retention_period + config_get storage_path main storage_path /var/lib/victoriametrics + config_get retention_period main retention_period 1 + + procd_open_instance + procd_set_param stdout 1 + procd_set_param stderr 1 + procd_set_param respawn 3600 5 0 + procd_set_param command $PROG + procd_append_param command -storageDataPath="$storage_path" + procd_append_param command -retentionPeriod="$retention_period" + procd_close_instance +} + +service_triggers() +{ + procd_add_reload_trigger victoria-metrics +} + +reload_service() +{ + stop + start +} From fb6ea96f1175e98430ddce86c37a8e98eae608a8 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 5 Dec 2025 10:19:20 +0100 Subject: [PATCH 05/39] moved telegraf configuration where it's more appropriate --- packages/telegraf/Makefile | 6 +++--- packages/telegraf/files/telegraf.initd | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 90047e103..0bbe2381f 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -48,7 +48,7 @@ define Package/telegraf/description endef define Package/telegraf/conffiles -/etc/telegraf.conf +/etc/telegraf/telegraf.d/ endef @@ -56,8 +56,8 @@ define Package/telegraf/install $(call GoPackage/Package/Install/Bin,$(1)) $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/telegraf.initd $(1)/etc/init.d/telegraf - $(INSTALL_DIR) $(1)/etc/config - $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf.conf + $(INSTALL_DIR) $(1)/etc/telegraf + $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf/telegraf.conf endef define Package/telegraf/postinst diff --git a/packages/telegraf/files/telegraf.initd b/packages/telegraf/files/telegraf.initd index 9071b62d2..3be4216b0 100644 --- 
a/packages/telegraf/files/telegraf.initd +++ b/packages/telegraf/files/telegraf.initd @@ -17,9 +17,8 @@ start_service() { procd_set_param stdout 1 procd_set_param stderr 1 procd_set_param respawn 3600 5 0 - procd_set_param file /etc/telegraf.conf procd_set_param command $PROG - procd_append_param command --config /etc/telegraf.conf + procd_append_param command --watch-config notify procd_close_instance } From 56d6dff2e8f1264592372495ad6821fcb5f00b34 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 5 Dec 2025 12:00:13 +0100 Subject: [PATCH 06/39] added correct values for netifyd --- grafana-dashboards/aggregator-netifyd.json | 113 +++++++++++++++++++-- 1 file changed, 105 insertions(+), 8 deletions(-) diff --git a/grafana-dashboards/aggregator-netifyd.json b/grafana-dashboards/aggregator-netifyd.json index 57a8c63f9..097f4eab3 100644 --- a/grafana-dashboards/aggregator-netifyd.json +++ b/grafana-dashboards/aggregator-netifyd.json @@ -125,11 +125,24 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(local_ip) (netifyd_other_bytes)", + "expr": "sum by(local_ip) (netifyd_other_bytes{local_origin=\"true\"})", "hide": false, "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(local_ip) (netifyd_local_bytes{local_origin=\"false\"})", + "hide": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Download", @@ -225,10 +238,22 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(local_ip) (netifyd_local_bytes)", + "expr": "sum by(local_ip) (netifyd_local_bytes{local_origin=\"true\"})", "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(local_ip) (netifyd_other_bytes{local_origin=\"false\"})", + "hide": false, + 
"legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Upload", @@ -337,11 +362,23 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(detected_application_name) (netifyd_other_bytes)", + "expr": "sum by(detected_application_name) (netifyd_other_bytes{local_origin=\"true\"})", "hide": false, "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_application_name) (netifyd_local_bytes{local_origin=\"false\"})", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Application download", @@ -437,10 +474,22 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(detected_application_name) (netifyd_local_bytes)", + "expr": "sum by(detected_application_name) (netifyd_local_bytes{local_origin=\"true\"})", "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_application_name) (netifyd_other_bytes{local_origin=\"false\"})", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Application upload", @@ -549,11 +598,23 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(other_ip) (netifyd_other_bytes)", + "expr": "sum by(other_ip) (netifyd_other_bytes{local_origin=\"true\"})", "hide": false, "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(other_ip) (netifyd_local_bytes{local_origin=\"false\"})", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Remote IPs downloaded from", @@ -650,11 +711,23 @@ }, "editorMode": "builder", "exemplar": false, - "expr": "sum by(other_ip) (netifyd_local_bytes)", + 
"expr": "sum by(other_ip) (netifyd_local_bytes{local_origin=\"true\"})", "instant": false, "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(other_ip) (netifyd_other_bytes{local_origin=\"false\"})", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Remote IP uploaded to", @@ -763,12 +836,24 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(detected_protocol_name) (netifyd_other_bytes)", + "expr": "sum by(detected_protocol_name) (netifyd_other_bytes{local_origin=\"true\"})", "hide": false, "interval": "", "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_protocol_name) (netifyd_local_bytes{local_origin=\"false\"})", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Protocol Download", @@ -864,10 +949,22 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "sum by(detected_protocol_name) (netifyd_local_bytes)", + "expr": "sum by(detected_protocol_name) (netifyd_local_bytes{local_origin=\"true\"})", "legendFormat": "__auto", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "builder", + "expr": "sum by(detected_protocol_name) (netifyd_other_bytes{local_origin=\"false\"})", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" } ], "title": "Protocol uploads", From 8ad881098d4b57214ad7a3f246fca15ff042745a Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 5 Dec 2025 12:31:43 +0100 Subject: [PATCH 07/39] added grafana for monitoring --- grafana/docker-compose.yml | 44 ++++++++ .../victoriametrics-netifyd.json | 100 ++++++++---------- 2 files changed, 87 insertions(+), 57 deletions(-) 
create mode 100644 grafana/docker-compose.yml rename grafana-dashboards/aggregator-netifyd.json => grafana/victoriametrics-netifyd.json (93%) diff --git a/grafana/docker-compose.yml b/grafana/docker-compose.yml new file mode 100644 index 000000000..c4b6b76c4 --- /dev/null +++ b/grafana/docker-compose.yml @@ -0,0 +1,44 @@ +services: + grafana: + image: grafana/grafana:12.3 + restart: on-failure + ports: + - "3000:3000" + environment: + - GF_SECURITY_ADMIN_USER=admin + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_PLUGINS_PREINSTALL=victoriametrics-metrics-datasource + volumes: + - grafana-data:/var/lib/grafana + configs: + - source: grafana-datasources + target: /etc/grafana/provisioning/datasources/datasource.yml + - source: grafana-dashboards + target: /etc/grafana/provisioning/dashboards/dashboards.yml + - source: netifyd-dashboard + target: /var/lib/grafana/dashboards/netifyd.json + +volumes: + grafana-data: { } + +configs: + grafana-datasources: + content: | + apiVersion: 1 + datasources: + - name: VictoriaMetrics - Plugin + type: victoriametrics-metrics-datasource + access: proxy + url: http://10.0.1.1:8428 + uid: victoriametrics_plugin + default: true + grafana-dashboards: + content: | + apiVersion: 1 + providers: + - name: Default + type: file + options: + path: /var/lib/grafana/dashboards + netifyd-dashboard: + file: ./victoriametrics-netifyd.json diff --git a/grafana-dashboards/aggregator-netifyd.json b/grafana/victoriametrics-netifyd.json similarity index 93% rename from grafana-dashboards/aggregator-netifyd.json rename to grafana/victoriametrics-netifyd.json index 097f4eab3..028bf7d94 100644 --- a/grafana-dashboards/aggregator-netifyd.json +++ b/grafana/victoriametrics-netifyd.json @@ -38,7 +38,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -103,7 +103,7 @@ "x": 0, "y": 1 }, - "id": 14, + "id": 16, "options": { "legend": { "calcs": [], @@ -117,15 +117,16 @@ "sort": 
"none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, - "editorMode": "builder", + "editorMode": "code", "expr": "sum by(local_ip) (netifyd_other_bytes{local_origin=\"true\"})", + "format": "time_series", "hide": false, "legendFormat": "__auto", "range": true, @@ -134,10 +135,11 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, - "editorMode": "builder", + "editorMode": "code", "expr": "sum by(local_ip) (netifyd_local_bytes{local_origin=\"false\"})", + "format": "time_series", "hide": false, "instant": false, "legendFormat": "__auto", @@ -151,7 +153,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -216,7 +218,7 @@ "x": 12, "y": 1 }, - "id": 5, + "id": 17, "options": { "legend": { "calcs": [], @@ -230,12 +232,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(local_ip) (netifyd_local_bytes{local_origin=\"true\"})", @@ -246,7 +248,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(local_ip) (netifyd_other_bytes{local_origin=\"false\"})", @@ -275,7 +277,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -354,12 +356,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_application_name) (netifyd_other_bytes{local_origin=\"true\"})", @@ 
-371,7 +373,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_application_name) (netifyd_local_bytes{local_origin=\"false\"})", @@ -387,7 +389,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -466,12 +468,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_application_name) (netifyd_local_bytes{local_origin=\"true\"})", @@ -482,7 +484,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_application_name) (netifyd_other_bytes{local_origin=\"false\"})", @@ -511,7 +513,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -590,12 +592,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(other_ip) (netifyd_other_bytes{local_origin=\"true\"})", @@ -607,7 +609,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(other_ip) (netifyd_local_bytes{local_origin=\"false\"})", @@ -623,7 +625,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -702,12 +704,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": 
"victoriametrics_plugin" }, "editorMode": "builder", "exemplar": false, @@ -720,7 +722,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(other_ip) (netifyd_other_bytes{local_origin=\"false\"})", @@ -749,7 +751,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -828,12 +830,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_protocol_name) (netifyd_other_bytes{local_origin=\"true\"})", @@ -846,7 +848,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_protocol_name) (netifyd_local_bytes{local_origin=\"false\"})", @@ -862,7 +864,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "fieldConfig": { "defaults": { @@ -941,12 +943,12 @@ "sort": "none" } }, - "pluginVersion": "12.2.0", + "pluginVersion": "12.3.0", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_protocol_name) (netifyd_local_bytes{local_origin=\"true\"})", @@ -957,7 +959,7 @@ { "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "editorMode": "builder", "expr": "sum by(detected_protocol_name) (netifyd_other_bytes{local_origin=\"false\"})", @@ -977,26 +979,12 @@ "tags": [], "templating": { "list": [ - { - "allowCustomValue": false, - "current": { - "text": "VictoriaMetrics", - "value": "P4169E866C3094E38" - }, - "label": "Datasource", - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 
1, - "regex": "", - "type": "datasource" - }, { "allowCustomValue": true, "baseFilters": [], "datasource": { "type": "prometheus", - "uid": "${datasource}" + "uid": "victoriametrics_plugin" }, "filters": [], "label": "Filter", @@ -1009,11 +997,9 @@ "from": "now-1h", "to": "now" }, - "timepicker": { - "nowDelay": "" - }, + "timepicker": {}, "timezone": "browser", - "title": "Netifyd", - "uid": "duwghav", - "version": 1 + "title": "VictoriaMetrics Netifyd", + "uid": "xduhwubc", + "version": 2 } \ No newline at end of file From 15ebde10c05e3d81d5502308cca30ff7a58ec783 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 5 Dec 2025 15:08:42 +0100 Subject: [PATCH 08/39] removed unneeded fields --- packages/telegraf/files/telegraf.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/telegraf/files/telegraf.conf b/packages/telegraf/files/telegraf.conf index 328cd095d..cf409ecd2 100644 --- a/packages/telegraf/files/telegraf.conf +++ b/packages/telegraf/files/telegraf.conf @@ -35,5 +35,5 @@ timestamp_format = "unix" [[inputs.file.json_v2.object]] path = "stats" - tags = ["detected_application", "detected_application_name", "detected_protocol", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] + tags = ["detected_application_name", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] excluded_keys = ["digests"] From 7dde19e7e387e0f440f6e0d0e310eddfe37369ed Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 17 Dec 2025 12:53:26 +0100 Subject: [PATCH 09/39] unlimited restart --- grafana/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grafana/docker-compose.yml b/grafana/docker-compose.yml index c4b6b76c4..09fc6526d 100644 --- a/grafana/docker-compose.yml +++ 
b/grafana/docker-compose.yml @@ -1,7 +1,7 @@ services: grafana: image: grafana/grafana:12.3 - restart: on-failure + restart: unless-stopped ports: - "3000:3000" environment: From c72714dbaff588e7bdb7f60c575cd1337f476480 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Thu, 12 Feb 2026 11:59:46 +0100 Subject: [PATCH 10/39] separated telegraf plugins --- packages/netifyd/Makefile | 4 +++- .../etc/netifyd/netify-sink-log-telegraf.json | 10 ++++++++++ .../files/etc/netifyd/netify-sink-log.json | 5 ----- .../plugins.d/10-netify-proc-aggregator.conf | 5 ----- .../etc/netifyd/plugins.d/10-netify-telegraf.conf | 15 +++++++++++++++ 5 files changed, 28 insertions(+), 11 deletions(-) create mode 100644 packages/netifyd/files/etc/netifyd/netify-sink-log-telegraf.json create mode 100644 packages/netifyd/files/etc/netifyd/plugins.d/10-netify-telegraf.conf diff --git a/packages/netifyd/Makefile b/packages/netifyd/Makefile index 92e02054e..e7995fd3b 100644 --- a/packages/netifyd/Makefile +++ b/packages/netifyd/Makefile @@ -220,7 +220,6 @@ define Package/netifyd/install $(LN) /usr/lib/libnetify-plm.so.1.0.0 $(1)/usr/lib/libnetify-plm.so.1 # netify-proc-aggregator $(INSTALL_DATA) ./files/etc/netifyd/netify-proc-aggregator.json $(1)/etc/netifyd/netify-proc-aggregator.json - $(INSTALL_DATA) ./files/etc/netifyd/netify-proc-aggregator-telegraf.json $(1)/etc/netifyd/netify-proc-aggregator-telegraf.json $(INSTALL_DATA) ./files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf $(1)/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf $(INSTALL_BIN) $(PKG_BUILD_DIR)/libnetify-proc-aggregator.so.0.0.0 $(1)/usr/lib/libnetify-proc-aggregator.so.0.0.0 $(LN) /usr/lib/libnetify-proc-aggregator.so.0.0.0 $(1)/usr/lib/libnetify-proc-aggregator.so @@ -267,6 +266,9 @@ define Package/netifyd/install $(INSTALL_BIN) $(PKG_BUILD_DIR)/libnetify-sink-sqlite.so.0.0.0 $(1)/usr/lib/libnetify-sink-sqlite.so.0.0.0 $(LN) /usr/lib/libnetify-sink-sqlite.so.0.0.0 $(1)/usr/lib/libnetify-sink-sqlite.so $(LN) 
/usr/lib/libnetify-sink-sqlite.so.0.0.0 $(1)/usr/lib/libnetify-sink-sqlite.so.0 + # telegraf-flows + $(INSTALL_DATA) ./files/etc/netifyd/netify-sink-log-telegraf.json $(1)/etc/netifyd/netify-sink-log-telegraf.json + $(INSTALL_DATA) ./files/etc/netifyd/plugins.d/10-netify-telegraf.conf $(1)/etc/netifyd/plugins.d/10-netify-telegraf.conf endef $(eval $(call BuildPackage,netifyd)) diff --git a/packages/netifyd/files/etc/netifyd/netify-sink-log-telegraf.json b/packages/netifyd/files/etc/netifyd/netify-sink-log-telegraf.json new file mode 100644 index 000000000..fb661e42b --- /dev/null +++ b/packages/netifyd/files/etc/netifyd/netify-sink-log-telegraf.json @@ -0,0 +1,10 @@ +{ + "overwrite": false, + "channels": { + "telegraf": { + "log_path": "/var/run/netifyd", + "log_name": "telegraf", + "overwrite": true + } + } +} diff --git a/packages/netifyd/files/etc/netifyd/netify-sink-log.json b/packages/netifyd/files/etc/netifyd/netify-sink-log.json index 878038c96..1bd2fdd58 100644 --- a/packages/netifyd/files/etc/netifyd/netify-sink-log.json +++ b/packages/netifyd/files/etc/netifyd/netify-sink-log.json @@ -5,11 +5,6 @@ "log_path": "/var/run/netifyd", "log_name": "aggregator-stats", "overwrite": true - }, - "telegraf": { - "log_path": "/var/run/netifyd", - "log_name": "telegraf", - "overwrite": true } } } diff --git a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf index 22eaa57d3..783ec2de8 100644 --- a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf +++ b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-proc-aggregator.conf @@ -7,9 +7,4 @@ enable = yes plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 conf_filename = ${path_state_persistent}/netify-proc-aggregator.json -[proc-aggregator-telegraf] -enable = yes -plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 -conf_filename = 
${path_state_persistent}/netify-proc-aggregator-telegraf.json - # vim: set ft=dosini : diff --git a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-telegraf.conf b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-telegraf.conf new file mode 100644 index 000000000..81495d07c --- /dev/null +++ b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-telegraf.conf @@ -0,0 +1,15 @@ +# Netify Aggregator Processor Plugin Loader +# +############################################################################## + +[proc-aggregator-telegraf] +enable = yes +plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-proc-aggregator-telegraf.json + +[sink-log-telegraf] +enable = yes +plugin_library = ${path_plugin_libdir}/libnetify-sink-log.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-sink-log-telegraf.json + +# vim: set ft=dosini : From fb514ff9bca2a4907772e23032a5401055535d92 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Thu, 12 Feb 2026 12:40:25 +0100 Subject: [PATCH 11/39] expanded inputs --- packages/telegraf/Makefile | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 0bbe2381f..9651b8b5f 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -28,7 +28,25 @@ GO_BUILD_PKG:=github.com/influxdata/telegraf/cmd/$(PKG_NAME) GO_PKG_LDFLAGS_X:=github.com/influxdata/telegraf/internal.Version=$(PKG_VERSION) GO_PKG_TAGS:= \ custom \ + inputs.bond \ + inputs.cpu \ + inputs.disk \ + inputs.diskio \ + inputs.ethtool \ inputs.file \ + inputs.iptables \ + inputs.kernel \ + inputs.kernel_vmstat \ + inputs.linux_cpu \ + inputs.linux_sysctl_fs \ + inputs.mem \ + inputs.net \ + inputs.netstat \ + inputs.nftables \ + inputs.nstat \ + inputs.processes \ + inputs.sensors \ + inputs.system \ outputs.influxdb \ parsers.json_v2 From 4ba8b61b72044eacb3350af44fb96da313cac79d Mon Sep 17 00:00:00 2001 
From: Tommaso Bailetti Date: Fri, 13 Feb 2026 12:30:28 +0100 Subject: [PATCH 12/39] refactor: telegraf should configure netifyd --- packages/netifyd/Makefile | 3 --- packages/telegraf/Makefile | 10 +++++++++- .../netifyd/netify-proc-aggregator-telegraf.json | 0 .../files}/netifyd/netify-sink-log-telegraf.json | 0 .../files}/netifyd/plugins.d/10-netify-telegraf.conf | 0 5 files changed, 9 insertions(+), 4 deletions(-) rename packages/{netifyd/files/etc => telegraf/files}/netifyd/netify-proc-aggregator-telegraf.json (100%) rename packages/{netifyd/files/etc => telegraf/files}/netifyd/netify-sink-log-telegraf.json (100%) rename packages/{netifyd/files/etc => telegraf/files}/netifyd/plugins.d/10-netify-telegraf.conf (100%) diff --git a/packages/netifyd/Makefile b/packages/netifyd/Makefile index e7995fd3b..46574ae64 100644 --- a/packages/netifyd/Makefile +++ b/packages/netifyd/Makefile @@ -266,9 +266,6 @@ define Package/netifyd/install $(INSTALL_BIN) $(PKG_BUILD_DIR)/libnetify-sink-sqlite.so.0.0.0 $(1)/usr/lib/libnetify-sink-sqlite.so.0.0.0 $(LN) /usr/lib/libnetify-sink-sqlite.so.0.0.0 $(1)/usr/lib/libnetify-sink-sqlite.so $(LN) /usr/lib/libnetify-sink-sqlite.so.0.0.0 $(1)/usr/lib/libnetify-sink-sqlite.so.0 - # telegraf-flows - $(INSTALL_DATA) ./files/etc/netifyd/netify-sink-log-telegraf.json $(1)/etc/netifyd/netify-sink-log-telegraf.json - $(INSTALL_DATA) ./files/etc/netifyd/plugins.d/10-netify-telegraf.conf $(1)/etc/netifyd/plugins.d/10-netify-telegraf.conf endef $(eval $(call BuildPackage,netifyd)) diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 9651b8b5f..298148604 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -58,7 +58,10 @@ define Package/telegraf CATEGORY:=NethServer TITLE:=Telegraf URL:=https://github.com/influxdata/telegraf - DEPENDS:=$(GO_ARCH_DEPENDS) + DEPENDS:= \ + $(GO_ARCH_DEPENDS) \ + +victoria-metrics + +netifyd endef define Package/telegraf/description @@ -76,6 +79,11 @@ define 
Package/telegraf/install $(INSTALL_BIN) ./files/telegraf.initd $(1)/etc/init.d/telegraf $(INSTALL_DIR) $(1)/etc/telegraf $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf/telegraf.conf + $(INSTALL_DIR) $(1)/etc/netifyd + $(INSTALL_DATA) ./files/netifyd/netify-sink-log-telegraf.json $(1)/etc/netifyd/netify-sink-log-telegraf.json + $(INSTALL_DATA) ./files/netifyd/netify-proc-aggregator-telegraf.json $(1)/etc/netifyd/netify-proc-aggregator-telegraf.json + $(INSTALL_DIR) $(1)/etc/netifyd/plugins.d + $(INSTALL_DATA) ./files/netifyd/plugins.d/10-netify-telegraf.conf $(1)/etc/netifyd/plugins.d/10-netify-telegraf.conf endef define Package/telegraf/postinst diff --git a/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-telegraf.json b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json similarity index 100% rename from packages/netifyd/files/etc/netifyd/netify-proc-aggregator-telegraf.json rename to packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json diff --git a/packages/netifyd/files/etc/netifyd/netify-sink-log-telegraf.json b/packages/telegraf/files/netifyd/netify-sink-log-telegraf.json similarity index 100% rename from packages/netifyd/files/etc/netifyd/netify-sink-log-telegraf.json rename to packages/telegraf/files/netifyd/netify-sink-log-telegraf.json diff --git a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-telegraf.conf b/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf similarity index 100% rename from packages/netifyd/files/etc/netifyd/plugins.d/10-netify-telegraf.conf rename to packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf From 7b2c75e9b2f896cf8209d78503ef0d74a892b969 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 18 Feb 2026 08:53:55 +0100 Subject: [PATCH 13/39] fixed build issue --- packages/telegraf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 298148604..7a3df450c 
100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -60,7 +60,7 @@ define Package/telegraf URL:=https://github.com/influxdata/telegraf DEPENDS:= \ $(GO_ARCH_DEPENDS) \ - +victoria-metrics + +victoria-metrics \ +netifyd endef From 175593b7556a3021d4cc80440f265976e6cedc2f Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 27 Feb 2026 12:21:12 +0100 Subject: [PATCH 14/39] added logs --- config/monitoring.conf | 1 + packages/telegraf/Makefile | 1 + packages/telegraf/files/telegraf.conf | 481 +++++++++++++++++- packages/victoria-logs/Makefile | 73 +++ packages/victoria-logs/files/25_victoria-logs | 14 + .../files/rsyslog-victoria-logs.conf | 18 + .../victoria-logs/files/victoria-logs.conf | 3 + .../victoria-logs/files/victoria-logs.initd | 41 ++ 8 files changed, 628 insertions(+), 4 deletions(-) create mode 100644 packages/victoria-logs/Makefile create mode 100644 packages/victoria-logs/files/25_victoria-logs create mode 100644 packages/victoria-logs/files/rsyslog-victoria-logs.conf create mode 100644 packages/victoria-logs/files/victoria-logs.conf create mode 100644 packages/victoria-logs/files/victoria-logs.initd diff --git a/config/monitoring.conf b/config/monitoring.conf index 5994a2760..3ff10c3b6 100644 --- a/config/monitoring.conf +++ b/config/monitoring.conf @@ -1,2 +1,3 @@ CONFIG_PACKAGE_victoria-metrics=y +CONFIG_PACKAGE_victoria-logs=y CONFIG_PACKAGE_telegraf=y diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 7a3df450c..cd7d19a1e 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -60,6 +60,7 @@ define Package/telegraf URL:=https://github.com/influxdata/telegraf DEPENDS:= \ $(GO_ARCH_DEPENDS) \ + +lm-sensors \ +victoria-metrics \ +netifyd endef diff --git a/packages/telegraf/files/telegraf.conf b/packages/telegraf/files/telegraf.conf index cf409ecd2..79dc939bc 100644 --- a/packages/telegraf/files/telegraf.conf +++ b/packages/telegraf/files/telegraf.conf @@ -1,34 +1,410 @@ +# 
Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + + +# Global tags can be specified here in key="value" format. [global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" +# Configuration for telegraf agent [agent] - interval = "15s" + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. 
collection_jitter = "0s" - flush_interval = "15s" + + ## Collection offset is used to shift the collection by the given amount. + ## This can be be used to avoid many plugins querying constraint devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" + + ## Collected metrics are rounded to the precision specified. Precision is + ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + ## + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s: + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. precision = "0s" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log format controls the way messages are logged and can be one of "text", + ## "structured" or, on Windows, "eventlog". + # logformat = "text" + + ## Message key for structured logs, to override the default of "msg". + ## Ignored if `logformat` is not "structured". + # structured_log_message_key = "message" + + ## Name of the file to be logged to or stderr if unset or empty. This + ## setting is ignored for the "eventlog" format. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. 
When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + # hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = true + ## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which + ## translates by calling external programs snmptranslate and snmptable, + ## or "gosmi" which translates using the built-in gosmi library. + # snmp_translator = "netsnmp" + + ## Name of the file to load the state of plugins from and store the state to. + ## If uncommented and not empty, this file will be used to save the state of + ## stateful plugins on termination of Telegraf. If the file exists on start, + ## the state in the file will be restored for the plugins. + # statefile = "" + + ## Flag to skip running processors after aggregators + ## By default, processors are run a second time after aggregators. Changing + ## this setting to true will skip the second run of processors. + # skip_processors_after_aggregators = false + + ############################################################################### # OUTPUT PLUGINS # ############################################################################### + [[outputs.influxdb]] + ## The full HTTP or UDP URL for your InfluxDB instance. 
+ ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] urls = ["http://127.0.0.1:8428"] + + ## Local address to bind when connecting to the server + ## If empty or not set, the local address is automatically chosen. + # local_address = "" + + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" database = "netifyd" - skip_database_creation = true - exclude_retention_policy_tag = true + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. 
+ # timeout = "5s" + + ## HTTP Basic Auth + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. content_encoding = "gzip" + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false + + ## When true, Telegraf will omit the timestamp on data to allow InfluxDB + ## to set the timestamp of the data during ingestion. This is generally NOT + ## what you want as it can lead to data points captured at different times + ## getting omitted due to similar data. 
+ # influx_omit_timestamp = false + + ############################################################################### # INPUT PLUGINS # ############################################################################### +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + ## NOTE: The resulting 'time_active' field INCLUDES 'iowait'! + report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## Devices to collect stats for + ## Wildcards are supported except for disk synonyms like '/dev/disk/by-id'. + ## ex. devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] + # devices = ["*"] + + ## Skip gathering of the disk's serial numbers. + # skip_serial_number = true + + ## Device metadata tags to add on systems supporting it (Linux only) + ## Use 'udevadm info -q property -n ' to get a list of properties. + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. 
+ # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + + +# Plugin to collect various Linux kernel statistics. +# This plugin ONLY supports Linux +[[inputs.kernel]] + ## Additional gather options + ## Possible options include: + ## * ksm - kernel same-page merging + ## * psi - pressure stall information + # collect = [] + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +# This plugin ONLY supports non-Windows +[[inputs.processes]] + ## Use sudo to run ps command on *BSD systems. Linux systems will read + ## /proc, so this does not apply there. + # use_sudo = false + + +# Read metrics about system load & uptime +[[inputs.system]] + # no configuration + + +# Collect bond interface status, slaves statuses and failures count +[[inputs.bond]] + ## Sets 'proc' directory path + ## If not specified, then default is /proc + # host_proc = "/proc" + + ## Sets 'sys' directory path + ## If not specified, then default is /sys + # host_sys = "/sys" + + ## By default, telegraf gather stats for all bond interfaces + ## Setting interfaces will restrict the stats to the specified + ## bond interfaces. 
+ # bond_interfaces = ["bond0"] + + ## Tries to collect additional bond details from /sys/class/net/{bond} + ## currently only useful for LACP (mode 4) bonds + # collect_sys_details = false + + +# Returns ethtool statistics for given interfaces +# This plugin ONLY supports Linux +[[inputs.ethtool]] + ## List of interfaces to pull metrics for + # interface_include = ["eth0"] + + ## List of interfaces to ignore when pulling metrics. + # interface_exclude = ["eth1"] + + ## Plugin behavior for downed interfaces + ## Available choices: + ## - expose: collect & report metrics for down interfaces + ## - skip: ignore interfaces that are marked down + # down_interfaces = "expose" + + ## Reading statistics from interfaces in additional namespaces is also + ## supported, so long as the namespaces are named (have a symlink in + ## /var/run/netns). The telegraf process will also need the CAP_SYS_ADMIN + ## permission. + ## By default, only the current namespace will be used. For additional + ## namespace support, at least one of `namespace_include` and + ## `namespace_exclude` must be provided. + ## To include all namespaces, set `namespace_include` to `["*"]`. + ## The initial namespace (if anonymous) can be specified with the empty + ## string (""). + + ## List of namespaces to pull metrics for + # namespace_include = [] + + ## List of namespace to ignore when pulling metrics. + # namespace_exclude = [] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. + ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] + + +# Parse a complete file each interval [[inputs.file]] + ## Files to parse each interval. 
Accept standard unix glob matching rules, + ## as well as ** to match recursive files and directories. files = ["/var/run/netifyd/telegraf.json"] + + ## Character encoding to use when interpreting the file contents. Invalid + ## characters are replaced using the unicode replacement character. When set + ## to the empty string the data is not decoded to text. + ## ex: character_encoding = "utf-8" + ## character_encoding = "utf-16le" + ## character_encoding = "utf-16be" + ## character_encoding = "" + # character_encoding = "" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json_v2" + + ## Please use caution when using the following options: when file name + ## variation is high, this can increase the cardinality significantly. Read + ## more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality + + ## Name of tag to store the name of the file. Disabled if not set. + # file_tag = "" + + ## Name of tag to store the absolute path and name of the file. Disabled if + ## not set. + # file_path_tag = "" [[inputs.file.json_v2]] measurement_name = "netifyd" timestamp_path = "log_time_end" @@ -37,3 +413,100 @@ path = "stats" tags = ["detected_application_name", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] excluded_keys = ["digests"] + + +# Gather packets and bytes throughput from iptables +# This plugin ONLY supports Linux +[[inputs.iptables]] + ## iptables require root access on most systems. + ## Setting 'use_sudo' to true will make use of sudo to run iptables. + ## Users must configure sudo to allow telegraf user to run iptables with + ## no password. + ## iptables can be restricted to only list command "iptables -nvL". 
+ # use_sudo = false + + ## Setting 'use_lock' to true runs iptables with the "-w" option. + ## Adjust your sudo settings appropriately if using this option + ## ("iptables -w 5 -nvl") + # use_lock = false + + ## Define an alternate executable, such as "ip6tables". Default is "iptables". + # binary = "ip6tables" + ## defines the table to monitor: + table = "filter" + + ## defines the chains to monitor. + ## NOTE: iptables rules without a comment will not be monitored. + ## Read the plugin documentation for more information. + chains = [ "INPUT" ] + + +# Get kernel statistics from /proc/vmstat +# This plugin ONLY supports Linux +[[inputs.kernel_vmstat]] + # no configuration + + +# Provides Linux CPU metrics +# This plugin ONLY supports Linux +[[inputs.linux_cpu]] + ## Path for sysfs filesystem. + ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt + ## Defaults: + # host_sys = "/sys" + + ## CPU metrics collected by the plugin. + ## Supported options: + ## "cpufreq", "thermal" + ## Defaults: + # metrics = ["cpufreq"] + + +# Provides Linux sysctl fs metrics +[[inputs.linux_sysctl_fs]] + # no configuration + + +# Gather metrics about network interfaces +[[inputs.net]] + ## By default, telegraf gathers stats from any up interface (excluding loopback) + ## Setting interfaces will tell it to gather these explicit interfaces, + ## regardless of status. When specifying an interface, glob-style + ## patterns are also supported. + # interfaces = ["eth*", "enp0s[0-1]", "lo"] + + ## On linux systems telegraf also collects protocol stats. + ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. + ## + ## DEPRECATION NOTICE: A value of 'false' is deprecated and discouraged! + ## Please set this to `true` and use the 'inputs.nstat' + ## plugin instead. + # ignore_protocol_stats = false + + +# Read TCP metrics such as established, time wait and sockets counts. 
+[[inputs.netstat]] + # no configuration + + +# Collect kernel snmp counters and network interface statistics +[[inputs.nstat]] + ## file paths for proc files. If empty default paths will be used: + ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 + ## These can also be overridden with env variables, see README. + proc_net_netstat = "/proc/net/netstat" + proc_net_snmp = "/proc/net/snmp" + proc_net_snmp6 = "/proc/net/snmp6" + ## dump metrics with 0 values too + dump_zeros = true + + +# Monitor sensors, requires lm-sensors package +# This plugin ONLY supports Linux +[[inputs.sensors]] + ## Remove numbers from field names. + ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. + # remove_numbers = true + + ## Timeout is the maximum amount of time that the sensors command can run. + # timeout = "5s" diff --git a/packages/victoria-logs/Makefile b/packages/victoria-logs/Makefile new file mode 100644 index 000000000..7565b307f --- /dev/null +++ b/packages/victoria-logs/Makefile @@ -0,0 +1,73 @@ +include $(TOPDIR)/rules.mk + +PKG_NAME:=victoria-logs +# renovate: datasource=github-tags depName=VictoriaMetrics/VictoriaLogs +PKG_VERSION:=1.14.0-victorialogs +PKG_RELEASE:=1 + +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=https://codeload.github.com/VictoriaMetrics/VictoriaLogs/tar.gz/v$(PKG_VERSION)? 
+PKG_SOURCE_SUBDIR:=VictoriaLogs-$(PKG_VERSION) +PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_SOURCE_SUBDIR) + +PKG_HASH:=skip +PKG_MAINTAINER:=Tommaso Bailetti +PKG_LICENSE:=Apache-2.0 + +PKG_BUILD_DEPENDS:=golang/host +PKG_BUILD_PARALLEL:=1 +PKG_BUILD_FLAGS:=no-mips16 + +GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs +GO_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs +GO_PKG_GCFLAGS:= \ + -trimpath \ + -buildvcs=false +GO_PKG_LDFLAGS:= \ + -extldflags \ + -static +GO_PKG_LDFLAGS_X:=github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo.Version=$(PKG_NAME)-v$(PKG_VERSION) +GO_PKG_TAGS:= \ + netgo \ + osusergo \ + musl + +include $(INCLUDE_DIR)/package.mk +include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk + +define Package/victoria-logs + SECTION:=base + CATEGORY:=NethServer + TITLE:=Victoria Logs + URL:=https://github.com/VictoriaMetrics/VictoriaLogs + DEPENDS:=$(GO_ARCH_DEPENDS) +rsyslog +endef + +define Package/victoria-logs/description + VictoriaLogs — fast and easy-to-use database for logs. 
+endef + +define Package/victoria-logs/conffiles +/etc/config/victoria-logs +endef + +define Package/victoria-logs/install + $(call GoPackage/Package/Install/Bin,$(1)) + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) ./files/victoria-logs.initd $(1)/etc/init.d/victoria-logs + $(INSTALL_DIR) $(1)/etc/config + $(INSTALL_DATA) ./files/victoria-logs.conf $(1)/etc/config/victoria-logs + $(INSTALL_DIR) $(1)/etc/rsyslog.d + $(INSTALL_CONF) ./files/rsyslog-victoria-logs.conf $(1)/etc/rsyslog.d/victoria-logs.conf + $(INSTALL_DIR) $(1)/etc/uci-defaults + $(INSTALL_BIN) ./files/25_victoria-logs $(1)/etc/uci-defaults/25_victoria-logs +endef + +define Package/victoria-logs/postinst +#!/bin/sh +[ -z "$${IPKG_INSTROOT}" ] && /etc/init.d/victoria-logs restart +exit 0 +endef + +$(eval $(call GoBinPackage,victoria-logs)) +$(eval $(call BuildPackage,victoria-logs)) diff --git a/packages/victoria-logs/files/25_victoria-logs b/packages/victoria-logs/files/25_victoria-logs new file mode 100644 index 000000000..079e1a4eb --- /dev/null +++ b/packages/victoria-logs/files/25_victoria-logs @@ -0,0 +1,14 @@ +#!/bin/sh + +# +# Copyright (C) 2026 Nethesis S.r.l. +# SPDX-License-Identifier: GPL-2.0-only +# + +RSYSLOG_CONF="/etc/rsyslog.d/victoria-logs.conf" + +# Register the victoria-logs rsyslog drop-in in rsyslog UCI includes +if ! 
uci -q get rsyslog.syslog.includes | grep -qF "${RSYSLOG_CONF}"; then + uci add_list rsyslog.syslog.includes="${RSYSLOG_CONF}" + uci commit rsyslog +fi diff --git a/packages/victoria-logs/files/rsyslog-victoria-logs.conf b/packages/victoria-logs/files/rsyslog-victoria-logs.conf new file mode 100644 index 000000000..f10237667 --- /dev/null +++ b/packages/victoria-logs/files/rsyslog-victoria-logs.conf @@ -0,0 +1,18 @@ +# Rsyslog configuration for VictoriaLogs + +ruleset(name="victoria-logs") { + *.info;mail.none;authpriv.none;cron.none action( + type="omfwd" + target="127.0.0.1" + port="5514" + protocol="tcp" + TCP_Framing="octet-counted" + Template="RSYSLOG_SyslogProtocol23Format" + + action.resumeRetryCount="-1" + queue.type="linkedList" + queue.size="10000" + ) +} + +*.info;mail.none;authpriv.none;cron.none call victoria-logs diff --git a/packages/victoria-logs/files/victoria-logs.conf b/packages/victoria-logs/files/victoria-logs.conf new file mode 100644 index 000000000..1bea29047 --- /dev/null +++ b/packages/victoria-logs/files/victoria-logs.conf @@ -0,0 +1,3 @@ +config victorialogs 'main' + option storage_path '/var/lib/victoria-logs' + option max_disk_usage '50MB' diff --git a/packages/victoria-logs/files/victoria-logs.initd b/packages/victoria-logs/files/victoria-logs.initd new file mode 100644 index 000000000..ba7ba2413 --- /dev/null +++ b/packages/victoria-logs/files/victoria-logs.initd @@ -0,0 +1,41 @@ +#!/bin/sh /etc/rc.common + +# +# Copyright (C) 2023 Nethesis S.r.l. 
+# SPDX-License-Identifier: GPL-2.0-only +# + +# shellcheck disable=SC3043 + +START=99 +USE_PROCD=1 + +PROG="/usr/bin/victoria-logs" + +start_service() { + config_load victoria-logs + local storage_path max_disk_usage + config_get storage_path main storage_path /var/lib/victoria-logs + config_get max_disk_usage main max_disk_usage 50MB + + procd_open_instance + procd_set_param stdout 1 + procd_set_param stderr 1 + procd_set_param respawn 3600 5 0 + procd_set_param command $PROG + procd_append_param command -storageDataPath="$storage_path" + procd_append_param command -retention.maxDiskSpaceUsageBytes="$max_disk_usage" + procd_append_param command -syslog.listenAddr.tcp=127.0.0.1:5514 + procd_close_instance +} + +service_triggers() +{ + procd_add_reload_trigger victoria-logs +} + +reload_service() +{ + stop + start +} From 4f063e4ad40bcb97806f253aa9d3c6a3eed75371 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 4 Mar 2026 09:29:33 +0100 Subject: [PATCH 15/39] uploading all logs --- packages/victoria-logs/files/rsyslog-victoria-logs.conf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/victoria-logs/files/rsyslog-victoria-logs.conf b/packages/victoria-logs/files/rsyslog-victoria-logs.conf index f10237667..c2a66de3b 100644 --- a/packages/victoria-logs/files/rsyslog-victoria-logs.conf +++ b/packages/victoria-logs/files/rsyslog-victoria-logs.conf @@ -1,7 +1,7 @@ # Rsyslog configuration for VictoriaLogs ruleset(name="victoria-logs") { - *.info;mail.none;authpriv.none;cron.none action( + *.* action( type="omfwd" target="127.0.0.1" port="5514" @@ -15,4 +15,4 @@ ruleset(name="victoria-logs") { ) } -*.info;mail.none;authpriv.none;cron.none call victoria-logs +*.* call victoria-logs From 3095eab4c0bdaa454f56bc623b24cfaab7af43dc Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Mon, 9 Mar 2026 08:47:22 +0100 Subject: [PATCH 16/39] fixed some config issues --- .../telegraf/files/netifyd/netify-proc-aggregator-telegraf.json | 2 +- 
packages/telegraf/files/telegraf.conf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json index 28db2387c..c561b4d48 100644 --- a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json +++ b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json @@ -7,7 +7,7 @@ "nested_mode": false, "privacy_mode": false, "sinks": { - "sink-log": { + "sink-log-telegraf": { "telegraf": { } } } diff --git a/packages/telegraf/files/telegraf.conf b/packages/telegraf/files/telegraf.conf index 79dc939bc..6ba7fc1a2 100644 --- a/packages/telegraf/files/telegraf.conf +++ b/packages/telegraf/files/telegraf.conf @@ -333,7 +333,7 @@ # Returns ethtool statistics for given interfaces # This plugin ONLY supports Linux -[[inputs.ethtool]] +#[[inputs.ethtool]] ## List of interfaces to pull metrics for # interface_include = ["eth0"] From a119fa435234465f5c15aac8527b2f434a143598 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 20 Mar 2026 09:16:41 +0100 Subject: [PATCH 17/39] added ulogd --- config/ulogd.conf | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 config/ulogd.conf diff --git a/config/ulogd.conf b/config/ulogd.conf new file mode 100644 index 000000000..1b0258207 --- /dev/null +++ b/config/ulogd.conf @@ -0,0 +1,13 @@ +CONFIG_PACKAGE_ulogd=y +CONFIG_PACKAGE_ulogd-mod-dbi=y +CONFIG_PACKAGE_ulogd-mod-extra=y +CONFIG_PACKAGE_ulogd-mod-json=y +CONFIG_PACKAGE_ulogd-mod-mysql=y +CONFIG_PACKAGE_ulogd-mod-nfacct=y +CONFIG_PACKAGE_ulogd-mod-nfct=y +CONFIG_PACKAGE_ulogd-mod-nflog=y +CONFIG_PACKAGE_ulogd-mod-pcap=y +CONFIG_PACKAGE_ulogd-mod-pgsql=y +CONFIG_PACKAGE_ulogd-mod-sqlite=y +CONFIG_PACKAGE_ulogd-mod-syslog=y +CONFIG_PACKAGE_ulogd-mod-xml=y From 3476febb4666e1e06b19dc358b671b4c73f6baaf Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 20 Mar 2026 09:59:10 +0100 Subject: 
[PATCH 18/39] separated telegraf concerns --- packages/telegraf/Makefile | 9 +- packages/telegraf/files/telegraf.conf | 89 +++++++++---------- .../files/telegraf.conf.d/netifyd.conf | 45 ++++++++++ packages/telegraf/files/telegraf.initd | 2 +- 4 files changed, 91 insertions(+), 54 deletions(-) create mode 100644 packages/telegraf/files/telegraf.conf.d/netifyd.conf diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index cd7d19a1e..d7de7ab81 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -69,17 +69,14 @@ define Package/telegraf/description Telegraf is an agent for collecting, processing, aggregating, and writing metrics. endef -define Package/telegraf/conffiles -/etc/telegraf/telegraf.d/ -endef - - define Package/telegraf/install $(call GoPackage/Package/Install/Bin,$(1)) $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/telegraf.initd $(1)/etc/init.d/telegraf $(INSTALL_DIR) $(1)/etc/telegraf - $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf/telegraf.conf + $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf.conf + $(INSTALL_DIR) $(1)/etc/telegraf.conf.d + $(INSTALL_DATA) ./files/telegraf.conf.d/netifyd.conf $(1)/etc/telegraf.conf.d/netifyd.conf $(INSTALL_DIR) $(1)/etc/netifyd $(INSTALL_DATA) ./files/netifyd/netify-sink-log-telegraf.json $(1)/etc/netifyd/netify-sink-log-telegraf.json $(INSTALL_DATA) ./files/netifyd/netify-proc-aggregator-telegraf.json $(1)/etc/netifyd/netify-proc-aggregator-telegraf.json diff --git a/packages/telegraf/files/telegraf.conf b/packages/telegraf/files/telegraf.conf index 6ba7fc1a2..151e90832 100644 --- a/packages/telegraf/files/telegraf.conf +++ b/packages/telegraf/files/telegraf.conf @@ -148,15 +148,15 @@ ## The target database for metrics; will be created as needed. ## For UDP url endpoint database needs to be configured on server side. - # database = "telegraf" - database = "netifyd" + ## This is used as a fallback when database_tag is not set on a metric. 
+ database = "nethsecurity" ## The value of this tag will be used to determine the database. If this ## tag is not set the 'database' option is used as the default. - # database_tag = "" + database_tag = "influxdb_db" ## If true, the 'database_tag' will not be included in the written metric. - # exclude_database_tag = false + exclude_database_tag = true ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the @@ -219,13 +219,17 @@ ## to set the timestamp of the data during ingestion. This is generally NOT ## what you want as it can lead to data points captured at different times ## getting omitted due to similar data. - # influx_omit_timestamp = false + # influx_omit_timestamp = false ############################################################################### # INPUT PLUGINS # ############################################################################### +# OS and system metrics collection +# Includes CPU, memory, disk, network, and kernel statistics +# All metrics from this section are tagged with influxdb_db=os-metrics + # Read metrics about cpu usage [[inputs.cpu]] ## Whether to report per-cpu stats or not @@ -239,6 +243,8 @@ report_active = false ## If true and the info is available then add core_id and physical_id tags core_tags = false + [inputs.cpu.tags] + influxdb_db = "os-metrics" # Read metrics about disk usage by mount point @@ -254,6 +260,8 @@ ## The 'mount' command reports options of all mounts in parathesis. ## Bind mounts can be ignored with the special 'bind' option. # ignore_mount_opts = [] + [inputs.disk.tags] + influxdb_db = "os-metrics" # Read metrics about disk IO by device @@ -281,6 +289,8 @@ ## The typical use case is for LVM volumes, to get the VG/LV name instead of ## the near-meaningless DM-0 name. 
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + [inputs.diskio.tags] + influxdb_db = "os-metrics" # Plugin to collect various Linux kernel statistics. @@ -291,11 +301,15 @@ ## * ksm - kernel same-page merging ## * psi - pressure stall information # collect = [] + [inputs.kernel.tags] + influxdb_db = "os-metrics" # Read metrics about memory usage [[inputs.mem]] # no configuration + [inputs.mem.tags] + influxdb_db = "os-metrics" # Get the number of processes and group them by status @@ -304,11 +318,15 @@ ## Use sudo to run ps command on *BSD systems. Linux systems will read ## /proc, so this does not apply there. # use_sudo = false + [inputs.processes.tags] + influxdb_db = "os-metrics" # Read metrics about system load & uptime [[inputs.system]] # no configuration + [inputs.system.tags] + influxdb_db = "os-metrics" # Collect bond interface status, slaves statuses and failures count @@ -329,6 +347,8 @@ ## Tries to collect additional bond details from /sys/class/net/{bond} ## currently only useful for LACP (mode 4) bonds # collect_sys_details = false + [inputs.bond.tags] + influxdb_db = "os-metrics" # Returns ethtool statistics for given interfaces @@ -373,48 +393,6 @@ # normalize_keys = ["snakecase", "trim", "lower", "underscore"] -# Parse a complete file each interval -[[inputs.file]] - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = ["/var/run/netifyd/telegraf.json"] - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json_v2" - - ## Please use caution when using the following options: when file name - ## variation is high, this can increase the cardinality significantly. Read - ## more about cardinality here: - ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality - - ## Name of tag to store the name of the file. Disabled if not set. - # file_tag = "" - - ## Name of tag to store the absolute path and name of the file. Disabled if - ## not set. - # file_path_tag = "" - [[inputs.file.json_v2]] - measurement_name = "netifyd" - timestamp_path = "log_time_end" - timestamp_format = "unix" - [[inputs.file.json_v2.object]] - path = "stats" - tags = ["detected_application_name", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] - excluded_keys = ["digests"] - - # Gather packets and bytes throughput from iptables # This plugin ONLY supports Linux [[inputs.iptables]] @@ -439,12 +417,16 @@ ## NOTE: iptables rules without a comment will not be monitored. ## Read the plugin documentation for more information. 
chains = [ "INPUT" ] + [inputs.iptables.tags] + influxdb_db = "os-metrics" # Get kernel statistics from /proc/vmstat # This plugin ONLY supports Linux [[inputs.kernel_vmstat]] # no configuration + [inputs.kernel_vmstat.tags] + influxdb_db = "os-metrics" # Provides Linux CPU metrics @@ -460,11 +442,15 @@ ## "cpufreq", "thermal" ## Defaults: # metrics = ["cpufreq"] + [inputs.linux_cpu.tags] + influxdb_db = "os-metrics" # Provides Linux sysctl fs metrics [[inputs.linux_sysctl_fs]] # no configuration + [inputs.linux_sysctl_fs.tags] + influxdb_db = "os-metrics" # Gather metrics about network interfaces @@ -482,11 +468,15 @@ ## Please set this to `true` and use the 'inputs.nstat' ## plugin instead. # ignore_protocol_stats = false + [inputs.net.tags] + influxdb_db = "os-metrics" # Read TCP metrics such as established, time wait and sockets counts. [[inputs.netstat]] # no configuration + [inputs.netstat.tags] + influxdb_db = "os-metrics" # Collect kernel snmp counters and network interface statistics @@ -499,6 +489,8 @@ proc_net_snmp6 = "/proc/net/snmp6" ## dump metrics with 0 values too dump_zeros = true + [inputs.nstat.tags] + influxdb_db = "os-metrics" # Monitor sensors, requires lm-sensors package @@ -510,3 +502,6 @@ ## Timeout is the maximum amount of time that the sensors command can run. # timeout = "5s" + [inputs.sensors.tags] + influxdb_db = "os-metrics" + diff --git a/packages/telegraf/files/telegraf.conf.d/netifyd.conf b/packages/telegraf/files/telegraf.conf.d/netifyd.conf new file mode 100644 index 000000000..a163e23a4 --- /dev/null +++ b/packages/telegraf/files/telegraf.conf.d/netifyd.conf @@ -0,0 +1,45 @@ +# Parse netifyd metrics from JSON file +[[inputs.file]] + ## Files to parse each interval. Accept standard unix glob matching rules, + ## as well as ** to match recursive files and directories. + files = ["/var/run/netifyd/telegraf.json"] + + ## Character encoding to use when interpreting the file contents. 
Invalid + ## characters are replaced using the unicode replacement character. When set + ## to the empty string the data is not decoded to text. + ## ex: character_encoding = "utf-8" + ## character_encoding = "utf-16le" + ## character_encoding = "utf-16be" + ## character_encoding = "" + # character_encoding = "" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json_v2" + + ## Please use caution when using the following options: when file name + ## variation is high, this can increase the cardinality significantly. Read + ## more about cardinality here: + ## https://docs.influxdb.com/influxdb/cloud/reference/glossary/#series-cardinality + + ## Name of tag to store the name of the file. Disabled if not set. + # file_tag = "" + + ## Name of tag to store the absolute path and name of the file. Disabled if + ## not set. 
+ # file_path_tag = "" + + ## Global tags to add to all metrics from this input + [inputs.file.tags] + influxdb_db = "netifyd" + + [[inputs.file.json_v2]] + measurement_name = "netifyd" + timestamp_path = "log_time_end" + timestamp_format = "unix" + [[inputs.file.json_v2.object]] + path = "stats" + tags = ["detected_application_name", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] + excluded_keys = ["digests"] diff --git a/packages/telegraf/files/telegraf.initd b/packages/telegraf/files/telegraf.initd index 3be4216b0..c84e395dd 100644 --- a/packages/telegraf/files/telegraf.initd +++ b/packages/telegraf/files/telegraf.initd @@ -18,7 +18,7 @@ start_service() { procd_set_param stderr 1 procd_set_param respawn 3600 5 0 procd_set_param command $PROG - procd_append_param command --watch-config notify + procd_append_param command --watch-config notify --config /etc/telegraf.conf --config-directory /etc/telegraf.conf.d procd_close_instance } From 50e78b14eb88b7273d635b09e23283c1750968e4 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 20 Mar 2026 15:03:03 +0100 Subject: [PATCH 19/39] added vlogscli and vmalert --- packages/victoria-logs/Makefile | 11 +++++++---- packages/victoria-metrics/Makefile | 7 ++++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/packages/victoria-logs/Makefile b/packages/victoria-logs/Makefile index 7565b307f..152bbaefa 100644 --- a/packages/victoria-logs/Makefile +++ b/packages/victoria-logs/Makefile @@ -18,8 +18,9 @@ PKG_BUILD_DEPENDS:=golang/host PKG_BUILD_PARALLEL:=1 PKG_BUILD_FLAGS:=no-mips16 -GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs -GO_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs +GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app +GO_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs \ + 
github.com/VictoriaMetrics/VictoriaMetrics/app/vlogscli GO_PKG_GCFLAGS:= \ -trimpath \ -buildvcs=false @@ -52,7 +53,9 @@ define Package/victoria-logs/conffiles endef define Package/victoria-logs/install - $(call GoPackage/Package/Install/Bin,$(1)) + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(GO_BUILD_BIN_DIR)/victoria-logs $(1)/usr/bin/victoria-logs + $(INSTALL_BIN) $(GO_BUILD_BIN_DIR)/vlogscli $(1)/usr/bin/vlogscli $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/victoria-logs.initd $(1)/etc/init.d/victoria-logs $(INSTALL_DIR) $(1)/etc/config @@ -69,5 +72,5 @@ define Package/victoria-logs/postinst exit 0 endef -$(eval $(call GoBinPackage,victoria-logs)) +$(eval $(call GoPackage,victoria-logs)) $(eval $(call BuildPackage,victoria-logs)) diff --git a/packages/victoria-metrics/Makefile b/packages/victoria-metrics/Makefile index f41b238a7..f9c364f6e 100644 --- a/packages/victoria-metrics/Makefile +++ b/packages/victoria-metrics/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2025 Nethesis S.r.l. +# Copyright (C) 2026 Nethesis S.r.l. 
# SPDX-License-Identifier: GPL-2.0-only # @@ -23,8 +23,9 @@ PKG_BUILD_DEPENDS:=golang/host PKG_BUILD_PARALLEL:=1 PKG_BUILD_FLAGS:=no-mips16 -GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/$(PKG_NAME) -GO_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/$(PKG_NAME) +GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics +GO_PKG_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics \ + github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert GO_PKG_GCFLAGS:= \ -trimpath \ -buildvcs=false From 445f60c7dac2d0b822d21070aab432c3eb883158 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 25 Mar 2026 09:42:38 +0100 Subject: [PATCH 20/39] fixed build issue with victoria logs --- packages/victoria-logs/Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/victoria-logs/Makefile b/packages/victoria-logs/Makefile index 152bbaefa..bf8d83f70 100644 --- a/packages/victoria-logs/Makefile +++ b/packages/victoria-logs/Makefile @@ -18,8 +18,8 @@ PKG_BUILD_DEPENDS:=golang/host PKG_BUILD_PARALLEL:=1 PKG_BUILD_FLAGS:=no-mips16 -GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app -GO_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs \ +GO_PKG:=github.com/VictoriaMetrics/VictoriaMetrics +GO_PKG_BUILD_PKG:=github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-logs \ github.com/VictoriaMetrics/VictoriaMetrics/app/vlogscli GO_PKG_GCFLAGS:= \ -trimpath \ @@ -53,9 +53,7 @@ define Package/victoria-logs/conffiles endef define Package/victoria-logs/install - $(INSTALL_DIR) $(1)/usr/bin - $(INSTALL_BIN) $(GO_BUILD_BIN_DIR)/victoria-logs $(1)/usr/bin/victoria-logs - $(INSTALL_BIN) $(GO_BUILD_BIN_DIR)/vlogscli $(1)/usr/bin/vlogscli + $(call GoPackage/Package/Install/Bin,$(1)) $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/victoria-logs.initd $(1)/etc/init.d/victoria-logs $(INSTALL_DIR) $(1)/etc/config From d6be9749b974391c7c7c97ee6ecbf73d0c7fa01f Mon Sep 17 00:00:00 2001 From: Tommaso 
Bailetti Date: Fri, 27 Mar 2026 10:26:36 +0100 Subject: [PATCH 21/39] chore: updated banip --- packages/banip/Makefile | 11 +- packages/banip/files/95-banip-housekeeping | 42 + packages/banip/files/README.md | 500 +--- packages/banip/files/banip-functions.sh | 2178 +++++++++++------ packages/banip/files/banip-service.sh | 111 +- packages/banip/files/banip.cgi | 4 +- packages/banip/files/banip.countries | 11 +- packages/banip/files/banip.feeds | 122 +- packages/banip/files/banip.init | 51 +- packages/banip/files/banip.tpl | 16 +- packages/ns-api/README.md | 7 +- packages/ns-api/files/ns.threatshield | 35 +- .../ns-threat_shield/files/banip-defaults | 7 +- .../files/banip.nethesis.feeds | 55 +- 14 files changed, 1754 insertions(+), 1396 deletions(-) create mode 100644 packages/banip/files/95-banip-housekeeping mode change 100644 => 100755 packages/banip/files/banip-functions.sh diff --git a/packages/banip/Makefile b/packages/banip/Makefile index f1dab45f9..fb7e45ff2 100644 --- a/packages/banip/Makefile +++ b/packages/banip/Makefile @@ -1,11 +1,11 @@ # banIP - ban incoming and outgoing IPs via named nftables Sets -# Copyright (c) 2018-2024 Dirk Brenken (dev@brenken.org) +# Copyright (c) 2018-2026 Dirk Brenken (dev@brenken.org) # This is free software, licensed under the GNU General Public License v3. 
include $(TOPDIR)/rules.mk PKG_NAME:=banip -PKG_VERSION:=1.0.1 +PKG_VERSION:=1.8.1 PKG_RELEASE:=3 PKG_LICENSE:=GPL-3.0-or-later PKG_MAINTAINER:=Dirk Brenken @@ -16,7 +16,7 @@ define Package/banip SECTION:=net CATEGORY:=Network TITLE:=banIP blocks IPs via named nftables Sets - DEPENDS:=+jshn +jsonfilter +firewall4 +ca-bundle +rpcd +rpcd-mod-rpcsys + DEPENDS:=+jshn +jsonfilter +firewall4 +gawk +ca-bundle +rpcd +rpcd-mod-rpcsys PKGARCH:=all endef @@ -51,7 +51,7 @@ define Package/banip/install $(INSTALL_BIN) ./files/banip.init $(1)/etc/init.d/banip $(INSTALL_DIR) $(1)/usr/lib - $(INSTALL_CONF) ./files/banip-functions.sh $(1)/usr/lib + $(INSTALL_BIN) ./files/banip-functions.sh $(1)/usr/lib $(INSTALL_DIR) $(1)/etc/config $(INSTALL_CONF) ./files/banip.conf $(1)/etc/config/banip @@ -66,6 +66,9 @@ define Package/banip/install $(INSTALL_DIR) $(1)/www/cgi-bin $(INSTALL_BIN) ./files/banip.cgi $(1)/www/cgi-bin/banip + + $(INSTALL_DIR) $(1)/etc/uci-defaults + $(INSTALL_BIN) ./files/95-banip-housekeeping $(1)/etc/uci-defaults endef $(eval $(call BuildPackage,banip)) diff --git a/packages/banip/files/95-banip-housekeeping b/packages/banip/files/95-banip-housekeeping new file mode 100644 index 000000000..9ef25b616 --- /dev/null +++ b/packages/banip/files/95-banip-housekeeping @@ -0,0 +1,42 @@ +#!/bin/sh +# Copyright (c) 2015-2026 Dirk Brenken (dev@brenken.org) +# This is free software, licensed under the GNU General Public License v3. 
+ +# (s)hellcheck exceptions +# shellcheck disable=all + +export LC_ALL=C +export PATH="/usr/sbin:/usr/bin:/sbin:/bin" + +config="banip" +old_options="ban_loginput ban_logforwardwan ban_logforwardlan ban_blockinput ban_blockforwardwan ban_blockforwardlan ban_blocktype ban_blockpolicy" + +for option in ${old_options}; do + old_values="$(uci -q get "${config}.global.${option}" 2>/dev/null)" + for value in ${old_values}; do + case "${option}" in + "ban_loginput" | "ban_logforwardwan") + uci -q set "${config}".global.ban_loginbound="${value}" + ;; + "ban_logforwardlan") + uci -q set "${config}".global.ban_logoutbound="${value}" + ;; + "ban_blockpolicy") + if printf "%s" "${old_values}" | grep -qw "input\|forwardwan\|forwardlan"; then + break + else + continue 2 + fi + ;; + esac + done + uci -q delete "${config}.global.${option}" +done +[ -n "$(uci -q changes "${config}")" ] && uci -q commit "${config}" + +custom_feed="/etc/banip/banip.custom.feeds" +if grep -q '"rule_4"' "${custom_feed}" 2>/dev/null; then + mv -f "${custom_feed}" "${custom_feed}.backup.$(date "+%Y%m%d%H%M%S")" + : > "${custom_feed}" +fi +exit 0 diff --git a/packages/banip/files/README.md b/packages/banip/files/README.md index 2e92d2061..d3d5a496f 100644 --- a/packages/banip/files/README.md +++ b/packages/banip/files/README.md @@ -2,66 +2,61 @@ # banIP - ban incoming and outgoing IP addresses/subnets via Sets in nftables + ## Description -IP address blocking is commonly used to protect against brute force attacks, prevent disruptive or unauthorized address(es) from access or it can be used to restrict access to or from a particular geographic area — for example. Further more banIP scans the log file via logread and bans IPs that make too many password failures, e.g. via ssh. +IP address blocking is commonly used to protect against brute force attacks, prevent disruptive or unauthorized address(es) from access or it can be used to restrict access to or from a particular geographic area — for example. 
Further more banIP scans the log file via logread and bans IPs that make too many password failures, e.g. via ssh. + ## Main Features -* banIP supports the following fully pre-configured domain blocklist feeds (free for private usage, for commercial use please check their individual licenses). -**Please note:** By default every feed blocks packet traversal in all supported chains, the table columns "WAN-INP", "WAN-FWD" and "LAN-FWD" show for which chains the feeds are suitable in common scenarios: - * WAN-INP chain applies to packets from internet to your router - * WAN-FWD chain applies to packets from internet to other local devices (not your router) - * LAN-FWD chain applies to local packets going out to the internet (not your router) - For instance the first entry should be limited to the LAN forward chain - just set the 'LAN-Forward Chain' option under the 'Feed/Set Seetings' config tab accordingly. - -| Feed | Focus | WAN-INP | WAN-FWD | LAN-FWD | Port-Limit | Information | -| :------------------ | :----------------------------- | :-----: | :-----: | :-----: | :----------: | :----------------------------------------------------------- | -| adaway | adaway IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| adguard | adguard IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| adguardtrackers | adguardtracker IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| antipopads | antipopads IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| asn | ASN segments | x | x | x | | [Link](https://asn.ipinfo.app) | -| backscatterer | backscatterer IPs | x | x | | | [Link](https://www.uceprotect.net/en/index.php) | -| becyber | malicious attacker IPs | x | x | | | [Link](https://github.com/duggytuxy/malicious_ip_addresses) | -| binarydefense | binary defense banlist | x | x | | | [Link](https://iplists.firehol.org/?ipset=bds_atif) | 
-| bogon | bogon prefixes | x | x | x | | [Link](https://team-cymru.com) | -| bruteforceblock | bruteforceblocker IPs | x | x | | | [Link](https://danger.rulez.sk/index.php/bruteforceblocker/) | -| country | country blocks | x | x | | | [Link](https://www.ipdeny.com/ipblocks) | -| cinsscore | suspicious attacker IPs | x | x | | | [Link](https://cinsscore.com/#list) | -| debl | fail2ban IP blacklist | x | x | | | [Link](https://www.blocklist.de) | -| doh | public DoH-Provider | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/DoH-IP-blocklists) | -| drop | spamhaus drop compilation | x | x | | | [Link](https://www.spamhaus.org) | -| dshield | dshield IP blocklist | x | x | | | [Link](https://www.dshield.org) | -| etcompromised | ET compromised hosts | x | x | | | [Link](https://iplists.firehol.org/?ipset=et_compromised) | -| feodo | feodo tracker | x | x | | | [Link](https://feodotracker.abuse.ch) | -| firehol1 | firehol level 1 compilation | x | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level1) | -| firehol2 | firehol level 2 compilation | x | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level2) | -| firehol3 | firehol level 3 compilation | x | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level3) | -| firehol4 | firehol level 4 compilation | x | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level4) | -| greensnow | suspicious server IPs | x | x | | | [Link](https://greensnow.co) | -| hagezi | Threat IP blocklist | | | x | tcp: 80, 443 | [Link](https://github.com/hagezi/dns-blocklists) | -| ipblackhole | blackhole IPs | x | x | | | [Link](https://github.com/BlackHoleMonster/IP-BlackHole) | -| ipsum | malicious IPs | x | x | | | [Link](https://github.com/stamparm/ipsum) | -| ipthreat | hacker and botnet TPs | x | x | | | [Link](https://ipthreat.net) | -| myip | real-time IP blocklist | x | x | | | [Link](https://myip.ms) | -| oisdbig | OISD-big IPs | | | x | tcp: 80, 443 | 
[Link](https://github.com/dibdot/banIP-IP-blocklists) | -| oisdnsfw | OISD-nsfw IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| oisdsmall | OISD-small IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| pallebone | curated IP blocklist | x | x | | | [Link](https://github.com/pallebone/StrictBlockPAllebone) | -| proxy | open proxies | x | x | | | [Link](https://iplists.firehol.org/?ipset=proxylists) | -| ssbl | SSL botnet IPs | x | x | | | [Link](https://sslbl.abuse.ch) | -| stevenblack | stevenblack IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | -| threat | emerging threats | x | x | | | [Link](https://rules.emergingthreats.net) | -| threatview | malicious IPs | x | x | | | [Link](https://threatview.io) | -| tor | tor exit nodes | x | x | x | | [Link](https://www.dan.me.uk) | -| turris | turris sentinel blocklist | x | x | | | [Link](https://view.sentinel.turris.cz) | -| uceprotect1 | spam protection level 1 | x | x | | | [Link](https://www.uceprotect.net/en/index.php) | -| uceprotect2 | spam protection level 2 | x | x | | | [Link](https://www.uceprotect.net/en/index.php) | -| uceprotect3 | spam protection level 3 | x | x | | | [Link](https://www.uceprotect.net/en/index.php) | -| urlhaus | urlhaus IDS IPs | x | x | | | [Link](https://urlhaus.abuse.ch) | -| urlvir | malware related IPs | x | x | | | [Link](https://iplists.firehol.org/?ipset=urlvir) | -| webclient | malware related IPs | x | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_webclient) | -| voip | VoIP fraud blocklist | x | x | | | [Link](https://voipbl.org) | -| yoyo | yoyo IPs | | | x | tcp: 80, 443 | [Link](https://github.com/dibdot/banIP-IP-blocklists) | +* banIP supports the following fully pre-configured IP blocklist feeds (free for private usage, for commercial use please check their individual licenses). 
+**Please note:** By default, each feed blocks the packet flow in the chain shown in the table below. _Inbound_ combines the chains WAN-Input and WAN-Forward, _Outbound_ represents the LAN-Forward chain:
+ * WAN-INP chain applies to packets from internet to your router
+ * WAN-FWD chain applies to packets from internet to other local devices (not your router)
+ * LAN-FWD chain applies to local packets going out to the internet (not your router)
+ The listed standard assignments can be changed to your needs under the 'Feed/Set Settings' config tab.
+
+| Feed                | Focus                          | Inbound | Outbound | Proto/Port        | Information                                                  |
+| :------------------ | :----------------------------- | :-----: | :------: | :---------------: | :----------------------------------------------------------- |
+| asn                 | ASN segments                   | x       |          |                   | [Link](https://asn.ipinfo.app)                               |
+| backscatterer       | backscatterer IPs              | x       |          |                   | [Link](https://www.uceprotect.net/en/index.php)              |
+| becyber             | malicious attacker IPs         | x       |          |                   | [Link](https://github.com/duggytuxy/malicious_ip_addresses)  |
+| binarydefense       | binary defense banlist         | x       |          |                   | [Link](https://iplists.firehol.org/?ipset=bds_atif)          |
+| bogon               | bogon prefixes                 | x       |          |                   | [Link](https://team-cymru.com)                               |
+| bruteforceblock     | bruteforceblocker IPs          | x       |          |                   | [Link](https://danger.rulez.sk/index.php/bruteforceblocker/) |
+| country             | country blocks                 | x       |          |                   | [Link](https://www.ipdeny.com/ipblocks)                      |
+| cinsscore           | suspicious attacker IPs        | x       |          |                   | [Link](https://cinsscore.com/#list)                          |
+| debl                | fail2ban IP blacklist          | x       |          |                   | [Link](https://www.blocklist.de)                             |
+| dns                 | public DNS-Server              |         | x        | tcp, udp: 53, 853 | [Link](https://public-dns.info)                              |
+| doh                 | public DoH-Server              |         | x        | tcp, udp: 80, 443 | [Link](https://github.com/dibdot/DoH-IP-blocklists)          |
+| drop                | spamhaus drop compilation      | x       |          |                   | [Link](https://www.spamhaus.org)                             |
+| dshield             | dshield IP blocklist           | x       |          |                   | [Link](https://www.dshield.org)                              |
+| etcompromised       | ET compromised hosts           | x       |          |                   | 
[Link](https://iplists.firehol.org/?ipset=et_compromised) | +| feodo | feodo tracker | x | | | [Link](https://feodotracker.abuse.ch) | +| firehol1 | firehol level 1 compilation | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level1) | +| firehol2 | firehol level 2 compilation | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level2) | +| firehol3 | firehol level 3 compilation | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level3) | +| firehol4 | firehol level 4 compilation | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_level4) | +| greensnow | suspicious server IPs | x | | | [Link](https://greensnow.co) | +| hagezi | Threat IP blocklist | | x | tcp, udp: 80, 443 | [Link](https://github.com/hagezi/dns-blocklists) | +| ipblackhole | blackhole IPs | x | | | [Link](https://github.com/BlackHoleMonster/IP-BlackHole) | +| ipexdbl | IPEX dynamic blocklists | x | | | [Link](https://github.com/ZEROF/ipextractor) | +| ipsum | malicious IPs | x | | | [Link](https://github.com/stamparm/ipsum) | +| ipthreat | hacker and botnet IPs | x | | | [Link](https://ipthreat.net) | +| myip | real-time IP blocklist | x | | | [Link](https://myip.ms) | +| proxy | open proxies | x | | | [Link](https://iplists.firehol.org/?ipset=proxylists) | +| threat | emerging threats | x | | | [Link](https://rules.emergingthreats.net) | +| threatview | malicious IPs | x | | | [Link](https://threatview.io) | +| tor | tor exit nodes | x | | | [Link](https://www.dan.me.uk) | +| turris | turris sentinel blocklist | x | | | [Link](https://view.sentinel.turris.cz) | +| uceprotect1 | spam protection level 1 | x | | | [Link](https://www.uceprotect.net/en/index.php) | +| uceprotect2 | spam protection level 2 | x | | | [Link](https://www.uceprotect.net/en/index.php) | +| uceprotect3 | spam protection level 3 | x | | | [Link](https://www.uceprotect.net/en/index.php) | +| urlhaus | urlhaus IDS IPs | x | | | [Link](https://urlhaus.abuse.ch) | +| urlvir | malware related 
IPs | x | | | [Link](https://iplists.firehol.org/?ipset=urlvir) | +| webclient | malware related IPs | x | | | [Link](https://iplists.firehol.org/?ipset=firehol_webclient) | +| voip | VoIP fraud blocklist | x | | | [Link](https://voipbl.org) | +| vpn | vpn IPs | x | | | [Link](https://github.com/X4BNet/lists_vpn) | +| vpndc | vpn datacenter IPs | x | | | [Link](https://github.com/X4BNet/lists_vpn) | * Zero-conf like automatic installation & setup, usually no manual changes needed * All Sets are handled in a separate nft table/namespace 'banIP' @@ -78,397 +73,36 @@ IP address blocking is commonly used to protect against brute force attacks, pre * Auto-add unsuccessful LuCI, nginx, Asterisk or ssh login attempts to the local blocklist * Auto-add entire subnets to the blocklist Set based on an additional RDAP request with the monitored suspicious IP * Fast feed processing as they are handled in parallel as background jobs (on capable multi-core hardware) -* Per feed it can be defined whether the wan-input chain, the wan-forward chain or the lan-forward chain should be blocked (default: all chains) +* Per feed it can be defined whether the inbound chain (wan-input, wan-forward) or the outbound chain (lan-forward) should be blocked * Automatic blocklist backup & restore, the backups will be used in case of download errors or during startup -* Automatically selects one of the following download utilities with ssl support: aria2c, curl, uclient-fetch or full wget +* Automatically selects one of the following download utilities with ssl support: curl, uclient-fetch or full wget * Provides HTTP ETag support to download only ressources that have been updated on the server side, to speed up banIP reloads and to save bandwith -* Supports an 'allowlist only' mode, this option skips all blocklists and restricts the internet access only to specific, explicitly allowed IP segments +* Supports an 'allowlist only' mode, this option restricts the internet access only to specific, 
explicitly allowed IP segments * Supports external allowlist URLs to reference additional IPv4/IPv6 feeds -* Optionally always allow certain protocols/destination ports in wan-input and wan-forward chains +* Optionally always allow certain protocols/destination ports in the inbound chain * Deduplicate IPs accross all Sets (single IPs only, no intervals) +* Implements BCP38 ingress filtering to prevent IP address spoofing * Provides comprehensive runtime information -* Provides a detailed Set report +* Provides a detailed Set report, incl. a map that shows the geolocation of your own uplink addresses (in green) and the location of potential attackers (in red) * Provides a Set search engine for certain IPs * Feed parsing by fast & flexible regex rulesets * Minimal status & error logging to syslog, enable debug logging to receive more output -* Procd based init system support (start/stop/restart/reload/status/report/search/survey/lookup) +* Procd based init system support (start/stop/restart/reload/status/report/search/content) * Procd network interface trigger support * Add new or edit existing banIP feeds on your own with the LuCI integrated custom feed editor -* Supports destination port & protocol limitations for external feeds (see the feed list above). To change the default assignments just use the feed editor +* Supports destination port & protocol limitations for external feeds (see the feed list above). 
To change the default assignments just use the custom feed editor * Supports allowing / blocking of certain VLAN forwards * Provides an option to transfer logging events on remote servers via cgi interface + ## Prerequisites -* **[OpenWrt](https://openwrt.org)**, latest stable release or a snapshot with nft/firewall 4 support -* A download utility with SSL support: 'aria2c', 'curl', full 'wget' or 'uclient-fetch' with one of the 'libustream-*' SSL libraries, the latter one doesn't provide support for ETag HTTP header +* **[OpenWrt](https://openwrt.org)**, latest stable release or a development snapshot with nft/firewall 4 support +* A download utility with SSL support: 'curl', full 'wget' or 'uclient-fetch' with one of the 'libustream-*' SSL libraries, the latter one doesn't provide support for ETag HTTP header * A certificate store like 'ca-bundle', as banIP checks the validity of the SSL certificates of all download sites by default -* For E-Mail notifications you need to install and setup the additional 'msmtp' package +* For E-Mail notifications you need to install and setup the additional 'msmtp' package **Please note:** -* Devices with less than 256Mb of RAM are **_not_** supported -* Any previous installation of ancient banIP 0.7.x must be uninstalled, and the /etc/banip folder and the /etc/config/banip configuration file must be deleted (they are recreated when this version is installed) - -## Installation & Usage -* Update your local opkg repository (_opkg update_) -* Install banIP (_opkg install banip_) - the banIP service is disabled by default -* Install the LuCI companion package 'luci-app-banip' (opkg install luci-app-banip) -* It's strongly recommended to use the LuCI frontend to easily configure all aspects of banIP, the application is located in LuCI under the 'Services' menu -* To be able to use banIP in a meaningful way, you must activate the service and possibly also activate a few blocklist feeds -* If you're using a complex network setup, e.g. 
special tunnel interfaces, than untick the 'Auto Detection' option under the 'General Settings' tab and set the required options manually -* Start the service with '/etc/init.d/banip start' and check everything is working by running '/etc/init.d/banip status' and also check the 'Firewall Log' and 'Processing Log' tabs - -## banIP CLI interface -* All important banIP functions are accessible via CLI, too. If you're going to configure banIP via CLI, edit the config file '/etc/config/banip' and enable the service, add pre-configured feeds and add/change other options to your needs, see the options reference table below. -``` -~# /etc/init.d/banip -Syntax: /etc/init.d/banip [command] - -Available commands: - start Start the service - stop Stop the service - restart Restart the service - reload Reload configuration files (or restart if service does not implement reload) - enable Enable service autostart - disable Disable service autostart - enabled Check if service is started on boot - report [text|json|mail] Print banIP related Set statistics - search [|] Check if an element exists in a banIP Set - survey [] List all elements of a given banIP Set - lookup Lookup the IPs of domain names in the local lists and update them - running Check if service is running - status Service status - trace Start with syscall trace - info Dump procd service info -``` - -## banIP config options - -| Option | Type | Default | Description | -| :---------------------- | :----- | :---------------------------- | :---------------------------------------------------------------------------------------------------------------- | -| ban_enabled | option | 0 | enable the banIP service | -| ban_nicelimit | option | 0 | ulimit nice level of the banIP service (range 0-19) | -| ban_filelimit | option | 1024 | ulimit max open/number of files (range 1024-4096) | -| ban_loglimit | option | 100 | scan only the last n log entries permanently. 
A value of '0' disables the monitor | -| ban_logcount | option | 1 | how many times the IP must appear in the log to be considered as suspicious | -| ban_logterm | list | regex | various regex for logfile parsing (default: dropbear, sshd, luci, nginx, asterisk and cgi-remote events) | -| ban_logreadfile | option | /var/log/messages | alternative location for parsing a log file via tail, to deactivate the standard parsing via logread | -| ban_autodetect | option | 1 | auto-detect wan interfaces, devices and subnets | -| ban_debug | option | 0 | enable banIP related debug logging | -| ban_icmplimit | option | 10 | threshold in number of packets to detect icmp DoS in prerouting chain. A value of '0' disables this safeguard | -| ban_synlimit | option | 10 | threshold in number of packets to detect syn DoS in prerouting chain. A value of '0' disables this safeguard | -| ban_udplimit | option | 100 | threshold in number of packets to detect udp DoS in prerouting chain. A value of '0' disables this safeguard | -| ban_logprerouting | option | 0 | log supsicious packets in the prerouting chain | -| ban_loginput | option | 0 | log supsicious packets in the wan-input chain | -| ban_logforwardwan | option | 0 | log supsicious packets in the wan-forward chain | -| ban_logforwardlan | option | 0 | log supsicious packets in the lan-forward chain | -| ban_autoallowlist | option | 1 | add wan IPs/subnets and resolved domains automatically to the local allowlist (not only to the Sets) | -| ban_autoblocklist | option | 1 | add suspicious attacker IPs and resolved domains automatically to the local blocklist (not only to the Sets) | -| ban_autoblocksubnet | option | 0 | add entire subnets to the blocklist Sets based on an additional RDAP request with the suspicious IP | -| ban_autoallowuplink | option | subnet | limit the uplink autoallow function to: 'subnet', 'ip' or 'disable' it at all | -| ban_allowlistonly | option | 0 | skip all blocklists and restrict the internet access only 
to specific, explicitly allowed IP segments | -| ban_allowflag | option | - | always allow certain protocols(tcp or udp) plus destination ports or port ranges, e.g.: 'tcp 80 443-445' | -| ban_allowurl | list | - | external allowlist feed URLs, one or more references to simple remote IP lists | -| ban_basedir | option | /tmp | base working directory while banIP processing | -| ban_reportdir | option | /tmp/banIP-report | directory where banIP stores the report files | -| ban_backupdir | option | /tmp/banIP-backup | directory where banIP stores the compressed backup files | -| ban_protov4 | option | - / autodetect | enable IPv4 support | -| ban_protov6 | option | - / autodetect | enable IPv6 support | -| ban_ifv4 | list | - / autodetect | logical wan IPv4 interfaces, e.g. 'wan' | -| ban_ifv6 | list | - / autodetect | logical wan IPv6 interfaces, e.g. 'wan6' | -| ban_dev | list | - / autodetect | wan device(s), e.g. 'eth2' | -| ban_vlanallow | list | - | always allow certain VLAN forwards, e.g. br-lan.20 | -| ban_vlanblock | list | - | always block certain VLAN forwards, e.g. br-lan.10 | -| ban_trigger | list | - | logical reload trigger interface(s), e.g. 'wan' | -| ban_triggerdelay | option | 20 | trigger timeout during interface reload and boot | -| ban_deduplicate | option | 1 | deduplicate IP addresses across all active Sets | -| ban_splitsize | option | 0 | split the processing/loading of Sets in chunks of n lines/members (saves RAM) | -| ban_cores | option | - / autodetect | limit the cpu cores used by banIP (saves RAM) | -| ban_nftloglevel | option | warn | nft loglevel, values: emerg, alert, crit, err, warn, notice, info, debug | -| ban_nftpriority | option | -100 | nft priority for the banIP table (the prerouting table is fixed to priority -150) | -| ban_nftpolicy | option | memory | nft policy for banIP-related Sets, values: memory, performance | -| ban_nftexpiry | option | - | expiry time for auto added blocklist members, e.g. 
'5m', '2h' or '1d' | -| ban_feed | list | - | external download feeds, e.g. 'yoyo', 'doh', 'country' or 'talos' (see feed table) | -| ban_asn | list | - | ASNs for the 'asn' feed, e.g.'32934' | -| ban_region | list | - | Regional Internet Registry (RIR) country selection. Supported regions are: AFRINIC, ARIN, APNIC, LACNIC and RIPE | -| ban_country | list | - | country iso codes for the 'country' feed, e.g. 'ru' | -| ban_blockpolicy | option | - | limit the default block policy to a certain chain, e.g. 'input', 'forwardwan' or 'forwardlan' | -| ban_blocktype | option | drop | 'drop' packets silently on input and forwardwan chains or actively 'reject' the traffic | -| ban_blockinput | list | - | limit a feed to the wan-input chain, e.g. 'country' | -| ban_blockforwardwan | list | - | limit a feed to the wan-forward chain, e.g. 'debl' | -| ban_blockforwardlan | list | - | limit a feed to the lan-forward chain, e.g. 'doh' | -| ban_fetchcmd | option | - / autodetect | 'uclient-fetch', 'wget', 'curl' or 'aria2c' | -| ban_fetchparm | option | - / autodetect | set the config options for the selected download utility | -| ban_fetchretry | option | 5 | number of download attempts in case of an error (not supported by uclient-fetch) | -| ban_fetchinsecure | option | 0 | don't check SSL server certificates during download | -| ban_mailreceiver | option | - | receiver address for banIP related notification E-Mails | -| ban_mailsender | option | no-reply@banIP | sender address for banIP related notification E-Mails | -| ban_mailtopic | option | banIP notification | topic for banIP related notification E-Mails | -| ban_mailprofile | option | ban_notify | mail profile used in 'msmtp' for banIP related notification E-Mails | -| ban_mailnotification | option | 0 | receive E-Mail notifications with every banIP run | -| ban_reportelements | option | 1 | count Set elements in the report, disable this option to speed up the report significantly | -| ban_resolver | option | - | external 
resolver used for DNS lookups, by default the local resolver/forwarder will be used | -| ban_remotelog | option | 0 | enable the cgi interface to receive remote logging events | -| ban_remotetoken | option | - | unique token to communicate with the cgi interface | - -## Examples -**banIP report information** -``` -~# /etc/init.d/banip report -::: -::: banIP Set Statistics -::: - Timestamp: 2024-04-17 23:02:15 - ------------------------------ - blocked syn-flood packets : 5 - blocked udp-flood packets : 11 - blocked icmp-flood packets : 6 - blocked invalid ct packets : 277 - blocked invalid tcp packets: 0 - --- - auto-added IPs to allowlist: 0 - auto-added IPs to blocklist: 0 - - Set | Elements | WAN-Input (packets) | WAN-Forward (packets) | LAN-Forward (packets) | Port/Protocol Limit - ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------ - allowlistv4MAC | 0 | - | - | ON: 0 | - - allowlistv6MAC | 0 | - | - | ON: 0 | - - allowlistv4 | 1 | ON: 0 | ON: 0 | ON: 0 | - - allowlistv6 | 2 | ON: 0 | ON: 0 | ON: 0 | - - adguardtrackersv6 | 105 | - | - | ON: 0 | tcp: 80, 443 - adguardtrackersv4 | 816 | - | - | ON: 0 | tcp: 80, 443 - becyberv4 | 229006 | ON: 2254 | ON: 0 | - | - - cinsscorev4 | 7135 | ON: 1630 | ON: 2 | - | - - deblv4 | 10191 | ON: 23 | ON: 0 | - | - - countryv6 | 38233 | ON: 7 | ON: 0 | - | - - countryv4 | 37169 | ON: 2323 | ON: 0 | - | - - deblv6 | 65 | ON: 0 | ON: 0 | - | - - dropv6 | 66 | ON: 0 | ON: 0 | - | - - dohv4 | 1219 | - | - | ON: 0 | tcp: 80, 443 - dropv4 | 895 | ON: 75 | ON: 0 | - | - - dohv6 | 832 | - | - | ON: 0 | tcp: 80, 443 - threatv4 | 20 | ON: 0 | ON: 0 | - | - - firehol1v4 | 753 | ON: 1 | ON: 0 | - | - - ipthreatv4 | 1369 | ON: 20 | ON: 0 | - | - - firehol2v4 | 2216 | ON: 1 | ON: 0 | - | - - turrisv4 | 5613 | ON: 179 | ON: 0 | - | - - blocklistv4MAC | 0 | - | - | ON: 0 | - - blocklistv6MAC | 0 | - | - | ON: 0 | - - blocklistv4 | 0 | ON: 0 | ON: 0 | ON: 0 | - 
- blocklistv6 | 0 | ON: 0 | ON: 0 | ON: 0 | - - ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------ - 25 | 335706 | 17 (6513) | 17 (2) | 12 (0) -``` - -**banIP runtime information** -``` -::: banIP runtime information - + status : active (nft: ✔, monitor: ✔) - + version : 0.9.6-r1 - + element_count : 108036 - + active_feeds : allowlistv4MAC, allowlistv6MAC, allowlistv4, allowlistv6, cinsscorev4, deblv4, countryv6, countryv4, deblv6, dohv4, dohv6, turrisv4, blocklistv4MAC, blocklistv6MAC, blocklistv4, blocklistv6 - + active_devices : wan: pppoe-wan / wan-if: wan, wan_6 / vlan-allow: - / vlan-block: - - + active_uplink : 217.83.205.130, fe80::9cd6:12e9:c4df:75d3, 2003:ed:b5ff:43bd:9cd5:12e7:c3ef:75d8 - + nft_info : priority: -100, policy: performance, loglevel: warn, expiry: 2h, limit (icmp/syn/udp): 10/10/100 - + run_info : base: /mnt/data/banIP, backup: /mnt/data/banIP/backup, report: /mnt/data/banIP/report - + run_flags : auto: ✔, proto (4/6): ✔/✔, log (pre/inp/fwd/lan): ✔/✘/✘/✘, dedup: ✔, split: ✘, custom feed: ✘, allowed only: ✘ - + last_run : action: reload, log: logread, fetch: curl, duration: 1m 21s, date: 2024-05-27 05:56:29 - + system_info : cores: 4, memory: 1661, device: Bananapi BPI-R3, OpenWrt SNAPSHOT r26353-a96354bcfb -``` - -**banIP search information** -``` -~# /etc/init.d/banip search 221.228.105.173 -::: -::: banIP Search -::: - Looking for IP '221.228.105.173' on 2023-02-08 22:12:48 - --- - IP found in Set 'oisdbasicv4' -``` - -**banIP survey information** -``` -~# /etc/init.d/banip survey cinsscorev4 -::: -::: banIP Survey -::: - List of elements in the Set 'cinsscorev4' on 2023-03-06 14:07:58 - --- -1.10.187.179 -1.10.203.30 -1.10.255.58 -1.11.67.53 -1.11.114.211 -[...] -``` - -## Best practise & tweaks -**Recommendation for low memory systems** -nftables supports the atomic loading of firewall rules (incl. 
elements), which is cool but unfortunately is also very memory intensive. To reduce the memory pressure on low memory systems (i.e. those with 256-512Mb RAM), you should optimize your configuration with the following options: - -* point 'ban_basedir', 'ban_reportdir' and 'ban_backupdir' to an external usb drive -* set 'ban_cores' to '1' (only useful on a multicore system) to force sequential feed processing -* set 'ban_splitsize' e.g. to '1024' to split the load of an external Set after every 1024 lines/elements -* set 'ban_reportelements' to '0' to disable the CPU intensive counting of Set elements - -**Sensible choice of blocklists** -The following feeds are just my personal recommendation as an initial setup: -* cinsscore, debl, turris in WAN-Input and WAN-Forward chain -* doh in LAN-Forward chain - -In total, this feed selection blocks about 20K IP addresses. It may also be useful to include some countries to the country feed in WAN-Input and WAN-Forward chain. -Please note: don't just blindly activate (too) many feeds at once, sooner or later this will lead to OOM conditions. - -**Log Terms for logfile parsing** -Like fail2ban and crowdsec, banIP supports logfile scanning and automatic blocking of suspicious attacker IPs. -In the default config only the log terms to detect failed login attempts via dropbear and LuCI are in place. 
The following search pattern has been tested as well - just transfer the required regular expression via cut and paste to your config (without quotation marks): -``` -dropbear : 'Exit before auth from' -LuCI : 'luci: failed login' -sshd1 : 'error: maximum authentication attempts exceeded' -sshd2 : 'sshd.*Connection closed by.*\[preauth\]' -asterisk : 'SecurityEvent=\"InvalidAccountID\".*RemoteAddress=' -nginx : 'received a suspicious remote IP '\''.*'\''' -openvpn : 'TLS Error: could not determine wrapping from \[AF_INET\]' -AdGuard : 'AdGuardHome.*\[error\].*/control/login: from ip' -``` -You find the 'Log Terms' option in LuCI under the 'Log Settings' tab. Feel free to add more log terms to meet your needs and protect additional services. - -**Allow-/Blocklist handling** -banIP supports local allow- and block-lists, MAC/IPv4/IPv6 addresses (incl. ranges in CIDR notation) or domain names. These files are located in /etc/banip/banip.allowlist and /etc/banip/banip.blocklist. -Unsuccessful login attempts or suspicious requests will be tracked and added to the local blocklist (see the 'ban_autoblocklist' option). The blocklist behaviour can be further tweaked with the 'ban_nftexpiry' option. -Depending on the options 'ban_autoallowlist' and 'ban_autoallowuplink' the uplink subnet or the uplink IP will be added automatically to local allowlist. -Furthermore, you can reference external Allowlist URLs with additional IPv4 and IPv6 feeds (see 'ban_allowurl'). -Both local lists also accept domain names as input to allow IP filtering based on these names. The corresponding IPs (IPv4 & IPv6) will be extracted and added to the Sets. You can also start the domain lookup separately via /etc/init.d/banip lookup at any time. - -**Allowlist-only mode** -banIP supports an "allowlist only" mode. This option skips all blocklists and restricts Internet access only to certain, explicitly permitted IP segments - and blocks access to the rest of the Internet. 
All IPs that are _not_ listed in the allowlist or in the external allowlist URLs are blocked. In this mode it might be useful to limit the allowlist feed to the wan-input / wan-forward chain, to still allow lan-forward communication to the rest of the world. - -**MAC/IP-binding** -banIP supports concatenation of local MAC addresses/ranges with IPv4/IPv6 addresses, e.g. to enforce dhcp assignments. -The following notations in the local allow- and block-list are supported: -``` -MAC-address only: -C8:C2:9B:F7:80:12 => this will be populated to the v4MAC- and v6MAC-Sets with the IP-wildcards 0.0.0.0/0 and ::/0 - -MAC-address range: -C8:C2:9B:F7:80:12/24 => this populate the MAC-range C8:C2:9B:00:00:00", "C8:C2:9B:FF:FF:FF to the v4MAC- and v6MAC-Sets with the IP-wildcards 0.0.0.0/0 and ::/0 - -MAC-address with IPv4 concatenation: -C8:C2:9B:F7:80:12 192.168.1.10 => this will be populated only to v4MAC-Set with the certain IP, no entry in the v6MAC-Set - -MAC-address with IPv6 concatenation: -C8:C2:9B:F7:80:12 2a02:810c:0:80:a10e:62c3:5af:f3f => this will be populated only to v6MAC-Set with the certain IP, no entry in the v4MAC-Set - -MAC-address with IPv4 and IPv6 concatenation: -C8:C2:9B:F7:80:12 192.168.1.10 => this will be populated to v4MAC-Set with the certain IP -C8:C2:9B:F7:80:12 2a02:810c:0:80:a10e:62c3:5af:f3f => this will be populated to v6MAC-Set with the certain IP - -MAC-address with IPv4 and IPv6 wildcard concatenation: -C8:C2:9B:F7:80:12 192.168.1.10 => this will be populated to v4MAC-Set with the certain IP -C8:C2:9B:F7:80:12 => this will be populated to v6MAC-Set with the IP-wildcard ::/0 -``` - -**CGI interface to receive remote logging events** -banIP ships a basic cgi interface in '/www/cgi-bin/banip' to receive remote logging events (disabled by default). The cgi interface evaluates logging events via GET or POST request (see examples below). 
To enable the cgi interface set the following options: - - * set 'ban_remotelog' to '1' to enbale the cgi interface - * set 'ban_remotetoken' to a secret transfer token, allowed token characters consist of '[A-Za-z]', '[0-9]', '.' and ':' - - Examples to transfer remote logging events from an internal server to banIP via cgi interface: - - * POST request: curl --insecure --data "=" https://192.168.1.1/cgi-bin/banip - * GET request: wget --no-check-certificate https://192.168.1.1/cgi-bin/banip?= - -Please note: for security reasons use this cgi interface only internally and only encrypted via https transfer protocol. - -**Download options** -By default banIP uses the following pre-configured download options: -``` - * aria2c: --timeout=20 --retry-wait=10 --max-tries=5 --max-file-not-found=5 --allow-overwrite=true --auto-file-renaming=false --log-level=warn --dir=/ -o - * curl: --connect-timeout 20 --retry-delay 10 --retry 5 --retry-all-errors --fail --silent --show-error --location -o - * wget: --no-cache --no-cookies --timeout=20 --waitretry=10 --tries=5 --retry-connrefused --max-redirect=0 -O - * uclient-fetch: --timeout=20 -O -``` -To override the default set 'ban_fetchretry', 'ban_fetchinsecure' or globally 'ban_fetchparm' to your needs. - -**Configure E-Mail notifications via 'msmtp'** -To use the email notification you must install and configure the package 'msmtp'. -Modify the file '/etc/msmtprc', e.g.: -``` -[...] -defaults -auth on -tls on -tls_certcheck off -timeout 5 -syslog LOG_MAIL -[...] -account ban_notify -host smtp.gmail.com -port 587 -from
@gmail.com -user -password -``` -Finally add a valid E-Mail receiver address in banIP. - -**Send status E-Mails and update the banIP lists via cron job** -For a regular, automatic status mailing and update of the used lists on a daily basis set up a cron job, e.g. -``` -55 03 * * * /etc/init.d/banip report mail -00 04 * * * /etc/init.d/banip reload -``` -**Redirect asterisk security logs to lodg/logread** -By default banIP scans the logfile via logread, so to monitor attacks on asterisk, its security log must be available via logread. To do this, edit '/etc/asterisk/logger.conf' and add the line 'syslog.local0 = security', then run 'asterisk -rx reload logger' to update the running asterisk configuration. - -**Change/add banIP feeds and port limitations** -The banIP default blocklist feeds are stored in an external JSON file '/etc/banip/banip.feeds'. All custom changes should be stored in an external JSON file '/etc/banip/banip.custom.feeds' (empty by default). It's recommended to use the LuCI based Custom Feed Editor to make changes to this file. -A valid JSON source object contains the following information, e.g.: -``` - [...] -"stevenblack":{ - "url_4": "https://raw.githubusercontent.com/dibdot/banIP-IP-blocklists/main/stevenblack-ipv4.txt", - "url_6": "https://raw.githubusercontent.com/dibdot/banIP-IP-blocklists/main/stevenblack-ipv6.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[[:space:]]/{printf \"%s,\\n\",$1}", - "rule_6": "/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)[[:space:]]/{printf \"%s,\\n\",$1}", - "descr": "stevenblack IPs", - "flag": "tcp 80 443" - }, - [...] -``` -Add an unique feed name (no spaces, no special chars) and make the required changes: adapt at least the URL, the regex and the description for a new feed. 
-Please note: the flag field is optional, it's a space separated list of options: supported are 'gz' as an archive format, protocols 'tcp' or 'udp' with port numbers/port ranges for destination port limitations - multiple definitions are possible. - -**Debug options** -Whenever you encounter banIP related processing problems, please check the "Processing Log" tab. -Typical symptoms: -* The nftables initialization failed: untick the 'Auto Detection' option in the 'General Settings' config section and set the required options manually -* A blocklist feed does not work: maybe a temporary server problem or the download URL has been changed. In the latter case, just use the Custom Feed Editor to point this feed to a new URL -To get much more processing information, please enable "Verbose Debug Logging" and restart banIP. - -Whenever you encounter firewall problems, enable the logging of certain chains in the "Log Settings" config section, restart banIP and check the "Firewall Log" tab. -Typical symptoms: -* A feed blocks a legit IP: disable the entire feed or add this IP to your local allowlist and reload banIP -* A feed (e.g. doh) interrupts almost all client connections: check the feed table above for reference and limit the feed to a certain chain in the "Feed/Set Settings" config section -* The allowlist doesn't free a certain IP/MAC address: check the current content of the allowlist with the "Set Survey" under the "Set Reporting" tab to make sure that the desired IP/MAC is listed - if not, reload banIP - -## Support -Please join the banIP discussion in this [forum thread](https://forum.openwrt.org/t/banip-support-thread/16985) or contact me by mail -If you want to report an error, please describe it in as much detail as possible - with (debug) logs, the current banIP status, your banIP configuration, etc. - -## Removal -Stop all banIP related services with _/etc/init.d/banip stop_ and remove the banip package if necessary. 
- -## Donations -You like this project - is there a way to donate? Generally speaking "No" - I have a well-paying full-time job and my OpenWrt projects are just a hobby of mine in my spare time. - -If you still insist to donate some bucks ... -* I would be happy if you put your money in kind into other, social projects in your area, e.g. a children's hospice -* Let's meet and invite me for a coffee if you are in my area, the “Markgräfler Land” in southern Germany or in Switzerland (Basel) -* Send your money to my [PayPal account](https://www.paypal.me/DirkBrenken) and I will collect your donations over the year to support various social projects in my area - -No matter what you decide - thank you very much for your support! +* Devices with less than 256MB of RAM are **_not_** supported +* After system upgrades it's recommended to start with a fresh banIP default config -Have fun! -Dirk +For more information and documentation, please visit the official [banIP GitHub repository](https://github.com/openwrt/packages/tree/master/net/banip). diff --git a/packages/banip/files/banip-functions.sh b/packages/banip/files/banip-functions.sh old mode 100644 new mode 100755 index 71f9e5723..e57e78799 --- a/packages/banip/files/banip-functions.sh +++ b/packages/banip/files/banip-functions.sh @@ -1,5 +1,5 @@ # banIP shared function library/include - ban incoming and outgoing IPs via named nftables Sets -# Copyright (c) 2018-2024 Dirk Brenken (dev@brenken.org) +# Copyright (c) 2018-2026 Dirk Brenken (dev@brenken.org) # This is free software, licensed under the GNU General Public License v3. 
# (s)hellcheck exceptions @@ -15,6 +15,7 @@ export PATH="/usr/sbin:/usr/bin:/sbin:/bin" ban_basedir="/tmp" ban_backupdir="/tmp/banIP-backup" ban_reportdir="/tmp/banIP-report" +ban_errordir="/tmp/banIP-error" ban_feedfile="/etc/banip/banip.feeds" ban_countryfile="/etc/banip/banip.countries" ban_customfeedfile="/etc/banip/banip.custom.feeds" @@ -25,34 +26,40 @@ ban_pidfile="/var/run/banip.pid" ban_rtfile="/var/run/banip_runtime.json" ban_rdapfile="/var/run/banip_rdap.json" ban_rdapurl="https://rdap.db.ripe.net/ip/" +ban_geourl="http://ip-api.com/batch" ban_lock="/var/run/banip.lock" -ban_logreadfile="/var/log/messages" +ban_errorlog="/dev/null" +ban_logreadfile="" ban_logreadcmd="" ban_mailsender="no-reply@banIP" ban_mailreceiver="" ban_mailtopic="banIP notification" ban_mailprofile="ban_notify" ban_mailnotification="0" -ban_reportelements="1" ban_remotelog="0" ban_remotetoken="" ban_nftloglevel="warn" ban_nftpriority="-100" ban_nftpolicy="memory" ban_nftexpiry="" -ban_loglimit="100" -ban_icmplimit="10" +ban_nftretry="3" +ban_nftcount="0" +ban_map="0" +ban_bcp38="0" +ban_icmplimit="25" ban_synlimit="10" ban_udplimit="100" +ban_loglimit="100" ban_logcount="1" ban_logterm="" ban_region="" ban_country="" +ban_countrysplit="0" ban_asn="" +ban_asnsplit="0" ban_logprerouting="0" -ban_loginput="0" -ban_logforwardwan="0" -ban_logforwardlan="0" +ban_loginbound="0" +ban_logoutbound="0" ban_allowurl="" ban_allowflag="" ban_allowlistonly="0" @@ -64,11 +71,12 @@ ban_deduplicate="1" ban_splitsize="0" ban_autodetect="1" ban_feed="" -ban_blockpolicy="" -ban_blocktype="drop" -ban_blockinput="" -ban_blockforwardwan="" -ban_blockforwardlan="" +ban_feedin="" +ban_feedout="" +ban_feedinout="" +ban_feedcomplete="" +ban_feedreset="" +ban_blockpolicy="drop" ban_protov4="0" ban_protov6="0" ban_ifv4="" @@ -83,6 +91,7 @@ ban_fetchinsecure="" ban_fetchretry="5" ban_rdapparm="" ban_etagparm="" +ban_geoparm="" ban_cores="" ban_packages="" ban_trigger="" @@ -95,17 +104,29 @@ ban_debug="0" 
f_system() { local cpu core - if [ -z "${ban_dev}" ]; then - ban_debug="$(uci_get banip global ban_debug "0")" - ban_cores="$(uci_get banip global ban_cores)" + ban_debug="$(uci_get banip global ban_debug "0")" + ban_cores="$(uci_get banip global ban_cores)" + ban_basedir="$(uci_get banip global ban_basedir "/tmp")" + + # set debug log file + # + if [ "${ban_debug}" = "1" ] && [ -d "${ban_basedir}" ]; then + ban_errorlog="${ban_basedir}/ban_error.log" + else + ban_errorlog="/dev/null" fi - ban_packages="$("${ban_ubuscmd}" -S call rpc-sys packagelist '{ "all": true }' 2>/dev/null)" - ban_ver="$(printf "%s" "${ban_packages}" | "${ban_jsoncmd}" -ql1 -e '@.packages.banip')" - ban_sysver="$("${ban_ubuscmd}" -S call system board 2>/dev/null | "${ban_jsoncmd}" -ql1 -e '@.model' -e '@.release.target' -e '@.release.distribution' -e '@.release.version' -e '@.release.revision' | - "${ban_awkcmd}" 'BEGIN{RS="";FS="\n"}{printf "%s, %s, %s %s %s %s",$1,$2,$3,$4,$5,$6}')" + + # get banIP version and system information + # + ban_packages="$("${ban_ubuscmd}" -S call rpc-sys packagelist '{ "all": true }' 2>>"${ban_errorlog}")" + ban_bver="$(printf "%s" "${ban_packages}" | "${ban_jsoncmd}" -ql1 -e '@.packages.banip')" + ban_fver="$(printf "%s" "${ban_packages}" | "${ban_jsoncmd}" -ql1 -e '@.packages["luci-app-banip"]')" + ban_sysver="$("${ban_ubuscmd}" -S call system board 2>>"${ban_errorlog}" | "${ban_jsoncmd}" -ql1 -e '@.model' -e '@.release.target' -e '@.release.distribution' -e '@.release.version' -e '@.release.revision' | + "${ban_awkcmd}" 'BEGIN{RS="";FS="\n"}{printf "%s, %s, %s %s (%s)",$1,$2,$3,$4,$5}')" + if [ -z "${ban_cores}" ]; then - cpu="$("${ban_grepcmd}" -c '^processor' /proc/cpuinfo 2>/dev/null)" - core="$("${ban_grepcmd}" -cm1 '^core id' /proc/cpuinfo 2>/dev/null)" + cpu="$("${ban_grepcmd}" -c '^processor' /proc/cpuinfo 2>>"${ban_errorlog}")" + core="$("${ban_grepcmd}" -cm1 '^core id' /proc/cpuinfo 2>>"${ban_errorlog}")" [ "${cpu}" = "0" ] && cpu="1" [ "${core}" = 
"0" ] && core="1" ban_cores="$((cpu * core))" @@ -118,11 +139,11 @@ f_system() { f_cmd() { local cmd pri_cmd="${1}" sec_cmd="${2}" - cmd="$(command -v "${pri_cmd}" 2>/dev/null)" + cmd="$(command -v "${pri_cmd}" 2>>"${ban_errorlog}")" if [ ! -x "${cmd}" ]; then if [ -n "${sec_cmd}" ]; then [ "${sec_cmd}" = "optional" ] && return - cmd="$(command -v "${sec_cmd}" 2>/dev/null)" + cmd="$(command -v "${sec_cmd}" 2>>"${ban_errorlog}")" fi if [ -x "${cmd}" ]; then printf "%s" "${cmd}" @@ -163,8 +184,9 @@ f_tmp() { f_mkdir "${ban_basedir}" ban_tmpdir="$(mktemp -p "${ban_basedir}" -d)" ban_tmpfile="$(mktemp -p "${ban_tmpdir}" -tu)" + [ "${ban_debug}" = "1" ] && : >"${ban_errorlog}" - f_log "debug" "f_tmp ::: base_dir: ${ban_basedir:-"-"}, tmp_dir: ${ban_tmpdir:-"-"}" + f_log "debug" "f_tmp ::: base_dir: ${ban_basedir:-"-"}, tmp_dir: ${ban_tmpdir:-"-"}" } # remove directories @@ -174,7 +196,7 @@ f_rmdir() { if [ -d "${dir}" ]; then rm -rf "${dir}" - f_log "debug" "f_rmdir ::: directory: ${dir}" + f_log "debug" "f_rmdir ::: directory: ${dir}" fi } @@ -205,15 +227,29 @@ f_trim() { # remove log monitor # f_rmpid() { - local ppid pid pids + local ppid pid pids_next pids_all childs newchilds - ppid="$("${ban_catcmd}" "${ban_pidfile}" 2>/dev/null)" + ppid="$("${ban_catcmd}" "${ban_pidfile}" 2>>"${ban_errorlog}")" if [ -n "${ppid}" ]; then - pids="$("${ban_pgrepcmd}" -P "${ppid}" 2>/dev/null)" - for pid in ${pids}; do - pids="${pids} $("${ban_pgrepcmd}" -P "${pid}" 2>/dev/null)" + pids_next="$("${ban_pgrepcmd}" -P "${ppid}" 2>>"${ban_errorlog}")" + pids_all="" + while [ -n "${pids_next}" ]; do + for pid in ${pids_next}; do + case " ${pids_all} " in + *" ${pid} "*) + ;; + *) pids_all="${pids_all} ${pid}" + ;; + esac + done + newchilds="" + for pid in ${pids_next}; do + childs="$("${ban_pgrepcmd}" -P "${pid}" 2>>"${ban_errorlog}")" + [ -n "${childs}" ] && newchilds="${newchilds} ${childs}" + done + pids_next="$(f_trim "${newchilds}")" done - for pid in ${pids}; do + for pid in 
${pids_all}; do kill -INT "${pid}" >/dev/null 2>&1 done fi @@ -227,9 +263,9 @@ f_log() { if [ -n "${log_msg}" ] && { [ "${class}" != "debug" ] || [ "${ban_debug}" = "1" ]; }; then if [ -x "${ban_logcmd}" ]; then - "${ban_logcmd}" -p "${class}" -t "banIP-${ban_ver}[${$}]" "${log_msg::512}" + "${ban_logcmd}" -p "${class}" -t "banIP-${ban_bver}[${$}]" "${log_msg::256}" else - printf "%s %s %s\n" "${class}" "banIP-${ban_ver}[${$}]" "${log_msg::512}" + printf "%s %s %s\n" "${class}" "banIP-${ban_bver}[${$}]" "${log_msg::256}" fi fi if [ "${class}" = "err" ] || [ "${class}" = "emerg" ]; then @@ -254,67 +290,46 @@ f_log() { f_conf() { local rir ccode region country - unset ban_dev ban_vlanallow ban_vlanblock ban_ifv4 ban_ifv6 ban_feed ban_allowurl ban_blockinput ban_blockforwardwan ban_blockforwardlan ban_logterm ban_region ban_country ban_asn config_cb() { option_cb() { - local option="${1}" - local value="${2}" - eval "${option}=\"${value}\"" - } - list_cb() { - local option="${1}" - local value="${2}" + local option="${1}" value="${2//\"/\\\"}" + case "${option}" in - "ban_ifv4") - eval "${option}=\"$(printf "%s" "${ban_ifv4}")${value} \"" - ;; - "ban_ifv6") - eval "${option}=\"$(printf "%s" "${ban_ifv6}")${value} \"" - ;; - "ban_dev") - eval "${option}=\"$(printf "%s" "${ban_dev}")${value} \"" - ;; - "ban_vlanallow") - eval "${option}=\"$(printf "%s" "${ban_vlanallow}")${value} \"" - ;; - "ban_vlanblock") - eval "${option}=\"$(printf "%s" "${ban_vlanblock}")${value} \"" + *[!a-zA-Z0-9_]*) ;; - "ban_trigger") - eval "${option}=\"$(printf "%s" "${ban_trigger}")${value} \"" + *) + eval "${option}=\"\${value}\"" ;; - "ban_feed") - eval "${option}=\"$(printf "%s" "${ban_feed}")${value} \"" - ;; - "ban_allowurl") - eval "${option}=\"$(printf "%s" "${ban_allowurl}")${value} \"" - ;; - "ban_blockinput") - eval "${option}=\"$(printf "%s" "${ban_blockinput}")${value} \"" - ;; - "ban_blockforwardwan") - eval "${option}=\"$(printf "%s" "${ban_blockforwardwan}")${value} \"" - ;; - 
"ban_blockforwardlan") - eval "${option}=\"$(printf "%s" "${ban_blockforwardlan}")${value} \"" + esac + } + list_cb() { + local append option="${1}" value="${2//\"/\\\"}" + + case "${option}" in + *[!a-zA-Z0-9_]*) ;; "ban_logterm") - eval "${option}=\"$(printf "%s" "${ban_logterm}")${value}\\|\"" - ;; - "ban_region") - eval "${option}=\"$(printf "%s" "${ban_region}")${value} \"" - ;; - "ban_country") - eval "${option}=\"$(printf "%s" "${ban_country}")${value} \"" + eval "append=\"\${${option}}\"" + if [ -n "${append}" ]; then + eval "${option}=\"${append}\\|${value}\"" + else + eval "${option}=\"${value}\"" + fi ;; - "ban_asn") - eval "${option}=\"$(printf "%s" "${ban_asn}")${value} \"" + *) + eval "append=\"\${${option}}\"" + eval "${option}=\"${append}${value} \"" ;; esac } } config_load banip - [ -f "${ban_logreadfile}" ] && ban_logreadcmd="$(command -v tail)" || ban_logreadcmd="$(command -v logread)" + + if [ -f "${ban_logreadfile}" ]; then + ban_logreadcmd="$(command -v tail)" + else + ban_logreadcmd="$(command -v logread)" + fi for rir in ${ban_region}; do while read -r ccode region country; do @@ -325,26 +340,163 @@ f_conf() { done } +# IPv4/IPv6 validation +# +f_chkip() { + local ipv type prefix separator col1 col2 + + ipv="${1}" + type="${2}" + case "${type}" in + "feed"|"local") + case "${3}" in + [0-9][0-9]) + prefix="" + col1="${3:0:1}" + col2="${3:1:1}" + separator="${4:-[[:space:]]+}" + ;; + [0-9]) + prefix="" + col1="${3}" + col2="" + separator="${4:-[[:space:]]+}" + ;; + *) + prefix="${3}" + col1="${4}" + col2="" + separator="${5:-[[:space:]]+}" + ;; + esac + ;; + "suricata") + prefix="" + col1="${3}" + col2="" + separator="${4:-[[:space:]]+}" + ;; + esac + "${ban_awkcmd}" -v ipv="${ipv}" -v type="${type}" -v pre="${prefix}" -v col1="${col1}" -v col2="${col2}" -F "${separator}" ' + { + # suricata pre-processing + if (type == "suricata") { + delete M + if (ipv == "4") { + match($0, /content:"(([0-9]{1,3}\.){3}[0-9]{1,3})"/, M) + } else if (ipv == 
"6") { + match($0, /content:"(([A-Fa-f0-9]{0,4}:){2,7}[A-Fa-f0-9]{0,4})"/, M) + } + if (M[1] == "") next + $col1 = M[1] + } + ip = $col1 + gsub(/\r|^[[:space:]]+|[[:space:]]+$/, "", ip) + # prefix filter + if (pre != "" && index($0, pre) != 1) next + # skip empty lines or comments + if (ip == "" || ip ~ /^#/) next + # reject invalid lengths + len = length(ip) + if (len < 3 || len > 43) next + # reject MAC addresses when ipv=6 + if (ipv == "6" && ip ~ /^([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}$/) next + # reject IPv4 when ipv=6 + if (ipv == "6" && ip ~ /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/) next + # reject IPv4-mapped IPv6 addresses + if (ipv == "6" && tolower(ip) ~ /^::ffff:/) next + # reject IPv6 when ipv=4 + if (ipv == "4" && ip ~ /:/) next + # apply mask + if (col2 != "") { + mask = $col2 + lowip = (ipv == "4") ? ip "/" mask : tolower(ip "/" mask) + } else { + lowip = (ipv == "4") ? ip : tolower(ip) + } + # CIDR check + if (lowip ~ /\//) { + if (split(lowip, C, "/") != 2) next + base = C[1] + mask = C[2] + if (mask !~ /^[0-9]+$/) next + # IPv4 CIDR + if (ipv == "4") { + if (base ~ /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/) { + if (mask > 32) next + n = split(base, A, ".") + # reject loopback and unspecified addresses + if (A[1] == 127 || base == "0.0.0.0") next + # reject leading zeros and octets > 255 + for (i=1; i<=4; i++) { + if (length(A[i]) > 1 && substr(A[i], 1, 1) == "0") next + if (A[i] > 255) next + } + print lowip ", " + next + } + } + # IPv6 CIDR + if (ipv == "6") { + if (base ~ /^[0-9a-f:]+$/ && base ~ /:/) { + if (mask > 128) next + if (base == "::1" || base == "::") next + if (base ~ /^fe80:/) next + print lowip ", " + next + } + } + } + # IPv4 check + if (ipv == "4") { + if (lowip ~ /^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$/) { + n = split(lowip, A, ".") + # reject loopback and unspecified addresses + if (A[1] == 127 || lowip == "0.0.0.0") next + # reject leading zeros and octets > 255 + for (i=1; i<=4; i++) { + if (length(A[i]) > 1 && substr(A[i], 1, 1) == "0") next + 
if (A[i] > 255) next + } + print lowip ", " + next + } + } + # IPv6 check + if (ipv == "6") { + if (lowip ~ /^[0-9a-f:]+$/ && lowip ~ /:/) { + # reject loopback and unspecified addresses + if (lowip == "::1" || lowip == "::") next + # reject link-local addresses + if (lowip ~ /^fe80:/) next + print lowip ", " + next + } + } + }' + + f_log "debug" "f_chkip ::: feed: ${feed}, ipver: ${ipv}, type: ${type}, prefix: ${prefix:-"-"}, col1: ${col1:-"-"}, col2: ${col2:-"-"}, separator: ${separator:-"-"}" +} + # get nft/monitor actuals # f_actual() { local nft monitor ppid pids pid - if "${ban_nftcmd}" -t list set inet banIP allowlistv4MAC >/dev/null 2>&1; then + if "${ban_nftcmd}" -t list table inet banIP >/dev/null 2>&1; then nft="$(f_char "1")" else nft="$(f_char "0")" fi - ppid="$("${ban_catcmd}" "${ban_pidfile}" 2>/dev/null)" + ppid="$("${ban_catcmd}" "${ban_pidfile}" 2>>"${ban_errorlog}")" if [ -n "${ppid}" ]; then - pids="$("${ban_pgrepcmd}" -P "${ppid}" 2>/dev/null)" + monitor="$(f_char "0")" + pids="$("${ban_pgrepcmd}" -P "${ppid}" 2>>"${ban_errorlog}")" for pid in ${pids}; do if "${ban_pgrepcmd}" -f "${ban_logreadcmd##*/}" -P "${pid}" >/dev/null 2>&1; then monitor="$(f_char "1")" break - else - monitor="$(f_char "0")" fi done else @@ -355,29 +507,26 @@ f_actual() { # get fetch utility # -f_getfetch() { - local util utils insecure +f_getdl() { + local fetch fetch_list insecure update="0" ban_fetchcmd="$(command -v "${ban_fetchcmd}")" if { [ "${ban_autodetect}" = "1" ] && [ -z "${ban_fetchcmd}" ]; } || [ ! 
-x "${ban_fetchcmd}" ]; then - utils="aria2 curl wget-ssl libustream-openssl libustream-wolfssl libustream-mbedtls" - for util in ${utils}; do - if printf "%s" "${ban_packages}" | "${ban_jsoncmd}" -ql1 -e "@.packages[\"${util}\"]" >/dev/null 2>&1; then - case "${util}" in - "aria2") - util="aria2c" - ;; + fetch_list="curl wget-ssl libustream-openssl libustream-wolfssl libustream-mbedtls" + for fetch in ${fetch_list}; do + if printf "%s" "${ban_packages}" | "${ban_grepcmd}" -q "\"${fetch}"; then + case "${fetch}" in "wget-ssl") - util="wget" + fetch="wget" ;; "libustream-openssl" | "libustream-wolfssl" | "libustream-mbedtls") - util="uclient-fetch" + fetch="uclient-fetch" ;; esac - - if [ -x "$(command -v "${util}")" ]; then - ban_fetchcmd="$(command -v "${util}")" - uci_set banip global ban_fetchcmd "${util}" + if [ -x "$(command -v "${fetch}")" ]; then + update="1" + ban_fetchcmd="$(command -v "${fetch}")" + uci_set banip global ban_fetchcmd "${fetch}" uci_commit "banip" break fi @@ -386,34 +535,30 @@ f_getfetch() { fi [ ! 
-x "${ban_fetchcmd}" ] && f_log "err" "download utility with SSL support not found, please set 'ban_fetchcmd' manually" - case "${ban_fetchcmd##*/}" in - "aria2c") - [ "${ban_fetchinsecure}" = "1" ] && insecure="--check-certificate=false" - ban_fetchparm="${ban_fetchparm:-"${insecure} --timeout=20 --retry-wait=10 --max-tries=${ban_fetchretry} --max-file-not-found=${ban_fetchretry} --allow-overwrite=true --auto-file-renaming=false --log-level=warn --dir=/ -o"}" - ban_rdapparm="--timeout=5 --allow-overwrite=true --auto-file-renaming=false --dir=/ -o" - ban_etagparm="--timeout=5 --allow-overwrite=true --auto-file-renaming=false --dir=/ --dry-run --log -" - ;; "curl") [ "${ban_fetchinsecure}" = "1" ] && insecure="--insecure" ban_fetchparm="${ban_fetchparm:-"${insecure} --connect-timeout 20 --retry-delay 10 --retry ${ban_fetchretry} --retry-max-time $((ban_fetchretry * 20)) --retry-all-errors --fail --silent --show-error --location -o"}" ban_rdapparm="--connect-timeout 5 --silent --location -o" ban_etagparm="--connect-timeout 5 --silent --location --head" + ban_geoparm="--connect-timeout 5 --silent --location --data" ;; "wget") [ "${ban_fetchinsecure}" = "1" ] && insecure="--no-check-certificate" ban_fetchparm="${ban_fetchparm:-"${insecure} --no-cache --no-cookies --timeout=20 --waitretry=10 --tries=${ban_fetchretry} --retry-connrefused -O"}" ban_rdapparm="--timeout=5 -O" ban_etagparm="--timeout=5 --spider --server-response" + ban_geoparm="--timeout=5 --quiet -O- --post-data" ;; "uclient-fetch") [ "${ban_fetchinsecure}" = "1" ] && insecure="--no-check-certificate" ban_fetchparm="${ban_fetchparm:-"${insecure} --timeout=20 -O"}" ban_rdapparm="--timeout=5 -O" + ban_geoparm="--timeout=5 --quiet -O- --post-data" ;; esac - f_log "debug" "f_getfetch ::: auto: ${ban_autodetect}, cmd: ${ban_fetchcmd:-"-"}, fetch_parm: ${ban_fetchparm:-"-"}, rdap_parm: ${ban_rdapparm:-"-"}, etag_parm: ${ban_etagparm:-"-"}" + f_log "debug" "f_getdl ::: auto/update: ${ban_autodetect}/${update}, 
cmd: ${ban_fetchcmd:-"-"}" } # get wan interfaces @@ -462,7 +607,7 @@ f_getif() { ban_ifv6="$(f_trim "${ban_ifv6}")" [ -z "${ban_ifv4}" ] && [ -z "${ban_ifv6}" ] && f_log "err" "no wan interfaces" - f_log "debug" "f_getif ::: auto/update: ${ban_autodetect}/${update}, interfaces (4/6): ${ban_ifv4}/${ban_ifv6}, protocols (4/6): ${ban_protov4}/${ban_protov6}" + f_log "debug" "f_getif ::: auto/update: ${ban_autodetect}/${update}, interfaces (4/6): ${ban_ifv4}/${ban_ifv6}, protocols (4/6): ${ban_protov4}/${ban_protov6}" } # get wan devices @@ -497,13 +642,13 @@ f_getdev() { ban_dev="$(f_trim "${ban_dev}")" [ -z "${ban_dev}" ] && f_log "err" "no wan devices" - f_log "debug" "f_getdev ::: auto/update: ${ban_autodetect}/${update}, wan_devices: ${ban_dev}" + f_log "debug" "f_getdev ::: auto/update: ${ban_autodetect}/${update}, wan_devices: ${ban_dev}" } # get local uplink # -f_getuplink() { - local uplink iface ip update="0" +f_getup() { + local uplink iface timestamp ip if [ "${ban_autoallowlist}" = "1" ] && [ "${ban_autoallowuplink}" != "disable" ]; then for iface in ${ban_ifv4} ${ban_ifv6}; do @@ -521,27 +666,29 @@ f_getuplink() { elif [ "${ban_autoallowuplink}" = "ip" ]; then network_get_ipaddr6 uplink "${iface}" fi - if [ -n "${uplink}" ] && ! printf " %s " "${ban_uplink}" | "${ban_grepcmd}" -q " ${uplink} "; then + if [ -n "${uplink%fe80::*}" ] && ! printf " %s " "${ban_uplink}" | "${ban_grepcmd}" -q " ${uplink} "; then ban_uplink="${ban_uplink}${uplink} " fi done + ban_uplink="$(f_trim "${ban_uplink}")" for ip in ${ban_uplink}; do if ! "${ban_grepcmd}" -q "${ip} " "${ban_allowlist}"; then - if [ "${update}" = "0" ]; then - "${ban_sedcmd}" -i "/# uplink added on /d" "${ban_allowlist}" - fi - printf "%-45s%s\n" "${ip}" "# uplink added on $(date "+%Y-%m-%d %H:%M:%S")" >>"${ban_allowlist}" + "${ban_sedcmd}" -i "/# uplink added on /d" "${ban_allowlist}" + break + fi + done + timestamp="$(date "+%Y-%m-%d %H:%M:%S")" + for ip in ${ban_uplink}; do + if ! 
"${ban_grepcmd}" -q "${ip} " "${ban_allowlist}"; then + printf "%-45s%s\n" "${ip}" "# uplink added on ${timestamp}" >>"${ban_allowlist}" f_log "info" "add uplink '${ip}' to local allowlist" - update="1" fi done - ban_uplink="$(f_trim "${ban_uplink}")" elif [ "${ban_autoallowlist}" = "1" ] && [ "${ban_autoallowuplink}" = "disable" ]; then "${ban_sedcmd}" -i "/# uplink added on /d" "${ban_allowlist}" - update="1" fi - f_log "debug" "f_getuplink ::: auto/update: ${ban_autoallowlist}/${update}, uplink: ${ban_uplink:-"-"}" + f_log "debug" "f_getup ::: auto-allow/auto-uplink: ${ban_autoallowlist}/${ban_autoallowuplink}, uplink: ${ban_uplink:-"-"}" } # get feed information @@ -567,145 +714,240 @@ f_getfeed() { f_getelements() { local file="${1}" - [ -s "${file}" ] && printf "%s" "elements={ $("${ban_catcmd}" "${file}" 2>/dev/null) };" + [ -s "${file}" ] && printf "%s" "elements={ $("${ban_catcmd}" "${file}" 2>>"${ban_errorlog}") };" } # handle etag http header # f_etag() { - local http_head http_code etag_id etag_rc out_rc="4" feed="${1}" feed_url="${2}" feed_suffix="${3}" + local http_head http_code etag_id etag_cnt out_rc="4" feed="${1}" feed_url="${2}" feed_suffix="${3}" feed_cnt="${4:-"1"}" if [ -n "${ban_etagparm}" ]; then [ ! 
-f "${ban_backupdir}/banIP.etag" ] && : >"${ban_backupdir}/banIP.etag" http_head="$("${ban_fetchcmd}" ${ban_etagparm} "${feed_url}" 2>&1)" http_code="$(printf "%s" "${http_head}" | "${ban_awkcmd}" 'tolower($0)~/^http\/[0123\.]+ /{printf "%s",$2}')" etag_id="$(printf "%s" "${http_head}" | "${ban_awkcmd}" 'tolower($0)~/^[[:space:]]*etag: /{gsub("\"","");printf "%s",$2}')" - etag_rc="${?}" - - if [ "${http_code}" = "404" ] || { [ "${etag_rc}" = "0" ] && [ -n "${etag_id}" ] && "${ban_grepcmd}" -q "^${feed}${feed_suffix}[[:space:]]\+${etag_id}\$" "${ban_backupdir}/banIP.etag"; }; then + if [ -z "${etag_id}" ]; then + etag_id="$(printf "%s" "${http_head}" | "${ban_awkcmd}" 'tolower($0)~/^[[:space:]]*last-modified: /{gsub(/[Ll]ast-[Mm]odified:|[[:space:]]|,|:/,"");printf "%s\n",$1}')" + fi + etag_cnt="$("${ban_grepcmd}" -c "^${feed} " "${ban_backupdir}/banIP.etag")" + if [ "${http_code}" = "200" ] && [ "${etag_cnt}" = "${feed_cnt}" ] && [ -n "${etag_id}" ] && + "${ban_grepcmd}" -q "^${feed} ${feed_suffix}[[:space:]]\+${etag_id}\$" "${ban_backupdir}/banIP.etag"; then out_rc="0" - elif [ "${etag_rc}" = "0" ] && [ -n "${etag_id}" ] && ! 
"${ban_grepcmd}" -q "^${feed}${feed_suffix}[[:space:]]\+${etag_id}\$" "${ban_backupdir}/banIP.etag"; then - "${ban_sedcmd}" -i "/^${feed}${feed_suffix}/d" "${ban_backupdir}/banIP.etag" - printf "%-20s%s\n" "${feed}${feed_suffix}" "${etag_id}" >>"${ban_backupdir}/banIP.etag" + elif [ -n "${etag_id}" ]; then + if [ "${feed_cnt}" -lt "${etag_cnt}" ]; then + "${ban_sedcmd}" -i "/^${feed} /d" "${ban_backupdir}/banIP.etag" + else + "${ban_sedcmd}" -i "/^${feed} ${feed_suffix//\//\\/}/d" "${ban_backupdir}/banIP.etag" + fi + printf "%-50s%s\n" "${feed} ${feed_suffix}" "${etag_id}" >>"${ban_backupdir}/banIP.etag" out_rc="2" fi fi - f_log "debug" "f_etag ::: feed: ${feed}, suffix: ${feed_suffix:-"-"}, http_code: ${http_code:-"-"}, etag_id: ${etag_id:-"-"} , etag_rc: ${etag_rc:-"-"}, rc: ${out_rc}" + f_log "debug" "f_etag ::: feed: ${feed}, suffix: ${feed_suffix:-"-"}, http_code: ${http_code:-"-"}, feed/etag: ${feed_cnt}/${etag_cnt:-"0"}, rc: ${out_rc}" return "${out_rc}" } +# load file in nftset +# +f_nftload() { + local cnt="1" max_cnt="${ban_nftretry:-"3"}" load_rc="4" file="${1}" errmsg="${2}" + + while [ "${load_rc}" != "0" ]; do + "${ban_nftcmd}" -f "${file}" >/dev/null 2>&1 + load_rc="${?}" + if [ "${load_rc}" = "0" ]; then + break + elif [ "${cnt}" = "${max_cnt}" ]; then + [ ! 
-d "${ban_errordir}" ] && f_mkdir "${ban_errordir}" + "${ban_catcmd}" "${file}" 2>>"${ban_errorlog}" >"${ban_errordir}/err.${file##*/}" + f_log "info" "${errmsg}" + break + fi + cnt="$((cnt + 1))" + done + + f_log "debug" "f_nftload ::: file: ${file##*/}, load_rc: ${load_rc}, cnt/max_cnt: ${cnt}/${max_cnt}" + return "${load_rc}" +} + # build initial nft file with base table, chains and rules # f_nftinit() { - local wan_dev vlan_allow vlan_block log_ct log_icmp log_syn log_udp log_tcp feed_log feed_rc flag tmp_proto tmp_port allow_dport file="${1}" + local wan_dev vlan_allow vlan_block log_ct log_icmp log_syn log_udp log_tcp flag tmp_proto tmp_port allow_dport feed_rc="0" file="${1}" wan_dev="$(printf "%s" "${ban_dev}" | "${ban_sedcmd}" 's/^/\"/;s/$/\"/;s/ /\", \"/g')" [ -n "${ban_vlanallow}" ] && vlan_allow="$(printf "%s" "${ban_vlanallow%%?}" | "${ban_sedcmd}" 's/^/\"/;s/$/\"/;s/ /\", \"/g')" [ -n "${ban_vlanblock}" ] && vlan_block="$(printf "%s" "${ban_vlanblock%%?}" | "${ban_sedcmd}" 's/^/\"/;s/$/\"/;s/ /\", \"/g')" for flag in ${ban_allowflag}; do - if [ "${flag}" = "tcp" ] || [ "${flag}" = "udp" ]; then - if [ -z "${tmp_proto}" ]; then - tmp_proto="${flag}" - elif ! printf "%s" "${tmp_proto}" | "${ban_grepcmd}" -qw "${flag}"; then - tmp_proto="${tmp_proto}, ${flag}" - fi - elif [ -n "${flag//[![:digit:]-]/}" ]; then - if [ -z "${tmp_port}" ]; then - tmp_port="${flag}" - elif ! printf "%s" "${tmp_port}" | "${ban_grepcmd}" -qw "${flag}"; then - tmp_port="${tmp_port}, ${flag}" - fi - fi + case "${flag}" in + "tcp" | "udp") + if [ -z "${tmp_proto}" ]; then + tmp_proto="${flag}" + elif ! printf "%s" "${tmp_proto}" | "${ban_grepcmd}" -qw "${flag}"; then + tmp_proto="${tmp_proto}, ${flag}" + fi + ;; + "${flag//[![:digit:]-]/}") + if [ -z "${tmp_port}" ]; then + tmp_port="${flag}" + elif ! 
printf "%s" "${tmp_port}" | "${ban_grepcmd}" -qw "${flag}"; then + tmp_port="${tmp_port}, ${flag}" + fi + ;; + esac done if [ -n "${tmp_proto}" ] && [ -n "${tmp_port}" ]; then allow_dport="meta l4proto { ${tmp_proto} } th dport { ${tmp_port} }" fi if [ "${ban_logprerouting}" = "1" ]; then - log_icmp="log level ${ban_nftloglevel} prefix \"banIP/pre-icmp/drop: \"" - log_syn="log level ${ban_nftloglevel} prefix \"banIP/pre-syn/drop: \"" - log_udp="log level ${ban_nftloglevel} prefix \"banIP/pre-udp/drop: \"" - log_tcp="log level ${ban_nftloglevel} prefix \"banIP/pre-tcp/drop: \"" - log_ct="log level ${ban_nftloglevel} prefix \"banIP/pre-ct/drop: \"" + log_icmp="log level ${ban_nftloglevel} prefix \"banIP/pre-icmp/drop: \" limit rate 10/second" + log_syn="log level ${ban_nftloglevel} prefix \"banIP/pre-syn/drop: \" limit rate 10/second" + log_udp="log level ${ban_nftloglevel} prefix \"banIP/pre-udp/drop: \" limit rate 10/second" + log_tcp="log level ${ban_nftloglevel} prefix \"banIP/pre-tcp/drop: \" limit rate 10/second" + log_ct="log level ${ban_nftloglevel} prefix \"banIP/pre-ct/drop: \" limit rate 10/second" fi { - # nft header (tables and chains) + # nft header (tables, base and regular chains) # printf "%s\n\n" "#!${ban_nftcmd} -f" - if "${ban_nftcmd}" -t list set inet banIP allowlistv4MAC >/dev/null 2>&1; then + if "${ban_nftcmd}" -t list table inet banIP >/dev/null 2>&1; then printf "%s\n" "delete table inet banIP" fi printf "%s\n" "add table inet banIP" - printf "%s\n" "add counter inet banIP cnt-icmpflood" - printf "%s\n" "add counter inet banIP cnt-udpflood" - printf "%s\n" "add counter inet banIP cnt-synflood" - printf "%s\n" "add counter inet banIP cnt-tcpinvalid" - printf "%s\n" "add counter inet banIP cnt-ctinvalid" - printf "%s\n" "add chain inet banIP pre-routing { type filter hook prerouting priority -150; policy accept; }" + + # base chains + # + printf "%s\n" "add chain inet banIP pre-routing { type filter hook prerouting priority -175; policy 
accept; }" printf "%s\n" "add chain inet banIP wan-input { type filter hook input priority ${ban_nftpriority}; policy accept; }" printf "%s\n" "add chain inet banIP wan-forward { type filter hook forward priority ${ban_nftpriority}; policy accept; }" printf "%s\n" "add chain inet banIP lan-forward { type filter hook forward priority ${ban_nftpriority}; policy accept; }" - printf "%s\n" "add chain inet banIP reject-chain" + + # regular chains + # + printf "%s\n" "add chain inet banIP _inbound" + printf "%s\n" "add chain inet banIP _outbound" + printf "%s\n" "add chain inet banIP _reject" + + # named counter + # + printf "%s\n" "add counter inet banIP cnt_icmpflood" + printf "%s\n" "add counter inet banIP cnt_udpflood" + printf "%s\n" "add counter inet banIP cnt_synflood" + printf "%s\n" "add counter inet banIP cnt_tcpinvalid" + printf "%s\n" "add counter inet banIP cnt_ctinvalid" + printf "%s\n" "add counter inet banIP cnt_bcp38" # default reject chain rules # - printf "%s\n" "add rule inet banIP reject-chain meta l4proto tcp reject with tcp reset" - printf "%s\n" "add rule inet banIP reject-chain reject" + printf "%s\n" "add rule inet banIP _reject iifname != { ${wan_dev} } meta l4proto tcp reject with tcp reset" + printf "%s\n" "add rule inet banIP _reject reject with icmpx host-unreachable" # default pre-routing rules # printf "%s\n" "add rule inet banIP pre-routing iifname != { ${wan_dev} } counter accept" - printf "%s\n" "add rule inet banIP pre-routing ct state invalid ${log_ct} counter name cnt-ctinvalid drop" + + # ct state invalid + # + if [ "${ban_logprerouting}" = "1" ]; then + printf "%s\n" "add rule inet banIP pre-routing ct state invalid ${log_ct}" + fi + printf "%s\n" "add rule inet banIP pre-routing ct state invalid counter name cnt_ctinvalid drop" + + # ICMP Flood + # if [ "${ban_icmplimit}" -gt "0" ]; then - printf "%s\n" "add rule inet banIP pre-routing ip protocol icmp limit rate over ${ban_icmplimit}/second ${log_icmp} counter name cnt-icmpflood 
drop" - printf "%s\n" "add rule inet banIP pre-routing ip6 nexthdr icmpv6 limit rate over ${ban_icmplimit}/second ${log_icmp} counter name cnt-icmpflood drop" + if [ "${ban_logprerouting}" = "1" ]; then + printf "%s\n" "add rule inet banIP pre-routing meta nfproto . meta l4proto { ipv4 . icmp , ipv6 . icmpv6 } limit rate over ${ban_icmplimit}/second ${log_icmp}" + fi + printf "%s\n" "add rule inet banIP pre-routing meta nfproto . meta l4proto { ipv4 . icmp , ipv6 . icmpv6 } limit rate over ${ban_icmplimit}/second counter name cnt_icmpflood drop" fi - [ "${ban_udplimit}" -gt "0" ] && printf "%s\n" "add rule inet banIP pre-routing meta l4proto udp ct state new limit rate over ${ban_udplimit}/second ${log_udp} counter name cnt-udpflood drop" - [ "${ban_synlimit}" -gt "0" ] && printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|ack) == syn limit rate over ${ban_synlimit}/second ${log_syn} counter name cnt-synflood drop" - printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn) == (fin|syn) ${log_tcp} counter name cnt-tcpinvalid drop" - printf "%s\n" "add rule inet banIP pre-routing tcp flags & (syn|rst) == (syn|rst) ${log_tcp} counter name cnt-tcpinvalid drop" - printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|psh|ack|urg) < (fin) ${log_tcp} counter name cnt-tcpinvalid drop" - printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|psh|ack|urg) == (fin|psh|urg) ${log_tcp} counter name cnt-tcpinvalid drop" + + # UDP Flood + # + if [ "${ban_udplimit}" -gt "0" ]; then + if [ "${ban_logprerouting}" = "1" ]; then + printf "%s\n" "add rule inet banIP pre-routing meta l4proto udp ct state new limit rate over ${ban_udplimit}/second ${log_udp}" + fi + printf "%s\n" "add rule inet banIP pre-routing meta l4proto udp ct state new limit rate over ${ban_udplimit}/second counter name cnt_udpflood drop" + fi + + # SYN Flood + # + if [ "${ban_synlimit}" -gt "0" ]; then + if [ "${ban_logprerouting}" = "1" ]; then + 
printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|ack) == syn limit rate over ${ban_synlimit}/second ${log_syn}" + fi + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|ack) == syn limit rate over ${ban_synlimit}/second counter name cnt_synflood drop" + fi + + # TCP Invalid + # + if [ "${ban_logprerouting}" = "1" ]; then + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn) == (fin|syn) ${log_tcp}" + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (syn|rst) == (syn|rst) ${log_tcp}" + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|psh|ack|urg) < (fin) ${log_tcp}" + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|psh|ack|urg) == (fin|psh|urg) ${log_tcp}" + fi + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn) == (fin|syn) counter name cnt_tcpinvalid drop" + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (syn|rst) == (syn|rst) counter name cnt_tcpinvalid drop" + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|psh|ack|urg) < (fin) counter name cnt_tcpinvalid drop" + printf "%s\n" "add rule inet banIP pre-routing tcp flags & (fin|syn|rst|psh|ack|urg) == (fin|psh|urg) counter name cnt_tcpinvalid drop" # default wan-input rules # - printf "%s\n" "add rule inet banIP wan-input iifname != { ${wan_dev} } counter accept" printf "%s\n" "add rule inet banIP wan-input ct state established,related counter accept" + printf "%s\n" "add rule inet banIP wan-input iifname != { ${wan_dev} } counter accept" printf "%s\n" "add rule inet banIP wan-input meta nfproto ipv4 udp sport 67-68 udp dport 67-68 counter accept" printf "%s\n" "add rule inet banIP wan-input meta nfproto ipv6 udp sport 547 udp dport 546 counter accept" - printf "%s\n" "add rule inet banIP wan-input meta nfproto ipv6 icmpv6 type { nd-neighbor-advert, nd-neighbor-solicit, nd-router-advert} ip6 hoplimit 1 counter accept" - printf "%s\n" "add rule 
inet banIP wan-input meta nfproto ipv6 icmpv6 type { nd-neighbor-advert, nd-neighbor-solicit, nd-router-advert} ip6 hoplimit 255 counter accept" + printf "%s\n" "add rule inet banIP wan-input meta nfproto ipv6 icmpv6 type { nd-neighbor-solicit, nd-neighbor-advert, nd-router-advert } ip6 hoplimit 255 counter accept" [ -n "${allow_dport}" ] && printf "%s\n" "add rule inet banIP wan-input ${allow_dport} counter accept" + if [ "${ban_bcp38}" = "1" ]; then + printf "%s\n" "add rule inet banIP wan-input fib saddr . iif oif missing counter name cnt_bcp38 drop" + fi + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP wan-input meta mark set 1 counter jump _inbound" + else + printf "%s\n" "add rule inet banIP wan-input counter jump _inbound" + fi # default wan-forward rules # - printf "%s\n" "add rule inet banIP wan-forward iifname != { ${wan_dev} } counter accept" printf "%s\n" "add rule inet banIP wan-forward ct state established,related counter accept" + printf "%s\n" "add rule inet banIP wan-forward iifname != { ${wan_dev} } counter accept" [ -n "${allow_dport}" ] && printf "%s\n" "add rule inet banIP wan-forward ${allow_dport} counter accept" + if [ "${ban_bcp38}" = "1" ]; then + printf "%s\n" "add rule inet banIP wan-forward fib saddr . 
iif oif missing counter name cnt_bcp38 drop" + fi + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP wan-forward meta mark set 2 counter jump _inbound" + else + printf "%s\n" "add rule inet banIP wan-forward counter jump _inbound" + fi # default lan-forward rules # - printf "%s\n" "add rule inet banIP lan-forward oifname != { ${wan_dev} } counter accept" printf "%s\n" "add rule inet banIP lan-forward ct state established,related counter accept" + printf "%s\n" "add rule inet banIP lan-forward oifname != { ${wan_dev} } counter accept" [ -n "${vlan_allow}" ] && printf "%s\n" "add rule inet banIP lan-forward iifname { ${vlan_allow} } counter accept" - [ -n "${vlan_block}" ] && printf "%s\n" "add rule inet banIP lan-forward iifname { ${vlan_block} } counter goto reject-chain" + [ -n "${vlan_block}" ] && printf "%s\n" "add rule inet banIP lan-forward iifname { ${vlan_block} } counter goto _reject" + if [ "${ban_bcp38}" = "1" ]; then + printf "%s\n" "add rule inet banIP lan-forward fib saddr . 
iif oif missing counter name cnt_bcp38 drop" + fi + printf "%s\n" "add rule inet banIP lan-forward counter jump _outbound" } >"${file}" - # load initial banIP table within nft (atomic load) + # load initial banIP table/rules to nftset # - feed_log="$("${ban_nftcmd}" -f "${file}" 2>&1)" + f_nftload "${file}" "can't initialize banIP nftables namespace" feed_rc="${?}" + [ "${feed_rc}" = "0" ] && f_log "info" "initialize banIP nftables namespace" - if [ "${feed_rc}" = "0" ]; then - f_log "info" "initialize banIP nftables namespace" - else - f_log "err" "can't initialize banIP nftables namespace (rc: ${feed_rc}, log: ${feed_log})" - fi - - f_log "debug" "f_nftinit ::: wan_dev: ${wan_dev}, vlan_allow: ${vlan_allow:-"-"}, vlan_block: ${vlan_block:-"-"}, allowed_dports: ${allow_dport:-"-"}, priority: ${ban_nftpriority}, policy: ${ban_nftpolicy}, icmp_limit: ${ban_icmplimit}, syn_limit: ${ban_synlimit}, udp_limit: ${ban_udplimit}, loglevel: ${ban_nftloglevel}, rc: ${feed_rc:-"-"}, log: ${feed_log:-"-"}" + f_log "debug" "f_nftinit ::: wan_dev: ${wan_dev}, vlan_allow: ${vlan_allow:-"-"}, vlan_block: ${vlan_block:-"-"}, allowed_dports: ${allow_dport:-"-"}, priority: ${ban_nftpriority}, policy: ${ban_nftpolicy}, icmp_limit: ${ban_icmplimit}, syn_limit: ${ban_synlimit}, udp_limit: ${ban_udplimit}, loglevel: ${ban_nftloglevel}, rc: ${feed_rc:-"-"}" : >"${file}" return "${feed_rc}" } @@ -713,150 +955,192 @@ f_nftinit() { # handle downloads # f_down() { - local log_input log_forwardwan log_forwardlan start_ts end_ts tmp_raw tmp_load tmp_file split_file ruleset_raw handle rc etag_rc - local expr cnt_set cnt_dl restore_rc feed_direction feed_rc feed_log feed_comp feed_target feed_dport tmp_proto tmp_port flag - local feed="${1}" proto="${2}" feed_url="${3}" feed_rule="${4}" feed_flag="${5}" + local log_inbound log_outbound start_ts end_ts tmp_raw tmp_load tmp_file split_file table_json handles handle etag_rc etag_cnt element_count + local expr cnt_set cnt_dl restore_rc feed_direction 
feed_policy feed_rc feed_comp feed_complete feed_target feed_dport chain flag + local tmp_proto tmp_port asn country feed="${1}" feed_ipv="${2}" feed_url="${3}" feed_rule="${4}" feed_chain="${5}" feed_flag="${6}" start_ts="$(date +%s)" - feed="${feed}v${proto}" + feed="${feed}.v${feed_ipv}" tmp_load="${ban_tmpfile}.${feed}.load" tmp_raw="${ban_tmpfile}.${feed}.raw" tmp_split="${ban_tmpfile}.${feed}.split" tmp_file="${ban_tmpfile}.${feed}.file" tmp_flush="${ban_tmpfile}.${feed}.flush" tmp_nft="${ban_tmpfile}.${feed}.nft" - tmp_allow="${ban_tmpfile}.${feed%v*}" + tmp_allow="${ban_tmpfile}.${feed%.*}" - [ "${ban_loginput}" = "1" ] && log_input="log level ${ban_nftloglevel} prefix \"banIP/inp-wan/${ban_blocktype}/${feed}: \"" - [ "${ban_logforwardwan}" = "1" ] && log_forwardwan="log level ${ban_nftloglevel} prefix \"banIP/fwd-wan/${ban_blocktype}/${feed}: \"" - [ "${ban_logforwardlan}" = "1" ] && log_forwardlan="log level ${ban_nftloglevel} prefix \"banIP/fwd-lan/reject/${feed}: \"" + # set log target + # + [ "${ban_loginbound}" = "1" ] && log_inbound="log level ${ban_nftloglevel} prefix \"banIP/inbound/${ban_blockpolicy}/${feed}: \" limit rate 10/second" + [ "${ban_logoutbound}" = "1" ] && log_outbound="log level ${ban_nftloglevel} prefix \"banIP/outbound/reject/${feed}: \" limit rate 10/second" # set feed target # - if [ "${ban_blocktype}" = "reject" ]; then - feed_target="goto reject-chain" + if [ "${ban_blockpolicy}" = "reject" ]; then + feed_target="goto _reject" else feed_target="drop" fi - # set feed block direction + # set element counter flag # - if [ "${ban_blockpolicy}" = "input" ]; then - if ! printf "%s" "${ban_blockinput}" | "${ban_grepcmd}" -q "${feed%v*}" && - ! printf "%s" "${ban_blockforwardwan}" | "${ban_grepcmd}" -q "${feed%v*}" && - ! printf "%s" "${ban_blockforwardlan}" | "${ban_grepcmd}" -q "${feed%v*}"; then - ban_blockinput="${ban_blockinput} ${feed%v*}" - fi - elif [ "${ban_blockpolicy}" = "forwardwan" ]; then - if ! 
printf "%s" "${ban_blockinput}" | "${ban_grepcmd}" -q "${feed%v*}" && - ! printf "%s" "${ban_blockforwardwan}" | "${ban_grepcmd}" -q "${feed%v*}" && - ! printf "%s" "${ban_blockforwardlan}" | "${ban_grepcmd}" -q "${feed%v*}"; then - ban_blockforwardwan="${ban_blockforwardwan} ${feed%v*}" - fi - elif [ "${ban_blockpolicy}" = "forwardlan" ]; then - if ! printf "%s" "${ban_blockinput}" | "${ban_grepcmd}" -q "${feed%v*}" && - ! printf "%s" "${ban_blockforwardwan}" | "${ban_grepcmd}" -q "${feed%v*}" && - ! printf "%s" "${ban_blockforwardlan}" | "${ban_grepcmd}" -q "${feed%v*}"; then - ban_blockforwardlan="${ban_blockforwardlan} ${feed%v*}" - fi + if [ "${ban_nftcount}" = "1" ]; then + element_count="counter" fi - if printf "%s" "${ban_blockinput}" | "${ban_grepcmd}" -q "${feed%v*}"; then - feed_direction="input" - fi - if printf "%s" "${ban_blockforwardwan}" | "${ban_grepcmd}" -q "${feed%v*}"; then - feed_direction="${feed_direction} forwardwan" + + # set feed complete flag + # + if printf "%s" "${ban_feedcomplete}" | "${ban_grepcmd}" -q "${feed%%.*}"; then + feed_complete="true" fi - if printf "%s" "${ban_blockforwardlan}" | "${ban_grepcmd}" -q "${feed%v*}"; then - feed_direction="${feed_direction} forwardlan" + + # set feed direction + # + if printf "%s" "${ban_feedin}" | "${ban_grepcmd}" -q "${feed%%.*}"; then + feed_policy="in" + feed_direction="inbound" + elif printf "%s" "${ban_feedout}" | "${ban_grepcmd}" -q "${feed%%.*}"; then + feed_policy="out" + feed_direction="outbound" + elif printf "%s" "${ban_feedinout}" | "${ban_grepcmd}" -q "${feed%%.*}"; then + feed_policy="inout" + feed_direction="inbound outbound" + else + feed_policy="${feed_chain}" + case "${feed_chain}" in + "in") + feed_direction="inbound" + ;; + "out") + feed_direction="outbound" + ;; + "inout") + feed_direction="inbound outbound" + ;; + *) + feed_direction="inbound" + ;; + esac fi # prepare feed flags # for flag in ${feed_flag}; do - if [ "${flag}" = "gz" ]; then - feed_comp="${flag}" - elif [ 
"${flag}" = "tcp" ] || [ "${flag}" = "udp" ]; then - if [ -z "${tmp_proto}" ]; then - tmp_proto="${flag}" - elif ! printf "%s" "${tmp_proto}" | "${ban_grepcmd}" -qw "${flag}"; then - tmp_proto="${tmp_proto}, ${flag}" - fi - elif [ -n "${flag//[![:digit:]-]/}" ]; then - if [ -z "${tmp_port}" ]; then - tmp_port="${flag}" - elif ! printf "%s" "${tmp_port}" | "${ban_grepcmd}" -qw "${flag}"; then - tmp_port="${tmp_port}, ${flag}" - fi - fi + case "${flag}" in + "gz") + feed_comp="${flag}" + ;; + "tcp" | "udp") + if [ -z "${tmp_proto}" ]; then + tmp_proto="${flag}" + elif ! printf "%s" "${tmp_proto}" | "${ban_grepcmd}" -qw "${flag}"; then + tmp_proto="${tmp_proto}, ${flag}" + fi + ;; + "${flag//[![:digit:]-]/}") + if [ -z "${tmp_port}" ]; then + tmp_port="${flag}" + elif ! printf "%s" "${tmp_port}" | "${ban_grepcmd}" -qw "${flag}"; then + tmp_port="${tmp_port}, ${flag}" + fi + ;; + esac done - if [ -n "${tmp_proto}" ] && [ -n "${tmp_port}" ]; then - feed_dport="meta l4proto { ${tmp_proto} } th dport { ${tmp_port} }" + + if ! 
printf "%s" "${ban_feedreset}" | "${ban_grepcmd}" -q "${feed%%.*}"; then + if [ -n "${tmp_proto}" ] && [ -n "${tmp_port}" ]; then + feed_dport="meta l4proto { ${tmp_proto} } th dport { ${tmp_port} }" + fi fi # chain/rule maintenance # if [ "${ban_action}" = "reload" ] && "${ban_nftcmd}" -t list set inet banIP "${feed}" >/dev/null 2>&1; then - ruleset_raw="$("${ban_nftcmd}" -tj list ruleset 2>/dev/null)" + table_json="$("${ban_nftcmd}" -tja list table inet banIP 2>>"${ban_errorlog}")" { - printf "%s\n" "flush set inet banIP ${feed}" - for expr in 0 1; do - handle="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-input\"][@.expr[${expr}].match.right=\"@${feed}\"].handle")" - [ -n "${handle}" ] && printf "%s\n" "delete rule inet banIP wan-input handle ${handle}" - handle="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-forward\"][@.expr[${expr}].match.right=\"@${feed}\"].handle")" - [ -n "${handle}" ] && printf "%s\n" "delete rule inet banIP wan-forward handle ${handle}" - handle="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"lan-forward\"][@.expr[${expr}].match.right=\"@${feed}\"].handle")" - [ -n "${handle}" ] && printf "%s\n" "delete rule inet banIP lan-forward handle ${handle}" + for chain in _inbound _outbound; do + for expr in 0 1 2; do + handles="$(printf "%s\n" "${table_json}" | "${ban_jsoncmd}" -q -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[${expr}].match.right=\"@${feed}\"].handle" | "${ban_xargscmd}")" + for handle in ${handles}; do + printf "%s\n" "delete rule inet banIP ${chain} handle ${handle}" + done + done done + printf "%s\n" "flush set inet banIP ${feed}" + printf "%s\n\n" "delete set inet banIP ${feed}" } >"${tmp_flush}" fi # restore local backups # - if [ "${feed%v*}" != "blocklist" ]; then - if [ -n "${ban_etagparm}" ] && [ "${ban_action}" = 
"reload" ] && [ "${feed_url}" != "local" ] && [ "${feed%v*}" != "allowlist" ]; then + if [ "${feed%%.*}" != "blocklist" ]; then + if [ -n "${ban_etagparm}" ] && [ "${ban_action}" = "reload" ] && [ "${feed_url}" != "local" ] && [ "${feed%%.*}" != "allowlist" ]; then etag_rc="0" - if [ "${feed%v*}" = "country" ]; then - for country in ${ban_country}; do - f_etag "${feed}" "${feed_url}${country}-aggregated.zone" ".${country}" - rc="${?}" - [ "${rc}" = "4" ] && break - etag_rc="$((etag_rc + rc))" - done - elif [ "${feed%v*}" = "asn" ]; then - for asn in ${ban_asn}; do - f_etag "${feed}" "${feed_url}AS${asn}" ".${asn}" - rc="${?}" - [ "${rc}" = "4" ] && break - etag_rc="$((etag_rc + rc))" - done - else - f_etag "${feed}" "${feed_url}" - etag_rc="${?}" - fi + case "${feed%%.*}" in + "country") + if [ "${ban_countrysplit}" = "1" ]; then + country="${feed%.*}" + country="${country#*.}" + f_etag "${feed}" "${feed_url}${country}-aggregated.zone" ".${country}" + etag_rc="${?}" + else + etag_rc="0" + etag_cnt="$(printf "%s" "${ban_country}" | "${ban_wccmd}" -w)" + for country in ${ban_country}; do + if ! f_etag "${feed}" "${feed_url}${country}-aggregated.zone" ".${country}" "${etag_cnt}"; then + etag_rc="$((etag_rc + 1))" + fi + done + fi + ;; + "asn") + if [ "${ban_asnsplit}" = "1" ]; then + asn="${feed%.*}" + asn="${asn#*.}" + f_etag "${feed}" "${feed_url}AS${asn}" ".${asn}" + etag_rc="${?}" + else + etag_rc="0" + etag_cnt="$(printf "%s" "${ban_asn}" | "${ban_wccmd}" -w)" + for asn in ${ban_asn}; do + if ! f_etag "${feed}" "${feed_url}AS${asn}" ".${asn}" "${etag_cnt}"; then + etag_rc="$((etag_rc + 1))" + fi + done + fi + ;; + *) + f_etag "${feed}" "${feed_url}" + etag_rc="${?}" + ;; + esac fi if [ "${etag_rc}" = "0" ] || [ "${ban_action}" != "reload" ] || [ "${feed_url}" = "local" ]; then - if [ "${feed%v*}" = "allowlist" ] && [ ! -f "${tmp_allow}" ]; then + if [ "${feed%%.*}" = "allowlist" ] && [ ! 
-f "${tmp_allow}" ]; then f_restore "allowlist" "-" "${tmp_allow}" "${etag_rc}" + restore_rc="${?}" else f_restore "${feed}" "${feed_url}" "${tmp_load}" "${etag_rc}" + restore_rc="${?}" fi - restore_rc="${?}" feed_rc="${restore_rc}" fi fi # prepare local/remote allowlist # - if [ "${feed%v*}" = "allowlist" ] && [ ! -f "${tmp_allow}" ]; then - "${ban_catcmd}" "${ban_allowlist}" 2>/dev/null >"${tmp_allow}" + if [ "${feed%%.*}" = "allowlist" ] && [ ! -f "${tmp_allow}" ]; then + "${ban_catcmd}" "${ban_allowlist}" 2>>"${ban_errorlog}" >"${tmp_allow}" feed_rc="${?}" for feed_url in ${ban_allowurl}; do - feed_log="$("${ban_fetchcmd}" ${ban_fetchparm} "${tmp_load}" "${feed_url}" 2>&1)" - feed_rc="${?}" - if [ "${feed_rc}" = "0" ] && [ -s "${tmp_load}" ]; then - "${ban_catcmd}" "${tmp_load}" 2>/dev/null >>"${tmp_allow}" + if "${ban_fetchcmd}" ${ban_fetchparm} "${tmp_load}" "${feed_url}" 2>>"${ban_errorlog}"; then + if [ -s "${tmp_load}" ]; then + "${ban_catcmd}" "${tmp_load}" 2>>"${ban_errorlog}" >>"${tmp_allow}" + feed_rc="${?}" + fi else - f_log "info" "download for feed '${feed%v*}' failed (rc: ${feed_rc:-"-"}/log: ${feed_log})" + f_log "info" "download for feed '${feed%%.*}' failed" + feed_rc="4" break fi done @@ -871,114 +1155,121 @@ f_down() { # handle local feeds # - if [ "${feed%v*}" = "allowlist" ]; then + if [ "${feed%%.*}" = "allowlist" ]; then { printf "%s\n\n" "#!${ban_nftcmd} -f" [ -s "${tmp_flush}" ] && "${ban_catcmd}" "${tmp_flush}" - if [ "${proto}" = "4MAC" ]; then - "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="0.0.0.0/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${tmp_allow}" >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ether_addr . 
ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ether saddr . ip saddr @${feed} counter accept" - elif [ "${proto}" = "6MAC" ]; then - "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="::/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${tmp_allow}" >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ether saddr . ip6 saddr @${feed} counter accept" - elif [ "${proto}" = "4" ]; then - "${ban_awkcmd}" '/^127\./{next}/^(([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?)([[:space:]].*|$)/{printf "%s, ",$1}' "${tmp_allow}" >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - if [ -z "${feed_direction##*input*}" ]; then - if [ "${ban_allowlistonly}" = "1" ]; then - printf "%s\n" "add rule inet banIP wan-input ip saddr != @${feed} ${log_input} counter ${feed_target}" - else - printf "%s\n" "add rule inet banIP wan-input ip saddr @${feed} counter accept" - fi - fi - if [ -z "${feed_direction##*forwardwan*}" ]; then - if [ "${ban_allowlistonly}" = "1" ]; then - printf "%s\n" "add rule inet banIP wan-forward ip saddr != @${feed} ${log_forwardwan} counter ${feed_target}" - else - printf "%s\n" "add rule inet banIP wan-forward ip saddr @${feed} counter accept" - fi - fi - if [ -z "${feed_direction##*forwardlan*}" ]; then - if [ "${ban_allowlistonly}" = "1" ]; then - printf 
"%s\n" "add rule inet banIP lan-forward ip daddr != @${feed} ${log_forwardlan} counter goto reject-chain" - else - printf "%s\n" "add rule inet banIP lan-forward ip daddr @${feed} counter accept" + case "${feed_ipv}" in + "4MAC") + "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="0.0.0.0/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${tmp_allow}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + [ -z "${feed_direction##*outbound*}" ] && printf "%s\n" "add rule inet banIP _outbound ether saddr . ip saddr @${feed} counter accept" + ;; + "6MAC") + "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="::/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${tmp_allow}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + [ -z "${feed_direction##*outbound*}" ] && printf "%s\n" "add rule inet banIP _outbound ether saddr . 
ip6 saddr @${feed} counter accept" + ;; + "4") + f_chkip ${feed_ipv} local 1 < "${tmp_allow}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + if [ -z "${feed_direction##*inbound*}" ]; then + if [ "${ban_allowlistonly}" = "1" ]; then + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _inbound ip saddr != @${feed} ${log_inbound}" + fi + printf "%s\n" "add rule inet banIP _inbound ip saddr != @${feed} counter ${feed_target}" + else + printf "%s\n" "add rule inet banIP _inbound ip saddr @${feed} counter accept" + fi fi - fi - elif [ "${proto}" = "6" ]; then - "${ban_awkcmd}" '!/^([0-9A-f]{2}:){5}[0-9A-f]{2}.*/{printf "%s\n",$1}' "${tmp_allow}" | - "${ban_awkcmd}" '/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?)([[:space:]].*|$)/{printf "%s, ",tolower($1)}' >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - if [ -z "${feed_direction##*input*}" ]; then - if [ "${ban_allowlistonly}" = "1" ]; then - printf "%s\n" "add rule inet banIP wan-input ip6 saddr != @${feed} ${log_input} counter ${feed_target}" - else - printf "%s\n" "add rule inet banIP wan-input ip6 saddr @${feed} counter accept" + if [ -z "${feed_direction##*outbound*}" ]; then + if [ "${ban_allowlistonly}" = "1" ]; then + if [ "${ban_logoutbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _outbound ip daddr != @${feed} ${log_outbound}" + fi + printf "%s\n" "add rule inet banIP _outbound ip daddr != @${feed} counter goto _reject" + else + printf "%s\n" "add rule inet banIP _outbound ip daddr @${feed} counter accept" + fi fi - fi - if [ -z "${feed_direction##*forwardwan*}" ]; then - if [ "${ban_allowlistonly}" = "1" ]; then - printf "%s\n" "add rule inet banIP wan-forward ip6 saddr != @${feed} 
${log_forwardwan} counter ${feed_target}" - else - printf "%s\n" "add rule inet banIP wan-forward ip6 saddr @${feed} counter accept" + ;; + "6") + f_chkip ${feed_ipv} local 1 < "${tmp_allow}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + if [ -z "${feed_direction##*inbound*}" ]; then + if [ "${ban_allowlistonly}" = "1" ]; then + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _inbound ip6 saddr != @${feed} ${log_inbound}" + fi + printf "%s\n" "add rule inet banIP _inbound ip6 saddr != @${feed} counter ${feed_target}" + else + printf "%s\n" "add rule inet banIP _inbound ip6 saddr @${feed} counter accept" + fi fi - fi - if [ -z "${feed_direction##*forwardlan*}" ]; then - if [ "${ban_allowlistonly}" = "1" ]; then - printf "%s\n" "add rule inet banIP lan-forward ip6 daddr != @${feed} ${log_forwardlan} counter ${feed_target}" - else - printf "%s\n" "add rule inet banIP lan-forward ip6 daddr @${feed} counter accept" + if [ -z "${feed_direction##*outbound*}" ]; then + if [ "${ban_allowlistonly}" = "1" ]; then + if [ "${ban_logoutbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _outbound ip6 daddr != @${feed} ${log_outbound}" + fi + printf "%s\n" "add rule inet banIP _outbound ip6 daddr != @${feed} counter ${feed_target}" + else + printf "%s\n" "add rule inet banIP _outbound ip6 daddr @${feed} counter accept" + fi fi - fi - fi + ;; + esac } >"${tmp_nft}" : >"${tmp_flush}" >"${tmp_raw}" >"${tmp_file}" feed_rc="0" - elif [ "${feed%v*}" = "blocklist" ]; then + elif [ "${feed%%.*}" = "blocklist" ]; then { printf "%s\n\n" "#!${ban_nftcmd} -f" [ -s "${tmp_flush}" ] && "${ban_catcmd}" "${tmp_flush}" - if [ "${proto}" = "4MAC" ]; then - "${ban_awkcmd}" 
'/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="0.0.0.0/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${ban_blocklist}" >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ether saddr . ip saddr @${feed} counter goto reject-chain" - elif [ "${proto}" = "6MAC" ]; then - "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="::/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${ban_blocklist}" >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ether saddr . 
ip6 saddr @${feed} counter goto reject-chain" - elif [ "${proto}" = "4" ]; then - if [ "${ban_deduplicate}" = "1" ]; then - "${ban_awkcmd}" '/^127\./{next}/^(([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?)([[:space:]].*|$)/{printf "%s,\n",$1}' "${ban_blocklist}" >"${tmp_raw}" - "${ban_awkcmd}" 'NR==FNR{member[$0];next}!($0 in member)' "${ban_tmpfile}.deduplicate" "${tmp_raw}" 2>/dev/null >"${tmp_split}" - "${ban_awkcmd}" 'BEGIN{FS="[ ,]"}NR==FNR{member[$1];next}!($1 in member)' "${ban_tmpfile}.deduplicate" "${ban_blocklist}" 2>/dev/null >"${tmp_raw}" - "${ban_catcmd}" "${tmp_raw}" 2>/dev/null >"${ban_blocklist}" - else - "${ban_awkcmd}" '/^127\./{next}/^(([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?)([[:space:]].*|$)/{printf "%s,\n",$1}' "${ban_blocklist}" >"${tmp_split}" - fi - "${ban_awkcmd}" '{ORS=" ";print}' "${tmp_split}" 2>/dev/null >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ipv4_addr; flags interval, timeout; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - [ -z "${feed_direction##*input*}" ] && printf "%s\n" "add rule inet banIP wan-input ip saddr @${feed} ${log_input} counter ${feed_target}" - [ -z "${feed_direction##*forwardwan*}" ] && printf "%s\n" "add rule inet banIP wan-forward ip saddr @${feed} ${log_forwardwan} counter ${feed_target}" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ip daddr @${feed} ${log_forwardlan} counter goto reject-chain" - elif [ "${proto}" = "6" ]; then - if [ "${ban_deduplicate}" = "1" ]; then - "${ban_awkcmd}" '!/^([0-9A-f]{2}:){5}[0-9A-f]{2}.*/{printf "%s\n",$1}' "${ban_blocklist}" | - "${ban_awkcmd}" '/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?)([[:space:]].*|$)/{printf "%s,\n",tolower($1)}' >"${tmp_raw}" - "${ban_awkcmd}" 'NR==FNR{member[$0];next}!($0 in member)' 
"${ban_tmpfile}.deduplicate" "${tmp_raw}" 2>/dev/null >"${tmp_split}" - "${ban_awkcmd}" 'BEGIN{FS="[ ,]"}NR==FNR{member[$1];next}!($1 in member)' "${ban_tmpfile}.deduplicate" "${ban_blocklist}" 2>/dev/null >"${tmp_raw}" - "${ban_catcmd}" "${tmp_raw}" 2>/dev/null >"${ban_blocklist}" - else - "${ban_awkcmd}" '!/^([0-9A-f]{2}:){5}[0-9A-f]{2}.*/{printf "%s\n",$1}' "${ban_blocklist}" | - "${ban_awkcmd}" '/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?)([[:space:]].*|$)/{printf "%s,\n",tolower($1)}' >"${tmp_split}" - fi - "${ban_awkcmd}" '{ORS=" ";print}' "${tmp_split}" 2>/dev/null >"${tmp_file}" - printf "%s\n" "add set inet banIP ${feed} { type ipv6_addr; flags interval, timeout; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}") }" - [ -z "${feed_direction##*input*}" ] && printf "%s\n" "add rule inet banIP wan-input ip6 saddr @${feed} ${log_input} counter ${feed_target}" - [ -z "${feed_direction##*forwardwan*}" ] && printf "%s\n" "add rule inet banIP wan-forward ip6 saddr @${feed} ${log_forwardwan} counter ${feed_target}" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ip6 daddr @${feed} ${log_forwardlan} counter goto reject-chain" - fi + case "${feed_ipv}" in + "4MAC") + "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([1-9][0-9]?[0-9]?\.){1}([0-9]{1,3}\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/(1?[0-9]|2?[0-9]|3?[0-2]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="0.0.0.0/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${ban_blocklist}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + [ -z "${feed_direction##*outbound*}" ] && printf "%s\n" "add rule inet banIP _outbound ether saddr . 
ip saddr @${feed} counter goto _reject" + ;; + "6MAC") + "${ban_awkcmd}" '/^([0-9A-f]{2}:){5}[0-9A-f]{2}(\/([0-9]|[1-3][0-9]|4[0-8]))?([[:space:]]+([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?([[:space:]]+#.*$|[[:space:]]*$)|[[:space:]]+#.*$|$)/{if(!$2||$2~/#/)$2="::/0";if(!seen[$1]++)printf "%s . %s, ",tolower($1),$2}' "${ban_blocklist}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ether_addr . ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + [ -z "${feed_direction##*outbound*}" ] && printf "%s\n" "add rule inet banIP _outbound ether saddr . ip6 saddr @${feed} counter goto _reject" + ;; + "4") + f_chkip ${feed_ipv} local 1 < "${ban_blocklist}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ipv4_addr; flags interval, timeout; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + if [ -z "${feed_direction##*inbound*}" ]; then + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _inbound ip saddr @${feed} ${log_inbound}" + fi + printf "%s\n" "add rule inet banIP _inbound ip saddr @${feed} counter ${feed_target}" + fi + if [ -z "${feed_direction##*outbound*}" ]; then + if [ "${ban_logoutbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _outbound ip daddr @${feed} ${log_outbound}" + fi + printf "%s\n" "add rule inet banIP _outbound ip daddr @${feed} counter goto _reject" + fi + ;; + "6") + f_chkip ${feed_ipv} local 1 < "${ban_blocklist}" >"${tmp_file}" + printf "%s\n" "add set inet banIP ${feed} { type ipv6_addr; flags interval, timeout; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}") }" + if [ -z "${feed_direction##*inbound*}" ]; then + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _inbound ip6 saddr @${feed} ${log_inbound}" + fi + printf "%s\n" "add rule inet banIP _inbound ip6 saddr @${feed} counter 
${feed_target}" + fi + if [ -z "${feed_direction##*outbound*}" ]; then + if [ "${ban_logoutbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _outbound ip6 daddr @${feed} ${log_outbound}" + fi + printf "%s\n" "add rule inet banIP _outbound ip6 daddr @${feed} counter goto _reject" + fi + ;; + esac } >"${tmp_nft}" : >"${tmp_flush}" >"${tmp_raw}" >"${tmp_file}" feed_rc="0" @@ -986,42 +1277,81 @@ f_down() { # handle external feeds # elif [ "${restore_rc}" != "0" ] && [ "${feed_url}" != "local" ]; then + # handle country downloads # - if [ "${feed%v*}" = "country" ]; then - for country in ${ban_country}; do - feed_log="$("${ban_fetchcmd}" ${ban_fetchparm} "${tmp_raw}" "${feed_url}${country}-aggregated.zone" 2>&1)" - feed_rc="${?}" - [ "${feed_rc}" = "0" ] && "${ban_catcmd}" "${tmp_raw}" 2>/dev/null >>"${tmp_load}" - done - : >"${tmp_raw}" + if [ "${feed%%.*}" = "country" ]; then + if [ "${ban_countrysplit}" = "0" ]; then + for country in ${ban_country}; do + if "${ban_fetchcmd}" ${ban_fetchparm} "${tmp_raw}" "${feed_url}${country}-aggregated.zone" 2>>"${ban_errorlog}"; then + if [ -s "${tmp_raw}" ]; then + "${ban_catcmd}" "${tmp_raw}" 2>>"${ban_errorlog}" >>"${tmp_load}" + feed_rc="${?}" + fi + else + f_log "info" "download for feed '${feed}/${country}' failed" + fi + done + : >"${tmp_raw}" + else + country="${feed%.*}" + country="${country#*.}" + if "${ban_fetchcmd}" ${ban_fetchparm} "${tmp_load}" "${feed_url}${country}-aggregated.zone" 2>>"${ban_errorlog}"; then + feed_rc="${?}" + else + feed_rc="4" + fi + fi # handle asn downloads # - elif [ "${feed%v*}" = "asn" ]; then - for asn in ${ban_asn}; do - feed_log="$("${ban_fetchcmd}" ${ban_fetchparm} "${tmp_raw}" "${feed_url}AS${asn}" 2>&1)" - feed_rc="${?}" - [ "${feed_rc}" = "0" ] && "${ban_catcmd}" "${tmp_raw}" 2>/dev/null >>"${tmp_load}" - done - : >"${tmp_raw}" + elif [ "${feed%%.*}" = "asn" ]; then + if [ "${ban_asnsplit}" = "0" ]; then + for asn in ${ban_asn}; do + if "${ban_fetchcmd}" ${ban_fetchparm} 
"${tmp_raw}" "${feed_url}AS${asn}" 2>>"${ban_errorlog}"; then + if [ -s "${tmp_raw}" ]; then + "${ban_catcmd}" "${tmp_raw}" 2>>"${ban_errorlog}" >>"${tmp_load}" + feed_rc="${?}" + fi + else + f_log "info" "download for feed '${feed}/${asn}' failed" + fi + done + : >"${tmp_raw}" + else + asn="${feed%.*}" + asn="${asn#*.}" + if "${ban_fetchcmd}" ${ban_fetchparm} "${tmp_load}" "${feed_url}AS${asn}" 2>>"${ban_errorlog}"; then + feed_rc="${?}" + else + feed_rc="4" + fi + fi # handle compressed downloads # elif [ "${feed_comp}" = "gz" ]; then - feed_log="$("${ban_fetchcmd}" ${ban_fetchparm} "${tmp_raw}" "${feed_url}" 2>&1)" - feed_rc="${?}" - [ "${feed_rc}" = "0" ] && "${ban_zcatcmd}" "${tmp_raw}" 2>/dev/null >"${tmp_load}" + if "${ban_fetchcmd}" ${ban_fetchparm} "${tmp_raw}" "${feed_url}" 2>>"${ban_errorlog}"; then + if [ -s "${tmp_raw}" ]; then + "${ban_zcatcmd}" "${tmp_raw}" 2>>"${ban_errorlog}" >"${tmp_load}" + feed_rc="${?}" + fi + else + feed_rc="4" + fi : >"${tmp_raw}" # handle normal downloads # else - feed_log="$("${ban_fetchcmd}" ${ban_fetchparm} "${tmp_load}" "${feed_url}" 2>&1)" - feed_rc="${?}" + if "${ban_fetchcmd}" ${ban_fetchparm} "${tmp_load}" "${feed_url}" 2>>"${ban_errorlog}"; then + feed_rc="${?}" + else + feed_rc="4" + fi fi fi - [ "${feed_rc}" != "0" ] && f_log "info" "download for feed '${feed}' failed (rc: ${feed_rc:-"-"}/log: ${feed_log})" + [ "${feed_rc}" != "0" ] && f_log "info" "download for feed '${feed}' failed, rc: ${feed_rc:-"-"}" # backup/restore # @@ -1036,14 +1366,15 @@ f_down() { # final file & Set preparation for regular downloads # if [ "${feed_rc}" = "0" ] && [ ! 
-s "${tmp_nft}" ]; then + # deduplicate Sets # - if [ "${ban_deduplicate}" = "1" ] && [ "${feed_url}" != "local" ]; then - "${ban_awkcmd}" '{sub("\r$", "");print}' "${tmp_load}" 2>/dev/null | "${ban_awkcmd}" "${feed_rule}" 2>/dev/null >"${tmp_raw}" - "${ban_awkcmd}" 'NR==FNR{member[$0];next}!($0 in member)' "${ban_tmpfile}.deduplicate" "${tmp_raw}" 2>/dev/null | tee -a "${ban_tmpfile}.deduplicate" >"${tmp_split}" + if [ "${ban_deduplicate}" = "1" ] && [ "${feed_url}" != "local" ] && [ -z "${feed_complete}" ]; then + f_chkip ${feed_ipv} ${feed_rule} < "${tmp_load}" >"${tmp_raw}" + "${ban_awkcmd}" 'NR==FNR{member[$0];next}!($0 in member)' "${ban_tmpfile}.deduplicate" "${tmp_raw}" 2>>"${ban_errorlog}" | tee -a "${ban_tmpfile}.deduplicate" >"${tmp_split}" feed_rc="${?}" else - "${ban_awkcmd}" '{sub("\r$", "");print}' "${tmp_load}" 2>/dev/null | "${ban_awkcmd}" "${feed_rule}" 2>/dev/null >"${tmp_split}" + f_chkip ${feed_ipv} ${feed_rule} < "${tmp_load}" >"${tmp_split}" feed_rc="${?}" fi : >"${tmp_raw}" >"${tmp_load}" @@ -1052,13 +1383,13 @@ f_down() { # if [ "${feed_rc}" = "0" ]; then if [ -n "${ban_splitsize//[![:digit:]]/}" ] && [ "${ban_splitsize//[![:digit:]]/}" -ge "512" ]; then - if ! "${ban_awkcmd}" "NR%${ban_splitsize//[![:digit:]]/}==1{file=\"${tmp_file}.\"++i;}{ORS=\" \";print > file}" "${tmp_split}" 2>/dev/null; then + if ! 
"${ban_awkcmd}" "NR%${ban_splitsize//[![:digit:]]/}==1{file=\"${tmp_file}.\"++i;}{ORS=\" \";print > file}" "${tmp_split}" 2>>"${ban_errorlog}"; then feed_rc="${?}" rm -f "${tmp_file}".* - f_log "info" "can't split Set '${feed}' to size '${ban_splitsize//[![:digit:]]/}'" + f_log "info" "can't split nfset '${feed}' to size '${ban_splitsize//[![:digit:]]/}'" fi else - "${ban_awkcmd}" '{ORS=" ";print}' "${tmp_split}" 2>/dev/null >"${tmp_file}.1" + "${ban_awkcmd}" '{ORS=" ";print}' "${tmp_split}" 2>>"${ban_errorlog}" >"${tmp_file}.1" feed_rc="${?}" fi fi @@ -1066,27 +1397,45 @@ f_down() { # build nft file # if [ "${feed_rc}" = "0" ] && [ -s "${tmp_file}.1" ]; then - if [ "${proto}" = "4" ]; then + if [ "${feed_ipv}" = "4" ]; then { - # nft header (IPv4 Set) input and forward rules + # nft header (IPv4 Set) incl. inbound and outbound rules # printf "%s\n\n" "#!${ban_nftcmd} -f" [ -s "${tmp_flush}" ] && "${ban_catcmd}" "${tmp_flush}" - printf "%s\n" "add set inet banIP ${feed} { type ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}.1") }" - [ -z "${feed_direction##*input*}" ] && printf "%s\n" "add rule inet banIP wan-input ${feed_dport} ip saddr @${feed} ${log_input} counter ${feed_target}" - [ -z "${feed_direction##*forwardwan*}" ] && printf "%s\n" "add rule inet banIP wan-forward ${feed_dport} ip saddr @${feed} ${log_forwardwan} counter ${feed_target}" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ${feed_dport} ip daddr @${feed} ${log_forwardlan} counter goto reject-chain" + printf "%s\n" "add set inet banIP ${feed} { type ipv4_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}.1") }" + if [ -z "${feed_direction##*inbound*}" ]; then + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _inbound ${feed_dport} ip saddr @${feed} ${log_inbound}" + fi + printf "%s\n" "add rule inet banIP _inbound 
${feed_dport} ip saddr @${feed} counter ${feed_target}" + fi + if [ -z "${feed_direction##*outbound*}" ]; then + if [ "${ban_logoutbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _outbound ${feed_dport} ip daddr @${feed} ${log_outbound}" + fi + printf "%s\n" "add rule inet banIP _outbound ${feed_dport} ip daddr @${feed} counter goto _reject" + fi } >"${tmp_nft}" - elif [ "${proto}" = "6" ]; then + elif [ "${feed_ipv}" = "6" ]; then { - # nft header (IPv6 Set) plus input and forward rules + # nft header (IPv6 Set) incl. inbound and outbound rules # printf "%s\n\n" "#!${ban_nftcmd} -f" [ -s "${tmp_flush}" ] && "${ban_catcmd}" "${tmp_flush}" - printf "%s\n" "add set inet banIP ${feed} { type ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; $(f_getelements "${tmp_file}.1") }" - [ -z "${feed_direction##*input*}" ] && printf "%s\n" "add rule inet banIP wan-input ${feed_dport} ip6 saddr @${feed} ${log_input} counter ${feed_target}" - [ -z "${feed_direction##*forwardwan*}" ] && printf "%s\n" "add rule inet banIP wan-forward ${feed_dport} ip6 saddr @${feed} ${log_forwardwan} counter ${feed_target}" - [ -z "${feed_direction##*forwardlan*}" ] && printf "%s\n" "add rule inet banIP lan-forward ${feed_dport} ip6 daddr @${feed} ${log_forwardlan} counter goto reject-chain" + printf "%s\n" "add set inet banIP ${feed} { type ipv6_addr; flags interval; auto-merge; policy ${ban_nftpolicy}; ${element_count}; $(f_getelements "${tmp_file}.1") }" + if [ -z "${feed_direction##*inbound*}" ]; then + if [ "${ban_loginbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _inbound ${feed_dport} ip6 saddr @${feed} ${log_inbound}" + fi + printf "%s\n" "add rule inet banIP _inbound ${feed_dport} ip6 saddr @${feed} counter ${feed_target}" + fi + if [ -z "${feed_direction##*outbound*}" ]; then + if [ "${ban_logoutbound}" = "1" ]; then + printf "%s\n" "add rule inet banIP _outbound ${feed_dport} ip6 daddr @${feed} ${log_outbound}" + fi + printf "%s\n" "add rule inet 
banIP _outbound ${feed_dport} ip6 daddr @${feed} counter goto _reject" + fi } >"${tmp_nft}" fi fi @@ -1096,14 +1445,19 @@ f_down() { # load generated nft file in banIP table # if [ "${feed_rc}" = "0" ]; then - if [ "${feed%v*}" = "allowlist" ]; then - cnt_dl="$("${ban_awkcmd}" 'END{printf "%d",NR}' "${tmp_allow}" 2>/dev/null)" + if [ "${feed%%.*}" = "allowlist" ]; then + cnt_dl="$("${ban_awkcmd}" 'END{printf "%d",NR}' "${tmp_allow}" 2>>"${ban_errorlog}")" + elif [ "${feed%%.*}" = "blocklist" ]; then + cnt_dl="$("${ban_awkcmd}" 'END{printf "%d",NR}' "${ban_blocklist}" 2>>"${ban_errorlog}")" else - cnt_dl="$("${ban_awkcmd}" 'END{printf "%d",NR}' "${tmp_split}" 2>/dev/null)" + cnt_dl="$("${ban_awkcmd}" 'END{printf "%d",NR}' "${tmp_split}" 2>>"${ban_errorlog}")" : >"${tmp_split}" fi - if [ "${cnt_dl:-"0"}" -gt "0" ] || [ "${feed_url}" = "local" ] || [ "${feed%v*}" = "allowlist" ] || [ "${feed%v*}" = "blocklist" ]; then - feed_log="$("${ban_nftcmd}" -f "${tmp_nft}" 2>&1)" + if [ "${cnt_dl:-"0"}" -gt "0" ] || [ "${feed%%.*}" = "allowlist" ] || [ "${feed%%.*}" = "blocklist" ]; then + + # load initial file to nftset + # + f_nftload "${tmp_nft}" "can't load initial file to nfset '${feed}'" feed_rc="${?}" # load additional split files @@ -1111,19 +1465,17 @@ f_down() { if [ "${feed_rc}" = "0" ]; then for split_file in "${tmp_file}".*; do if [ -s "${split_file}" ]; then - "${ban_sedcmd}" -i "1 i #!${ban_nftcmd} -f\nadd element inet banIP "${feed}" { " "${split_file}" + "${ban_sedcmd}" -i "1 i #!${ban_nftcmd} -f\nadd element inet banIP ${feed} { " "${split_file}" printf "%s\n" "}" >>"${split_file}" - if ! 
"${ban_nftcmd}" -f "${split_file}" >/dev/null 2>&1; then - f_log "info" "can't add split file '${split_file##*.}' to Set '${feed}'" - fi + + # load split file to nftset + # + f_nftload "${split_file}" "can't load split file '${split_file##*.}' to nfset '${feed}'" + feed_rc="${?}" : >"${split_file}" fi done - if [ "${ban_debug}" = "1" ] && [ "${ban_reportelements}" = "1" ]; then - cnt_set="$("${ban_nftcmd}" -j list set inet banIP "${feed}" 2>/dev/null | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]' | wc -l 2>/dev/null)" - fi - else - f_log "info" "can't initialize Set for feed '${feed}' (rc: ${feed_rc}, log: ${feed_log})" + cnt_set="$("${ban_nftcmd}" -j list set inet banIP "${feed}" 2>/dev/null | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]' | "${ban_wccmd}" -l 2>/dev/null)" fi else f_log "info" "skip empty feed '${feed}'" @@ -1132,7 +1484,7 @@ f_down() { : >"${tmp_nft}" end_ts="$(date +%s)" - f_log "debug" "f_down ::: feed: ${feed}, cnt_dl: ${cnt_dl:-"-"}, cnt_set: ${cnt_set:-"-"}, split_size: ${ban_splitsize:-"-"}, time: $((end_ts - start_ts)), rc: ${feed_rc:-"-"}, log: ${feed_log:-"-"}" + f_log "debug" "f_down ::: feed: ${feed}, policy: ${feed_policy}, complete: ${feed_complete:-"-"}, cnt_dl: ${cnt_dl:-"-"}, cnt_set: ${cnt_set:-"-"}, split_size: ${ban_splitsize:-"-"}, time: $((end_ts - start_ts)), rc: ${feed_rc:-"-"}" } # backup feeds @@ -1145,7 +1497,7 @@ f_backup() { backup_rc="${?}" fi - f_log "debug" "f_backup ::: feed: ${feed}, file: banIP.${feed}.gz, rc: ${backup_rc}" + f_log "debug" "f_backup ::: feed: ${feed}, file: banIP.${feed}.gz, rc: ${backup_rc}" return "${backup_rc}" } @@ -1154,77 +1506,106 @@ f_backup() { f_restore() { local tmp_feed restore_rc="4" feed="${1}" feed_url="${2}" feed_file="${3}" in_rc="${4}" - [ "${feed_url}" = "local" ] && tmp_feed="${feed%v*}v4" || tmp_feed="${feed}" + [ "${feed_url}" = "local" ] && tmp_feed="${feed%.*}.v4" || tmp_feed="${feed}" if [ -s "${ban_backupdir}/banIP.${tmp_feed}.gz" ]; then - "${ban_zcatcmd}" 
"${ban_backupdir}/banIP.${tmp_feed}.gz" 2>/dev/null >"${feed_file}" + "${ban_zcatcmd}" "${ban_backupdir}/banIP.${tmp_feed}.gz" 2>>"${ban_errorlog}" >"${feed_file}" restore_rc="${?}" fi - f_log "debug" "f_restore ::: feed: ${feed}, file: banIP.${tmp_feed}.gz, in_rc: ${in_rc:-"-"}, rc: ${restore_rc}" + f_log "debug" "f_restore ::: feed: ${feed}, file: banIP.${tmp_feed}.gz, in_rc: ${in_rc:-"-"}, rc: ${restore_rc}" return "${restore_rc}" } -# remove disabled Sets +# remove staled Sets # f_rmset() { - local expr feedlist tmp_del ruleset_raw item table_sets handle del_set feed_log feed_rc + local feedlist tmp_del table_json feed country asn table_sets handles handle expr del_set feed_rc f_getfeed json_get_keys feedlist tmp_del="${ban_tmpfile}.final.delete" - ruleset_raw="$("${ban_nftcmd}" -tj list ruleset 2>/dev/null)" - table_sets="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.set.table="banIP"&&@.set.family="inet"].set.name')" + table_json="$("${ban_nftcmd}" -tj list table inet banIP 2>>"${ban_errorlog}")" + table_sets="$(printf "%s\n" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.set.family="inet"].set.name')" { printf "%s\n\n" "#!${ban_nftcmd} -f" - for item in ${table_sets}; do - if ! printf "%s" "allowlist blocklist ${ban_feed}" | "${ban_grepcmd}" -q "${item%v*}" || - ! 
printf "%s" "allowlist blocklist ${feedlist}" | "${ban_grepcmd}" -q "${item%v*}"; then - [ -z "${del_set}" ] && del_set="${item}" || del_set="${del_set}, ${item}" - rm -f "${ban_backupdir}/banIP.${item}.gz" - printf "%s\n" "flush set inet banIP ${item}" - for expr in 0 1; do - handle="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-input\"][@.expr[${expr}].match.right=\"@${item}\"].handle")" - [ -n "${handle}" ] && printf "%s\n" "delete rule inet banIP wan-input handle ${handle}" - handle="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].handle")" - [ -n "${handle}" ] && printf "%s\n" "delete rule inet banIP wan-forward handle ${handle}" - handle="$(printf "%s\n" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"lan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].handle")" - [ -n "${handle}" ] && printf "%s\n" "delete rule inet banIP lan-forward handle ${handle}" + for feed in ${table_sets}; do + if ! printf "%s" "allowlist blocklist ${ban_feed}" | "${ban_grepcmd}" -q "${feed%.*}" || + ! printf "%s" "allowlist blocklist ${feedlist}" | "${ban_grepcmd}" -q "${feed%.*}" || + { [ "${feed%.*}" = "country" ] && [ "${ban_countrysplit}" = "1" ]; } || + { [ "${feed%.*}" = "asn" ] && [ "${ban_asnsplit}" = "1" ]; } || + { [ "${feed%.*}" != "allowlist" ] && [ "${feed%.*}" != "blocklist" ] && [ "${ban_allowlistonly}" = "1" ] && + ! printf "%s" "${ban_feedin}" | "${ban_grepcmd}" -q "allowlist" && + ! 
printf "%s" "${ban_feedout}" | "${ban_grepcmd}" -q "allowlist"; }; then + case "${feed%%.*}" in + "country") + country="${feed%.*}" + country="${country#*.}" + if [ "${ban_countrysplit}" = "1" ] && printf "%s" "${ban_feed}" | "${ban_grepcmd}" -q "${feed%%.*}" && + printf "%s" "${ban_country}" | "${ban_grepcmd}" -q "${country}"; then + continue + fi + ;; + "asn") + asn="${feed%.*}" + asn="${asn#*.}" + if [ "${ban_asnsplit}" = "1" ] && printf "%s" "${ban_feed}" | "${ban_grepcmd}" -q "${feed%%.*}" && + printf "%s" "${ban_asn}" | "${ban_grepcmd}" -q "${asn}"; then + continue + fi + ;; + esac + [ -z "${del_set}" ] && del_set="${feed}" || del_set="${del_set}, ${feed}" + rm -f "${ban_backupdir}/banIP.${feed}.gz" + for chain in _inbound _outbound; do + for expr in 0 1 2; do + handles="$(printf "%s\n" "${table_json}" | "${ban_jsoncmd}" -q -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[${expr}].match.right=\"@${feed}\"].handle" | "${ban_xargscmd}")" + for handle in ${handles}; do + printf "%s\n" "delete rule inet banIP ${chain} handle ${handle}" + done + done done - printf "%s\n\n" "delete set inet banIP ${item}" + printf "%s\n" "flush set inet banIP ${feed}" + printf "%s\n\n" "delete set inet banIP ${feed}" fi done } >"${tmp_del}" if [ -n "${del_set}" ]; then - feed_log="$("${ban_nftcmd}" -f "${tmp_del}" 2>&1)" - feed_rc="${?}" + if "${ban_nftcmd}" -f "${tmp_del}" >/dev/null 2>&1; then + feed_rc="${?}" + else + feed_rc="4" + fi fi : >"${tmp_del}" - f_log "debug" "f_rmset ::: Set: ${del_set:-"-"}, rc: ${feed_rc:-"-"}, log: ${feed_log:-"-"}" + f_log "debug" "f_rmset ::: feed: ${del_set:-"-"}, rc: ${feed_rc:-"-"}" } # generate status information # f_genstatus() { - local mem_free mem_max object end_time duration table_sets cnt_elements="0" custom_feed="0" split="0" status="${1}" + local mem_free nft_ver chain_cnt set_cnt rule_cnt object end_time duration table table_sets element_cnt="0" custom_feed="0" split="0" status="${1}" - mem_free="$("${ban_awkcmd}" 
'/^MemAvailable/{printf "%s",int($2/1024)}' "/proc/meminfo" 2>/dev/null)" - mem_max="$("${ban_awkcmd}" '/^VmHWM/{printf "%s",int($2)}' /proc/${$}/status 2>/dev/null)" + mem_free="$("${ban_awkcmd}" '/^MemAvailable/{printf "%.2f", $2/1024}' "/proc/meminfo" 2>>"${ban_errorlog}")" + nft_ver="$(printf "%s" "${ban_packages}" | "${ban_jsoncmd}" -ql1 -e '@.packages["nftables-json"]')" [ -z "${ban_dev}" ] && f_conf if [ "${status}" = "active" ]; then + table="$("${ban_nftcmd}" -tj list table inet banIP 2>>"${ban_errorlog}")" + table_sets="$(printf "%s" "${table}" | "${ban_jsoncmd}" -qe '@.nftables[@.set.family="inet"].set.name')" + for object in ${table_sets}; do + element_cnt="$((element_cnt + $("${ban_nftcmd}" -j list set inet banIP "${object}" 2>>"${ban_errorlog}" | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]' | "${ban_wccmd}" -l 2>>"${ban_errorlog}")))" + done + chain_cnt="$(printf "%s" "${table}" | "${ban_jsoncmd}" -qe '@.nftables[*].chain.name' | "${ban_wccmd}" -l 2>>"${ban_errorlog}")" + set_cnt="$(printf "%s" "${table}" | "${ban_jsoncmd}" -qe '@.nftables[*].set.name' | "${ban_wccmd}" -l 2>>"${ban_errorlog}")" + rule_cnt="$(printf "%s" "${table}" | "${ban_jsoncmd}" -qe '@.nftables[*].rule' | "${ban_wccmd}" -l 2>>"${ban_errorlog}")" + element_cnt="$("${ban_awkcmd}" -v cnt="${element_cnt}" 'BEGIN{res="";pos=0;for(i=length(cnt);i>0;i--){res=substr(cnt,i,1)res;pos++;if(pos==3&&i>1){res=" "res;pos=0;}}; printf"%s",res}')" if [ -n "${ban_starttime}" ] && [ "${ban_action}" != "boot" ]; then end_time="$(date "+%s")" duration="$(((end_time - ban_starttime) / 60))m $(((end_time - ban_starttime) % 60))s" fi - table_sets="$("${ban_nftcmd}" -tj list ruleset 2>/dev/null | "${ban_jsoncmd}" -qe '@.nftables[@.set.table="banIP"&&@.set.family="inet"].set.name')" - if [ "${ban_reportelements}" = "1" ]; then - for object in ${table_sets}; do - cnt_elements="$((cnt_elements + $("${ban_nftcmd}" -j list set inet banIP "${object}" 2>/dev/null | "${ban_jsoncmd}" -qe 
'@.nftables[*].set.elem[*]' | wc -l 2>/dev/null)))" - done - fi - runtime="mode: ${ban_action:-"-"}, period: ${duration:-"-"}, memory: ${mem_free} MB available, ${mem_max} KB max. used, cores: ${ban_cores}, log: ${ban_logreadcmd##*/}, fetch: ${ban_fetchcmd##*/}" + runtime="mode: ${ban_action:-"-"}, $(date "+%Y-%m-%d %H:%M:%S"), duration: ${duration:-"-"}, memory: ${mem_free} MB available" fi [ -s "${ban_customfeedfile}" ] && custom_feed="1" [ "${ban_splitsize:-"0"}" -gt "0" ] && split="1" @@ -1233,8 +1614,9 @@ f_genstatus() { json_init json_load_file "${ban_rtfile}" >/dev/null 2>&1 json_add_string "status" "${status}" - json_add_string "version" "${ban_ver}" - json_add_string "element_count" "${cnt_elements}" + json_add_string "frontend_ver" "${ban_fver}" + json_add_string "backend_ver" "${ban_bver}" + json_add_string "element_count" "${element_cnt} (chains: ${chain_cnt:-"0"}, sets: ${set_cnt:-"0"}, rules: ${rule_cnt:-"0"})" json_add_array "active_feeds" for object in ${table_sets:-"-"}; do json_add_string "${object}" "${object}" @@ -1265,11 +1647,11 @@ f_genstatus() { json_add_string "${object}" "${object}" done json_close_array - json_add_string "nft_info" "priority: ${ban_nftpriority}, policy: ${ban_nftpolicy}, loglevel: ${ban_nftloglevel}, expiry: ${ban_nftexpiry:-"-"}, limit (icmp/syn/udp): ${ban_icmplimit}/${ban_synlimit}/${ban_udplimit}" - json_add_string "run_info" "base: ${ban_basedir}, backup: ${ban_backupdir}, report: ${ban_reportdir}" - json_add_string "run_flags" "auto: $(f_char ${ban_autodetect}), proto (4/6): $(f_char ${ban_protov4})/$(f_char ${ban_protov6}), log (pre/inp/fwd/lan): $(f_char ${ban_logprerouting})/$(f_char ${ban_loginput})/$(f_char ${ban_logforwardwan})/$(f_char ${ban_logforwardlan}), dedup: $(f_char ${ban_deduplicate}), split: $(f_char ${split}), custom feed: $(f_char ${custom_feed}), allowed only: $(f_char ${ban_allowlistonly})" + json_add_string "nft_info" "ver: ${nft_ver:-"-"}, priority: ${ban_nftpriority}, policy: 
${ban_nftpolicy}, loglevel: ${ban_nftloglevel}, expiry: ${ban_nftexpiry:-"-"}, limit (icmp/syn/udp): ${ban_icmplimit}/${ban_synlimit}/${ban_udplimit}" + json_add_string "run_info" "base: ${ban_basedir}, backup: ${ban_backupdir}, report: ${ban_reportdir}, error: ${ban_errordir}" + json_add_string "run_flags" "auto: $(f_char ${ban_autodetect}), proto (4/6): $(f_char ${ban_protov4})/$(f_char ${ban_protov6}), bcp38: $(f_char ${ban_bcp38}), log (pre/in/out): $(f_char ${ban_logprerouting})/$(f_char ${ban_loginbound})/$(f_char ${ban_logoutbound}), count: $(f_char ${ban_nftcount}), dedup: $(f_char ${ban_deduplicate}), split: $(f_char ${split}), custom feed: $(f_char ${custom_feed}), allowed only: $(f_char ${ban_allowlistonly}), debug: $(f_char ${ban_debug})" json_add_string "last_run" "${runtime:-"-"}" - json_add_string "system_info" "$(date "+%Y-%m-%d %H:%M:%S"), ${ban_sysver}" + json_add_string "system_info" "cores: ${ban_cores}, log: ${ban_logreadcmd##*/}, fetch: ${ban_fetchcmd##*/}, ${ban_sysver}" json_dump >"${ban_rtfile}" } @@ -1299,7 +1681,7 @@ f_getstatus() { else json_get_var value "${key}" >/dev/null 2>&1 if [ "${key}" = "status" ]; then - [ "${value}" = "active" ] && value="${value} ($(f_actual))" || value="${value}" + [ "${value}" = "active" ] && value="${value} ($(f_actual))" fi fi if [ "${key}" != "wan_interfaces" ] && [ "${key}" != "vlan_allow" ] && [ "${key}" != "vlan_block" ]; then @@ -1316,16 +1698,15 @@ f_getstatus() { f_lookup() { local cnt list domain lookup ip elementsv4 elementsv6 start_time end_time duration cnt_domain="0" cnt_ip="0" feed="${1}" - [ -z "${ban_dev}" ] && f_conf start_time="$(date "+%s")" if [ "${feed}" = "allowlist" ]; then - list="$("${ban_awkcmd}" '/^([[:alnum:]_-]{1,63}\.)+[[:alpha:]]+([[:space:]]|$)/{printf "%s ",tolower($1)}' "${ban_allowlist}" 2>/dev/null)" + list="$("${ban_awkcmd}" '/^([[:alnum:]_-]{1,63}\.)+[[:alpha:]]+([[:space:]]|$)/{printf "%s ",tolower($1)}' "${ban_allowlist}" 2>>"${ban_errorlog}")" elif [ "${feed}" = 
"blocklist" ]; then - list="$("${ban_awkcmd}" '/^([[:alnum:]_-]{1,63}\.)+[[:alpha:]]+([[:space:]]|$)/{printf "%s ",tolower($1)}' "${ban_blocklist}" 2>/dev/null)" + list="$("${ban_awkcmd}" '/^([[:alnum:]_-]{1,63}\.)+[[:alpha:]]+([[:space:]]|$)/{printf "%s ",tolower($1)}' "${ban_blocklist}" 2>>"${ban_errorlog}")" fi for domain in ${list}; do - lookup="$("${ban_lookupcmd}" "${domain}" ${ban_resolver} 2>/dev/null | "${ban_awkcmd}" '/^Address[ 0-9]*: /{if(!seen[$NF]++)printf "%s ",$NF}' 2>/dev/null)" + lookup="$("${ban_lookupcmd}" "${domain}" ${ban_resolver} 2>>"${ban_errorlog}" | "${ban_awkcmd}" '/^Address[ 0-9]*: /{if(!seen[$NF]++)printf "%s ",$NF}' 2>>"${ban_errorlog}")" for ip in ${lookup}; do if [ "${ip%%.*}" = "127" ] || [ "${ip%%.*}" = "0" ] || [ -z "${ip%%::*}" ]; then continue @@ -1342,207 +1723,365 @@ f_lookup() { cnt_domain="$((cnt_domain + 1))" done if [ -n "${elementsv4}" ]; then - if ! "${ban_nftcmd}" add element inet banIP "${feed}v4" { ${elementsv4} } >/dev/null 2>&1; then - f_log "info" "can't add lookup file to Set '${feed}v4'" + if ! "${ban_nftcmd}" add element inet banIP "${feed}.v4" { ${elementsv4} } 2>>"${ban_errorlog}"; then + f_log "info" "can't add lookup file to nfset '${feed}.v4'" fi fi if [ -n "${elementsv6}" ]; then - if ! "${ban_nftcmd}" add element inet banIP "${feed}v6" { ${elementsv6} } >/dev/null 2>&1; then - f_log "info" "can't add lookup file to Set '${feed}v6'" + if ! 
"${ban_nftcmd}" add element inet banIP "${feed}.v6" { ${elementsv6} } 2>>"${ban_errorlog}"; then + f_log "info" "can't add lookup file to nfset '${feed}.v6'" fi fi end_time="$(date "+%s")" duration="$(((end_time - start_time) / 60))m $(((end_time - start_time) % 60))s" - f_log "info" "domain lookup finished in ${duration} (${feed}, ${cnt_domain} domains, ${cnt_ip} IPs)" + f_log "debug" "f_lookup ::: feed: ${feed}, domains: ${cnt_domain}, IPs: ${cnt_ip}, duration: ${duration}" } # table statistics # f_report() { - local report_jsn report_txt tmp_val ruleset_raw item table_sets set_cnt set_input set_forwardwan set_forwardlan set_cntinput set_cntforwardwan set_cntforwardlan set_proto set_dport set_details - local expr detail jsnval timestamp autoadd_allow autoadd_block sum_sets sum_setinput sum_setforwardwan sum_setforwardlan sum_setelements sum_cntinput sum_cntforwardwan sum_cntforwardlan - local sum_synflood sum_udpflood sum_icmpflood sum_ctinvalid sum_tcpinvalid output="${1}" + local report_jsn report_txt tmp_val table_json item table_sets set_cnt set_inbound set_outbound set_cntinbound set_cntoutbound set_proto set_dport set_details + local expr detail jsnval timestamp autoadd_allow autoadd_block sum_sets sum_setinbound sum_setoutbound sum_cntelements sum_cntinbound sum_cntoutbound quantity + local chunk map_jsn chain set_elements set_json sum_setelements sum_synflood sum_udpflood sum_icmpflood sum_ctinvalid sum_tcpinvalid sum_setports sum_bcp38 output="${1}" - [ -z "${ban_dev}" ] && f_conf + f_conf f_mkdir "${ban_reportdir}" report_jsn="${ban_reportdir}/ban_report.jsn" report_txt="${ban_reportdir}/ban_report.txt" + map_jsn="${ban_reportdir}/ban_map.jsn" - # json output preparation - # - ruleset_raw="$("${ban_nftcmd}" -tj list ruleset 2>/dev/null)" - table_sets="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.set.table="banIP"&&@.set.family="inet"].set.name')" - sum_sets="0" - sum_setinput="0" - sum_setforwardwan="0" - sum_setforwardlan="0" - 
sum_setelements="0" - sum_cntinput="0" - sum_cntforwardwan="0" - sum_cntforwardlan="0" - sum_synflood="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt-synflood"].*.packets')" - sum_udpflood="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt-udpflood"].*.packets')" - sum_icmpflood="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt-icmpflood"].*.packets')" - sum_ctinvalid="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt-ctinvalid"].*.packets')" - sum_tcpinvalid="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt-tcpinvalid"].*.packets')" - timestamp="$(date "+%Y-%m-%d %H:%M:%S")" - : >"${report_jsn}" - { - printf "%s\n" "{" - printf "\t%s\n" '"sets":{' + if [ "${output}" != "json" ]; then + # json output preparation + # + : >"${report_jsn}" + : >"${map_jsn}" + table_json="$("${ban_nftcmd}" -tj list table inet banIP 2>>"${ban_errorlog}")" + table_sets="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.set.family="inet"].set.name')" + sum_sets="0" + sum_cntelements="0" + sum_setinbound="0" + sum_setoutbound="0" + sum_cntinbound="0" + sum_cntoutbound="0" + sum_setports="0" + sum_setelements="0" + sum_synflood="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt_synflood"].*.packets')" + sum_udpflood="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt_udpflood"].*.packets')" + sum_icmpflood="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt_icmpflood"].*.packets')" + sum_ctinvalid="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt_ctinvalid"].*.packets')" + sum_tcpinvalid="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt_tcpinvalid"].*.packets')" + sum_bcp38="$(printf "%s" "${table_json}" | 
"${ban_jsoncmd}" -qe '@.nftables[@.counter.name="cnt_bcp38"].*.packets')" + timestamp="$(date "+%Y-%m-%d %H:%M:%S")" + + cnt="1" for item in ${table_sets}; do - set_cntinput="" - set_cntforwardwan="" - set_cntforwardlan="" - set_proto="" - set_dport="" - for expr in 0 1; do - [ -z "${set_cntinput}" ] && set_cntinput="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-input\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].counter.packets")" - [ "${expr}" = "1" ] && [ -z "${set_dport}" ] && set_dport="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-input\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].match.right.set")" - [ "${expr}" = "1" ] && [ -z "${set_proto}" ] && set_proto="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-input\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].match.left.payload.protocol")" - [ -z "${set_cntforwardwan}" ] && set_cntforwardwan="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].counter.packets")" - [ "${expr}" = "1" ] && [ -z "${set_dport}" ] && set_dport="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].match.right.set")" - [ "${expr}" = "1" ] && [ -z "${set_proto}" ] && set_proto="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"wan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].match.left.payload.protocol")" - [ -z "${set_cntforwardlan}" ] && set_cntforwardlan="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e 
"@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"lan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].counter.packets")" - [ "${expr}" = "1" ] && [ -z "${set_dport}" ] && set_dport="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"lan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].match.right.set")" - [ "${expr}" = "1" ] && [ -z "${set_proto}" ] && set_proto="$(printf "%s" "${ruleset_raw}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.table=\"banIP\"&&@.rule.chain=\"lan-forward\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].match.left.payload.protocol")" - done - if [ "${ban_reportelements}" = "1" ]; then - set_cnt="$("${ban_nftcmd}" -j list set inet banIP "${item}" 2>/dev/null | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]' | wc -l 2>/dev/null)" - sum_setelements="$((sum_setelements + set_cnt))" - else - set_cnt="" - sum_setelements="n/a" - fi - if [ -n "${set_dport}" ]; then - set_dport="${set_dport//[\{\}\":]/}" - set_dport="${set_dport#\[ *}" - set_dport="${set_dport%* \]}" - set_dport="${set_proto}: $(f_trim "${set_dport}")" - fi - if [ -n "${set_cntinput}" ]; then - set_input="ON" - sum_setinput="$((sum_setinput + 1))" - sum_cntinput="$((sum_cntinput + set_cntinput))" - else - set_input="-" - set_cntinput="" - fi - if [ -n "${set_cntforwardwan}" ]; then - set_forwardwan="ON" - sum_setforwardwan="$((sum_setforwardwan + 1))" - sum_cntforwardwan="$((sum_cntforwardwan + set_cntforwardwan))" - else - set_forwardwan="-" - set_cntforwardwan="" - fi - if [ -n "${set_cntforwardlan}" ]; then - set_forwardlan="ON" - sum_setforwardlan="$((sum_setforwardlan + 1))" - sum_cntforwardlan="$((sum_cntforwardlan + set_cntforwardlan))" - else - set_forwardlan="-" - set_cntforwardlan="" - fi - [ "${sum_sets}" -gt "0" ] && printf "%s\n" "," - printf "\t\t%s\n" "\"${item}\":{" - printf "\t\t\t%s\n" "\"cnt_elements\": \"${set_cnt}\"," - printf "\t\t\t%s\n" "\"cnt_input\": 
\"${set_cntinput}\"," - printf "\t\t\t%s\n" "\"input\": \"${set_input}\"," - printf "\t\t\t%s\n" "\"cnt_forwardwan\": \"${set_cntforwardwan}\"," - printf "\t\t\t%s\n" "\"wan_forward\": \"${set_forwardwan}\"," - printf "\t\t\t%s\n" "\"cnt_forwardlan\": \"${set_cntforwardlan}\"," - printf "\t\t\t%s\n" "\"lan_forward\": \"${set_forwardlan}\"", - printf "\t\t\t%s\n" "\"port\": \"${set_dport:-"-"}\"" - printf "\t\t%s" "}" - sum_sets="$((sum_sets + 1))" + ( + set_json="$("${ban_nftcmd}" -j list set inet banIP "${item}" 2>>"${ban_errorlog}")" + set_cnt="$(printf "%s" "${set_json}" | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]' | "${ban_wccmd}" -l 2>>"${ban_errorlog}")" + set_cntinbound="" + set_cntoutbound="" + set_inbound="" + set_outbound="" + set_proto="" + set_dport="" + set_elements="" + for chain in _inbound _outbound; do + for expr in 0 1 2; do + if [ "${chain}" = "_inbound" ] && [ -z "${set_cntinbound}" ]; then + set_cntinbound="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].counter.packets")" + elif [ "${chain}" = "_outbound" ] && [ -z "${set_cntoutbound}" ]; then + set_cntoutbound="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[${expr}].match.right=\"@${item}\"].expr[*].counter.packets")" + fi + [ -z "${set_proto}" ] && set_proto="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[2].match.right=\"@${item}\"].expr[0].match.right.set")" + [ -z "${set_proto}" ] && set_proto="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[1].match.right=\"@${item}\"].expr[0].match.left.payload.protocol")" + [ -z "${set_dport}" ] && set_dport="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[2].match.right=\"@${item}\"].expr[1].match.right.set")" + [ -z "${set_dport}" 
] && set_dport="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[2].match.right=\"@${item}\"].expr[1].match.right")" + [ -z "${set_dport}" ] && set_dport="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[1].match.right=\"@${item}\"].expr[0].match.right.set")" + [ -z "${set_dport}" ] && set_dport="$(printf "%s" "${table_json}" | "${ban_jsoncmd}" -ql1 -e "@.nftables[@.rule.chain=\"${chain}\"][@.expr[1].match.right=\"@${item}\"].expr[0].match.right")" + done + done + if [ -n "${set_proto}" ] && [ -n "${set_dport}" ]; then + set_proto="${set_proto//[\{\}\":]/}" + set_proto="${set_proto#\[ *}" + set_proto="${set_proto%* \]}" + set_dport="${set_dport//[\{\}\":]/}" + set_dport="${set_dport#\[ *}" + set_dport="${set_dport%* \]}" + set_dport="${set_proto}: $(f_trim "${set_dport}")" + fi + if [ "${ban_nftcount}" = "1" ]; then + set_elements="$(printf "%s" "${set_json}" | "${ban_jsoncmd}" -l50 -qe '@.nftables[*].set.elem[*][@.counter.packets>0].val' | + "${ban_awkcmd}" -F '[ ,]' '{ORS=" ";if($2=="\"range\":"||$2=="\"concat\":")printf"%s, ",$4;else if($2=="\"prefix\":")printf"%s, ",$5;else printf"\"%s\", ",$1}')" + fi + if [ -n "${set_cntinbound}" ]; then + set_inbound="ON" + else + set_inbound="-" + set_cntinbound="" + fi + if [ -n "${set_cntoutbound}" ]; then + set_outbound="ON" + else + set_outbound="-" + set_cntoutbound="" + fi + if [ "${cnt}" = "1" ]; then + printf "%s\n" "{ \ + \"sets\":{ \"${item}\":{ \"cnt_elements\": \"${set_cnt}\", \ + \"cnt_inbound\": \"${set_cntinbound}\", \ + \"inbound\": \"${set_inbound}\", \ + \"cnt_outbound\": \"${set_cntoutbound}\", \ + \"outbound\": \"${set_outbound}\", \ + \"port\": \"${set_dport:-"-"}\", \ + \"set_elements\": [ ${set_elements%%??} ] \ + }" >>"${report_jsn}" + else + printf "%s\n" ", \ + \"${item}\":{ \"cnt_elements\": \"${set_cnt}\", \ + \"cnt_inbound\": \"${set_cntinbound}\", \ + \"inbound\": \"${set_inbound}\", \ + 
\"cnt_outbound\": \"${set_cntoutbound}\", \ + \"outbound\": \"${set_outbound}\", \ + \"port\": \"${set_dport:-"-"}\", \ + \"set_elements\": [ ${set_elements%%??} ] \ + }" >>"${report_jsn}" + fi + ) & + [ "${cnt}" -eq "1" ] || [ "${cnt}" -gt "${ban_cores}" ] && wait -n + cnt="$((cnt + 1))" done - printf "\n\t%s\n" "}," - printf "\t%s\n" "\"timestamp\": \"${timestamp}\"," - printf "\t%s\n" "\"autoadd_allow\": \"$("${ban_grepcmd}" -c "added on ${timestamp% *}" "${ban_allowlist}")\"," - printf "\t%s\n" "\"autoadd_block\": \"$("${ban_grepcmd}" -c "added on ${timestamp% *}" "${ban_blocklist}")\"," - printf "\t%s\n" "\"sum_synflood\": \"${sum_synflood}\"," - printf "\t%s\n" "\"sum_udpflood\": \"${sum_udpflood}\"," - printf "\t%s\n" "\"sum_icmpflood\": \"${sum_icmpflood}\"," - printf "\t%s\n" "\"sum_ctinvalid\": \"${sum_ctinvalid}\"," - printf "\t%s\n" "\"sum_tcpinvalid\": \"${sum_tcpinvalid}\"," - printf "\t%s\n" "\"sum_sets\": \"${sum_sets}\"," - printf "\t%s\n" "\"sum_setinput\": \"${sum_setinput}\"," - printf "\t%s\n" "\"sum_setforwardwan\": \"${sum_setforwardwan}\"," - printf "\t%s\n" "\"sum_setforwardlan\": \"${sum_setforwardlan}\"," - printf "\t%s\n" "\"sum_setelements\": \"${sum_setelements}\"," - printf "\t%s\n" "\"sum_cntinput\": \"${sum_cntinput}\"," - printf "\t%s\n" "\"sum_cntforwardwan\": \"${sum_cntforwardwan}\"," - printf "\t%s\n" "\"sum_cntforwardlan\": \"${sum_cntforwardlan}\"" - printf "%s\n" "}" - } >>"${report_jsn}" - - # text output preparation - # - if [ "${output}" != "json" ] && [ -s "${report_jsn}" ]; then - : >"${report_txt}" + wait + printf "\n%s\n" "} }" >>"${report_jsn}" + + # add sum statistics + # json_init if json_load_file "${report_jsn}" >/dev/null 2>&1; then - json_get_var timestamp "timestamp" >/dev/null 2>&1 - json_get_var autoadd_allow "autoadd_allow" >/dev/null 2>&1 - json_get_var autoadd_block "autoadd_block" >/dev/null 2>&1 - json_get_var sum_synflood "sum_synflood" >/dev/null 2>&1 - json_get_var sum_udpflood "sum_udpflood" 
>/dev/null 2>&1 - json_get_var sum_icmpflood "sum_icmpflood" >/dev/null 2>&1 - json_get_var sum_ctinvalid "sum_ctinvalid" >/dev/null 2>&1 - json_get_var sum_tcpinvalid "sum_tcpinvalid" >/dev/null 2>&1 - json_get_var sum_sets "sum_sets" >/dev/null 2>&1 - json_get_var sum_setinput "sum_setinput" >/dev/null 2>&1 - json_get_var sum_setforwardwan "sum_setforwardwan" >/dev/null 2>&1 - json_get_var sum_setforwardlan "sum_setforwardlan" >/dev/null 2>&1 - json_get_var sum_setelements "sum_setelements" >/dev/null 2>&1 - json_get_var sum_cntinput "sum_cntinput" >/dev/null 2>&1 - json_get_var sum_cntforwardwan "sum_cntforwardwan" >/dev/null 2>&1 - json_get_var sum_cntforwardlan "sum_cntforwardlan" >/dev/null 2>&1 - { - printf "%s\n%s\n%s\n" ":::" "::: banIP Set Statistics" ":::" - printf "%s\n" " Timestamp: ${timestamp}" - printf "%s\n" " ------------------------------" - printf "%s\n" " blocked syn-flood packets : ${sum_synflood}" - printf "%s\n" " blocked udp-flood packets : ${sum_udpflood}" - printf "%s\n" " blocked icmp-flood packets : ${sum_icmpflood}" - printf "%s\n" " blocked invalid ct packets : ${sum_ctinvalid}" - printf "%s\n" " blocked invalid tcp packets: ${sum_tcpinvalid}" - printf "%s\n" " ---" - printf "%s\n" " auto-added IPs to allowlist: ${autoadd_allow}" - printf "%s\n\n" " auto-added IPs to blocklist: ${autoadd_block}" - json_select "sets" >/dev/null 2>&1 - json_get_keys table_sets >/dev/null 2>&1 - if [ -n "${table_sets}" ]; then - printf "%-25s%-15s%-24s%-24s%-24s%s\n" " Set" "| Elements" "| WAN-Input (packets)" "| WAN-Forward (packets)" "| LAN-Forward (packets)" "| Port/Protocol Limit" - printf "%s\n" " ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------" - for item in ${table_sets}; do - printf " %-21s" "${item}" - json_select "${item}" - json_get_keys set_details - for detail in ${set_details}; do - json_get_var jsnval "${detail}" >/dev/null 2>&1 - case "${detail}" in - 
"cnt_elements") - printf "%-15s" "| ${jsnval}" - ;; - "cnt_input" | "cnt_forwardwan" | "cnt_forwardlan") - [ -n "${jsnval}" ] && tmp_val=": ${jsnval}" - ;; - *) - printf "%-24s" "| ${jsnval}${tmp_val}" - tmp_val="" - ;; - esac - done - printf "\n" - json_select ".." + json_select "sets" >/dev/null 2>&1 + json_get_keys table_sets >/dev/null 2>&1 + if [ -n "${table_sets}" ]; then + for item in ${table_sets}; do + sum_sets="$((sum_sets + 1))" + json_select "${item}" + json_get_keys set_details + for detail in ${set_details}; do + case "${detail}" in + "cnt_elements") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + sum_cntelements="$((sum_cntelements + jsnval))" + ;; + "set_elements") + json_get_values jsnval "${detail}" >/dev/null 2>&1 + if [ -n "${jsnval}" ]; then + jsnval="$(printf "%s" "${jsnval}" | "${ban_wccmd}" -w)" + sum_setelements="$((sum_setelements + jsnval))" + fi + ;; + "inbound") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + if [ "${jsnval}" = "ON" ]; then + sum_setinbound="$((sum_setinbound + 1))" + fi + ;; + "outbound") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + if [ "${jsnval}" = "ON" ]; then + sum_setoutbound="$((sum_setoutbound + 1))" + fi + ;; + "cnt_inbound") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + if [ -n "${jsnval}" ]; then + sum_cntinbound="$((sum_cntinbound + jsnval))" + fi + ;; + "cnt_outbound") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + if [ -n "${jsnval}" ]; then + sum_cntoutbound="$((sum_cntoutbound + jsnval))" + fi + ;; + "port") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + if [ "${jsnval}" != "-" ]; then + jsnval="${jsnval//[^0-9 ]/}" + jsnval="$(printf "%s" "${jsnval}" | "${ban_wccmd}" -w)" + sum_setports="$((sum_setports + jsnval))" + fi + ;; + esac done - printf "%s\n" " ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------" - printf "%-25s%-15s%-24s%-24s%s\n" " ${sum_sets}" "| ${sum_setelements}" "| 
${sum_setinput} (${sum_cntinput})" "| ${sum_setforwardwan} (${sum_cntforwardwan})" "| ${sum_setforwardlan} (${sum_cntforwardlan})" + json_select ".." + done + "${ban_sedcmd}" -i ':a;$!N;1,1ba;P;$d;D' "${report_jsn}" + printf "%s\n" "}, \ + \"timestamp\": \"${timestamp}\", \ + \"autoadd_allow\": \"$("${ban_grepcmd}" -c "added on ${timestamp% *}" "${ban_allowlist}")\", \ + \"autoadd_block\": \"$("${ban_grepcmd}" -c "added on ${timestamp% *}" "${ban_blocklist}")\", \ + \"sum_synflood\": \"${sum_synflood}\", \ + \"sum_udpflood\": \"${sum_udpflood}\", \ + \"sum_icmpflood\": \"${sum_icmpflood}\", \ + \"sum_ctinvalid\": \"${sum_ctinvalid}\", \ + \"sum_tcpinvalid\": \"${sum_tcpinvalid}\", \ + \"sum_bcp38\": \"${sum_bcp38}\", \ + \"sum_sets\": \"${sum_sets}\", \ + \"sum_setinbound\": \"${sum_setinbound}\", \ + \"sum_setoutbound\": \"${sum_setoutbound}\", \ + \"sum_cntelements\": \"${sum_cntelements}\", \ + \"sum_cntinbound\": \"${sum_cntinbound}\", \ + \"sum_cntoutbound\": \"${sum_cntoutbound}\", \ + \"sum_setports\": \"${sum_setports}\", \ + \"sum_setelements\": \"${sum_setelements}\" \ + }" >>"${report_jsn}" + fi + fi + + # retrieve/prepare map data + # + if [ "${ban_nftcount}" = "1" ] && [ "${ban_map}" = "1" ] && [ -s "${report_jsn}" ]; then + cnt="1" + f_getdl + json_init + if json_load_file "${ban_rtfile}" >/dev/null 2>&1; then + json_get_values jsnval "active_uplink" >/dev/null 2>&1 + jsnval="${jsnval//\/[0-9][0-9]/}" + jsnval="${jsnval//\/[0-9]/}" + jsnval="\"${jsnval// /\", \"}\"" + if [ "${jsnval}" != '""' ]; then + { + printf "%s" ",[{}" + "${ban_fetchcmd}" ${ban_geoparm} "[ ${jsnval} ]" "${ban_geourl}" 2>>"${ban_errorlog}" | + "${ban_jsoncmd}" -qe '@[*&&@.status="success"]' | "${ban_awkcmd}" -v feed="homeIP" '{printf ",{\"%s\": %s}\n",feed,$0}' + } >>"${map_jsn}" + fi + fi + if [ -s "${map_jsn}" ]; then + json_init + if json_load_file "${report_jsn}" >/dev/null 2>&1; then + json_select "sets" >/dev/null 2>&1 + json_get_keys table_sets >/dev/null 2>&1 + if [ -n 
"${table_sets}" ]; then + for item in ${table_sets}; do + [ "${item%%_*}" = "allowlist" ] && continue + json_select "${item}" + json_get_keys set_details + for detail in ${set_details}; do + if [ "${detail}" = "set_elements" ]; then + json_get_values jsnval "${detail}" >/dev/null 2>&1 + jsnval="\"${jsnval// /\", \"}\"" + fi + done + if [ "${jsnval}" != '""' ]; then + quantity="0" + chunk="" + ( + for ip in ${jsnval}; do + chunk="${chunk} ${ip}" + quantity="$((quantity + 1))" + if [ "${quantity}" -eq "100" ]; then + "${ban_fetchcmd}" ${ban_geoparm} "[ ${chunk%%?} ]" "${ban_geourl}" 2>>"${ban_errorlog}" | + "${ban_jsoncmd}" -qe '@[*&&@.status="success"]' | "${ban_awkcmd}" -v feed="${item//_v/.v}" '{printf ",{\"%s\": %s}\n",feed,$0}' >>"${map_jsn}" + chunk="" + quantity="0" + fi + done + if [ "${quantity}" -gt "0" ]; then + "${ban_fetchcmd}" ${ban_geoparm} "[ ${chunk} ]" "${ban_geourl}" 2>>"${ban_errorlog}" | + "${ban_jsoncmd}" -qe '@[*&&@.status="success"]' | "${ban_awkcmd}" -v feed="${item//_v/.v}" '{printf ",{\"%s\": %s}\n",feed,$0}' >>"${map_jsn}" + fi + ) & + [ "${cnt}" -gt "${ban_cores}" ] && wait -n + cnt="$((cnt + 1))" + fi + json_select ".." 
+ done + wait + fi fi - } >>"${report_txt}" + fi + fi + + # text output preparation + # + if [ "${output}" != "json" ] && [ -s "${report_jsn}" ]; then + json_init + if json_load_file "${report_jsn}" >/dev/null 2>&1; then + json_get_var timestamp "timestamp" >/dev/null 2>&1 + json_get_var autoadd_allow "autoadd_allow" >/dev/null 2>&1 + json_get_var autoadd_block "autoadd_block" >/dev/null 2>&1 + json_get_var sum_synflood "sum_synflood" >/dev/null 2>&1 + json_get_var sum_udpflood "sum_udpflood" >/dev/null 2>&1 + json_get_var sum_icmpflood "sum_icmpflood" >/dev/null 2>&1 + json_get_var sum_ctinvalid "sum_ctinvalid" >/dev/null 2>&1 + json_get_var sum_tcpinvalid "sum_tcpinvalid" >/dev/null 2>&1 + json_get_var sum_bcp38 "sum_bcp38" >/dev/null 2>&1 + json_get_var sum_sets "sum_sets" >/dev/null 2>&1 + json_get_var sum_setinbound "sum_setinbound" >/dev/null 2>&1 + json_get_var sum_setoutbound "sum_setoutbound" >/dev/null 2>&1 + json_get_var sum_cntelements "sum_cntelements" >/dev/null 2>&1 + json_get_var sum_cntinbound "sum_cntinbound" >/dev/null 2>&1 + json_get_var sum_cntoutbound "sum_cntoutbound" >/dev/null 2>&1 + json_get_var sum_setports "sum_setports" >/dev/null 2>&1 + json_get_var sum_setelements "sum_setelements" >/dev/null 2>&1 + { + printf "%s\n%s\n%s\n" ":::" "::: banIP Set Statistics" ":::" + printf "%s\n" " Timestamp: ${timestamp}" + printf "%s\n" " ------------------------------" + printf "%s\n" " blocked syn-flood packets : ${sum_synflood}" + printf "%s\n" " blocked udp-flood packets : ${sum_udpflood}" + printf "%s\n" " blocked icmp-flood packets : ${sum_icmpflood}" + printf "%s\n" " blocked invalid ct packets : ${sum_ctinvalid}" + printf "%s\n" " blocked invalid tcp packets: ${sum_tcpinvalid}" + printf "%s\n" " blocked bcp38 packets : ${sum_bcp38}" + printf "%s\n" " ---" + printf "%s\n" " auto-added IPs to allowlist: ${autoadd_allow}" + printf "%s\n\n" " auto-added IPs to blocklist: ${autoadd_block}" + json_select "sets" >/dev/null 2>&1 + json_get_keys 
table_sets >/dev/null 2>&1 + table_sets="$(printf "%s\n" ${table_sets} | "${ban_sortcmd}")" + if [ -n "${table_sets}" ]; then + printf "%-25s%-15s%-24s%-24s%-24s%-24s\n" " Set" "| Count " "| Inbound (packets)" "| Outbound (packets)" "| Port/Protocol " "| Elements (max. 50) " + printf "%s\n" " ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------" + for item in ${table_sets}; do + printf " %-21s" "${item//_v/.v}" + json_select "${item}" + json_get_keys set_details + for detail in ${set_details}; do + case "${detail}" in + "cnt_elements") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + printf "%-15s" "| ${jsnval}" + ;; + "cnt_inbound" | "cnt_outbound") + json_get_var jsnval "${detail}" >/dev/null 2>&1 + [ -n "${jsnval}" ] && tmp_val=": ${jsnval}" + ;; + "set_elements") + json_get_values jsnval "${detail}" >/dev/null 2>&1 + jsnval="${jsnval// /, }" + printf "%-24s" "| ${jsnval:0:24}" + jsnval="${jsnval:24}" + while [ -n "${jsnval}" ]; do + printf "\n%-25s%-15s%-24s%-24s%-24s%-24s" "" "|" "|" "|" "|" "| ${jsnval:0:24}" + jsnval="${jsnval:24}" + done + ;; + *) + json_get_var jsnval "${detail}" >/dev/null 2>&1 + printf "%-24s" "| ${jsnval}${tmp_val}" + tmp_val="" + ;; + esac + done + printf "\n" + json_select ".." 
+ done + printf "%s\n" " ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------" + printf "%-25s%-15s%-24s%-24s%-24s%-24s\n" " ${sum_sets}" "| ${sum_cntelements}" "| ${sum_setinbound} (${sum_cntinbound})" "| ${sum_setoutbound} (${sum_cntoutbound})" "| ${sum_setports}" "| ${sum_setelements}" + fi + } >>"${report_txt}" + fi fi fi @@ -1551,75 +2090,162 @@ f_report() { case "${output}" in "text") [ -s "${report_txt}" ] && "${ban_catcmd}" "${report_txt}" + : >"${report_txt}" ;; "json") - [ -s "${report_jsn}" ] && "${ban_catcmd}" "${report_jsn}" + if [ "${ban_nftcount}" = "1" ] && [ "${ban_map}" = "1" ]; then + jsn="$("${ban_catcmd}" ${report_jsn} ${map_jsn} 2>>"${ban_errorlog}")" + [ -n "${jsn}" ] && printf "[%s]]\n" "${jsn}" + else + jsn="$("${ban_catcmd}" ${report_jsn} 2>>"${ban_errorlog}")" + [ -n "${jsn}" ] && printf "[%s]\n" "${jsn}" + fi ;; "mail") [ -n "${ban_mailreceiver}" ] && [ -x "${ban_mailcmd}" ] && f_mail + : >"${report_txt}" + ;; + "gen") + printf "%s\n" "$(date "+%s")" >"/var/run/banIP.report" + ;; + *) + : >"${report_txt}" ;; esac - : >"${report_txt}" } -# Set search -# f_search() { - local item table_sets ip proto hold cnt result_flag="/var/run/banIP.search" input="${1}" - - if [ -n "${input}" ]; then - ip="$(printf "%s" "${input}" | "${ban_awkcmd}" 'BEGIN{RS="(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?[[:space:]]*$)"}{printf "%s",RT}')" - [ -n "${ip}" ] && proto="v4" - if [ -z "${proto}" ]; then - ip="$(printf "%s" "${input}" | "${ban_awkcmd}" 'BEGIN{RS="(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)([[:space:]].*|$)"}{printf "%s",RT}')" - [ -n "${ip}" ] && proto="v6" - fi - fi + local item table_sets ip proto cnt tmp_result result res input="${1}" + + # prepare result file + # + tmp_result="/var/run/banIP.search.tmp" + result="/var/run/banIP.search" + + # validate input + # + case 
"${input}" in + ''|*[!0-9A-Fa-f:/.]*) + printf "%s\n%s\n%s\n" ":::" "::: no valid search input" ":::" + printf "%s\n%s\n%s\n" ":::" "::: no valid search input" ":::" >"${result}" + return + ;; + esac + + # determine protocol via awk + # + res="$(printf "%s" "${input}" | "${ban_awkcmd}" ' + { + if (match($0,/([1-9][0-9]{0,2}\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\/([12]?[0-9]|3[012]))?[[:space:]]*$/)) { + printf "v4 %s",substr($0,RSTART,RLENGTH) + } else if (match($0,/(([0-9A-Fa-f]{0,4}:){1,7}[0-9A-Fa-f]{0,4}:?(\/(1?[0-2][0-8]|[0-9][0-9]))?)/)) { + printf "v6 %s",substr($0,RSTART,RLENGTH) + } + }')" + proto="${res%% *}" + ip="${res#* }" + [ "${proto}" != "v4" ] && [ "${proto}" != "v6" ] && proto="" && ip="" + + # get relevant Sets + # if [ -n "${proto}" ]; then - table_sets="$("${ban_nftcmd}" -tj list ruleset 2>/dev/null | "${ban_jsoncmd}" -qe "@.nftables[@.set.table=\"banIP\"&&@.set.type=\"ip${proto}_addr\"].set.name")" + table_sets="$("${ban_nftcmd}" -tj list table inet banIP 2>>"${ban_errorlog}" | \ + "${ban_jsoncmd}" -qe "@.nftables[@.set.type=\"ip${proto}_addr\"].set.name")" else printf "%s\n%s\n%s\n" ":::" "::: no valid search input" ":::" + printf "%s\n%s\n%s\n" ":::" "::: no valid search input" ":::" >"${result}" return fi + + # initial output + # + { + printf "%s\n%s\n%s\n" ":::" "::: banIP Search" ":::" + printf " %s\n" "Looking for IP '${ip}' on $(date "+%Y-%m-%d %H:%M:%S")" + printf " %s\n" "---" + } >"${tmp_result}" + + # search for IP in Sets + # cnt="1" for item in ${table_sets}; do - [ -f "${result_flag}" ] && break + case "${item}" in + *[!a-zA-Z0-9_.]*) + continue + ;; + esac ( if "${ban_nftcmd}" get element inet banIP "${item}" "{ ${ip} }" >/dev/null 2>&1; then - printf "%s\n%s\n%s\n" ":::" "::: banIP Search" ":::" - printf " %s\n" "Looking for IP '${ip}' on $(date "+%Y-%m-%d %H:%M:%S")" - printf " %s\n" "---" - printf " %s\n" "IP found in Set '${item}'" - : >"${result_flag}" + printf " %s\n" "IP found in Set '${item}'" >>"${tmp_result}" fi ) & 
- hold="$((cnt % ban_cores))" - [ "${hold}" = "0" ] && wait + [ "${cnt}" -gt "${ban_cores}" ] && wait -n cnt="$((cnt + 1))" done wait - if [ -f "${result_flag}" ]; then - rm -f "${result_flag}" - else - printf "%s\n%s\n%s\n" ":::" "::: banIP Search" ":::" - printf " %s\n" "Looking for IP '${ip}' on $(date "+%Y-%m-%d %H:%M:%S")" - printf " %s\n" "---" - printf " %s\n" "IP not found" + + # output result + # + if ! "${ban_grepcmd}" -qm1 "found" "${tmp_result}"; then + printf " %s\n" "IP not found" >>"${tmp_result}" fi + "${ban_mvcmd}" -f "${tmp_result}" "${result}" + "${ban_catcmd}" "${result}" } -# Set survey +# Set content # -f_survey() { - local set_elements input="${1}" +f_content() { + local set_raw set_elements input="${1}" filter="${2}" + + # validate input + # + case "${input}" in + ""|*[!a-zA-Z0-9_.]*) + printf "%s\n%s\n%s\n" ":::" "::: no valid Set input" ":::" + return + ;; + esac + + case "${filter}" in + ""|"false") + filter="false" + ;; + "true") + filter="true" + ;; + *) + printf "%s\n%s\n%s\n" ":::" "::: no valid filter input" ":::" + return + ;; + esac - if [ -z "${input}" ]; then - printf "%s\n%s\n%s\n" ":::" "::: no valid survey input" ":::" + # check if Set exists + # + if ! 
"${ban_nftcmd}" -t list set inet banIP "${input}" >/dev/null 2>&1; then + printf "%s\n%s\n%s\n" ":::" "::: Set '${input}' not found" ":::" return fi - set_elements="$("${ban_nftcmd}" -j list set inet banIP "${input}" 2>/dev/null | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]')" - printf "%s\n%s\n%s\n" ":::" "::: banIP Survey" ":::" - printf " %s\n" "List of elements in the Set '${input}' on $(date "+%Y-%m-%d %H:%M:%S")" + + # get Set content + # + set_raw="$("${ban_nftcmd}" -j list set inet banIP "${input}" 2>>"${ban_errorlog}")" + if [ "$(uci_get banip global ban_nftcount)" = "1" ]; then + if [ "${filter}" = "true" ]; then + set_elements="$(printf "%s" "${set_raw}" | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*][@.counter.packets>0].*' | + "${ban_awkcmd}" 'NR%2==1{ip=$0;next}BEGIN{FS="[:,{}\"]+"}{print ip ", packets: "$4 }')" + else + set_elements="$(printf "%s" "${set_raw}" | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*].elem["val","counter"]' | + "${ban_awkcmd}" 'NR%2==1{ip=$0;next}BEGIN{FS="[:,{}\"]+"}{print ip ", packets: "$4 }')" + fi + else + set_elements="$(printf "%s" "${set_raw}" | "${ban_jsoncmd}" -qe '@.nftables[*].set.elem[*]')" + fi + + # output result + # + printf "%s\n%s\n%s\n" ":::" "::: banIP Set Content" ":::" + printf " %s\n" "List elements of the Set '${input}' on $(date "+%Y-%m-%d %H:%M:%S")" printf " %s\n" "---" [ -n "${set_elements}" ] && printf "%s\n" "${set_elements}" || printf " %s\n" "empty Set" } @@ -1643,89 +2269,138 @@ f_mail() { # ban_mailhead="From: ${ban_mailsender}\nTo: ${ban_mailreceiver}\nSubject: ${ban_mailtopic}\nReply-to: ${ban_mailsender}\nMime-Version: 1.0\nContent-Type: text/html;charset=utf-8\nContent-Disposition: inline\n\n" printf "%b" "${ban_mailhead}${mail_text}" | "${ban_mailcmd}" --timeout=10 ${msmtp_debug} -a "${ban_mailprofile}" "${ban_mailreceiver}" >/dev/null 2>&1 - f_log "info" "send status mail (${?})" - f_log "debug" "f_mail ::: notification: ${ban_mailnotification}, template: ${ban_mailtemplate}, 
profile: ${ban_mailprofile}, receiver: ${ban_mailreceiver}, rc: ${?}" + f_log "debug" "f_mail ::: notification: ${ban_mailnotification}, template: ${ban_mailtemplate}, profile: ${ban_mailprofile}, receiver: ${ban_mailreceiver}, rc: ${?}" } # log monitor # f_monitor() { - local daemon logread_cmd loglimit_cmd nft_expiry line proto ip log_raw log_count idx prefix cidr rdap_log rdap_rc rdap_idx rdap_info + local logread_cmd loglimit_cmd logread_filter nft_expiry line ip_proto ip proto log_count idx base cidr rdap_log rdap_rc rdap_idx rdap_info - if [ -f "${ban_logreadfile}" ]; then - logread_cmd="${ban_logreadcmd} -qf ${ban_logreadfile} 2>/dev/null | ${ban_grepcmd} -e \"${ban_logterm%%??}\" 2>/dev/null" + # log reading configuration + # + if [ -f "${ban_logreadfile}" ] && [ -x "${ban_logreadcmd}" ] && [ "${ban_logreadcmd##*/}" = "tail" ]; then + logread_cmd="${ban_logreadcmd} -qf ${ban_logreadfile} 2>/dev/null" loglimit_cmd="${ban_logreadcmd} -qn ${ban_loglimit} ${ban_logreadfile} 2>/dev/null" - else - logread_cmd="${ban_logreadcmd} -fe \"${ban_logterm%%??}\" 2>/dev/null" + logread_filter="${ban_grepcmd} -e \"${ban_logterm}\" 2>/dev/null" + elif [ -x "${ban_logreadcmd}" ] && [ "${ban_logreadcmd##*/}" = "logread" ]; then + logread_cmd="${ban_logreadcmd} -fe \"${ban_logterm}\" 2>/dev/null" loglimit_cmd="${ban_logreadcmd} -l ${ban_loglimit} 2>/dev/null" + logread_filter="" fi - if [ -x "${ban_logreadcmd}" ] && [ -n "${logread_cmd}" ] && [ -n "${loglimit_cmd}" ] && [ -n "${ban_logterm%%??}" ] && [ "${ban_loglimit}" != "0" ]; then + # start log monitoring + # + if [ -n "${logread_cmd}" ] && [ -n "${loglimit_cmd}" ] && [ -n "${ban_logterm}" ] && [ "${ban_loglimit}" != "0" ]; then f_log "info" "start detached banIP log service (${ban_logreadcmd})" - [ -n "${ban_nftexpiry}" ] && nft_expiry="timeout $(printf "%s" "${ban_nftexpiry}" | "${ban_grepcmd}" -oE "([0-9]+[d|h|m|s])+$")" - eval "${logread_cmd}" | - while read -r line; do - proto="" - : >"${ban_rdapfile}" - if [ -z 
"${daemon}" ]; then - daemon="$(printf "%s" "${line}" | "${ban_awkcmd}" 'BEGIN{RS="dropbear"}{if(!seen[RT]++)printf "%s",RT}')" - [ -z "${daemon}" ] && daemon="sshd" - fi - ip="$(printf "%s" "${line}" | "${ban_awkcmd}" 'BEGIN{RS="(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5]))+"}{if(!seen[RT]++)printf "%s ",RT}')" - ip="$(f_trim "${ip}")" - ip="${ip##* }" - [ -n "${ip}" ] && [ "${ip%%.*}" != "127" ] && [ "${ip%%.*}" != "0" ] && proto="v4" - if [ -z "${proto}" ]; then - if [ "${daemon}" = "dropbear" ]; then - ip="$(printf "%s" "${line}" | "${ban_awkcmd}" 'BEGIN{RS="([A-Fa-f0-9]{1,4}::?){3,7}([A-Fa-f0-9]:?)+"}{if(!seen[RT]++)printf "%s ",RT}')" - ip="${ip%:*}" + if printf "%s" "${ban_nftexpiry}" | grep -qE '^([1-9][0-9]*(ms|s|m|h|d|w))+$'; then + nft_expiry="timeout ${ban_nftexpiry}" + fi + + # retrieve/cache current allowlist/blocklist content + # + allow_v4="$("${ban_nftcmd}" list set inet banIP allowlist.v4 2>/dev/null)" + allow_v6="$("${ban_nftcmd}" list set inet banIP allowlist.v6 2>/dev/null)" + block_v4="$("${ban_nftcmd}" list set inet banIP blocklist.v4 2>/dev/null)" + block_v6="$("${ban_nftcmd}" list set inet banIP blocklist.v6 2>/dev/null)" + + # log monitoring loop + # + pipeline_cmd="${logread_cmd}" + [ -n "${logread_filter}" ] && pipeline_cmd="${pipeline_cmd} | ${logread_filter}" + eval "${pipeline_cmd}" | while read -r line; do + proto="" + base="" + : >"${ban_rdapfile}" + + # IP detection + # + ip_proto=$(printf "%s" "${line}" | "${ban_awkcmd}" ' + { + gsub(/[<>[\]]/, "", $0) + sub(/%.*/, "", $0) + sub(/:[0-9]+([ >]|$)/, "\\1", $0) + if (match($0, /([0-9]{1,3}\.){3}[0-9]{1,3}/, m4)) { + if (m4[0] !~ /^127\./ && m4[0] !~ /^0\./) { + print m4[0] " .v4" + exit + } + } + if (match($0, /([A-Fa-f0-9]{1,4}:){2,7}[A-Fa-f0-9]{1,4}/, m6)) { + if (m6[0] ~ /^[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}$/) next + if (m6[0] !~ /^([A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}$/) { + print m6[0] " .v6" + exit + } + } + }' + ) + ip="${ip_proto% *}" + 
proto="${ip_proto#* }" + + # process detected IP + # + if [ -n "${proto}" ]; then + case "${proto}" in + .v4) + case "${allow_v4} ${block_v4}" in + *" ${ip} "* ) + continue + ;; + esac + ;; + .v6) + case "${allow_v6} ${block_v6}" in + *" ${ip} "* ) + continue + ;; + esac + ;; + esac + f_log "info" "suspicious IP '${ip}'" + log_count="$(${loglimit_cmd} | "${ban_grepcmd}" -F -c "suspicious IP '${ip}'")" + if [ "${log_count}" -ge "${ban_logcount}" ]; then + if "${ban_nftcmd}" add element inet banIP "blocklist${proto}" { ${ip} ${nft_expiry} } >/dev/null 2>&1; then + f_log "info" "add IP '${ip}' (expiry: ${ban_nftexpiry:-"-"}) to blocklist${proto} set" + [ "${proto}" = ".v4" ] && block_v4="$("${ban_nftcmd}" list set inet banIP blocklist.v4 2>/dev/null)" + [ "${proto}" = ".v6" ] && block_v6="$("${ban_nftcmd}" list set inet banIP blocklist.v6 2>/dev/null)" else - ip="$(printf "%s" "${line}" | "${ban_awkcmd}" 'BEGIN{RS="([A-Fa-f0-9]{1,4}::?){3,7}[A-Fa-f0-9]{1,4}"}{if(!seen[RT]++)printf "%s ",RT}')" + f_log "info" "failed to add IP '${ip}' to blocklist${proto} set" fi - ip="$(f_trim "${ip}")" - ip="${ip##* }" - [ -n "${ip%%::*}" ] && proto="v6" - fi - if [ -n "${proto}" ] && ! "${ban_nftcmd}" get element inet banIP allowlist"${proto}" "{ ${ip} }" >/dev/null 2>&1 && ! 
"${ban_nftcmd}" get element inet banIP blocklist"${proto}" "{ ${ip} }" >/dev/null 2>&1; then - f_log "info" "suspicious IP '${ip}'" - log_raw="$(eval ${loglimit_cmd})" - log_count="$(printf "%s\n" "${log_raw}" | "${ban_grepcmd}" -c "suspicious IP '${ip}'")" - if [ "${log_count}" -ge "${ban_logcount}" ]; then - if "${ban_nftcmd}" add element inet banIP "blocklist${proto}" { ${ip} ${nft_expiry} } >/dev/null 2>&1; then - f_log "info" "add IP '${ip}' (expiry: ${ban_nftexpiry:-"-"}) to blocklist${proto} set" - fi - if [ "${ban_autoblocksubnet}" = "1" ]; then - rdap_log="$("${ban_fetchcmd}" ${ban_rdapparm} "${ban_rdapfile}" "${ban_rdapurl}${ip}" 2>&1)" - rdap_rc="${?}" - if [ "${rdap_rc}" = "0" ] && [ -s "${ban_rdapfile}" ]; then - [ "${proto}" = "v4" ] && rdap_idx="$("${ban_jsoncmd}" -i "${ban_rdapfile}" -qe '@.cidr0_cidrs[@.v4prefix].*' | "${ban_awkcmd}" '{ORS=" "; print}')" - [ "${proto}" = "v6" ] && rdap_idx="$("${ban_jsoncmd}" -i "${ban_rdapfile}" -qe '@.cidr0_cidrs[@.v6prefix].*' | "${ban_awkcmd}" '{ORS=" "; print}')" - rdap_info="$("${ban_jsoncmd}" -l1 -i "${ban_rdapfile}" -qe '@.country' -qe '@.notices[@.title="Source"].description[1]' | "${ban_awkcmd}" 'BEGIN{RS="";FS="\n"}{printf "%s, %s",$1,$2}')" - [ -z "${rdap_info}" ] && rdap_info="$("${ban_jsoncmd}" -l1 -i "${ban_rdapfile}" -qe '@.notices[0].links[0].value' | "${ban_awkcmd}" 'BEGIN{FS="[/.]"}{printf"%s, %s","n/a",toupper($4)}')" - for idx in ${rdap_idx}; do - if [ -z "${prefix}" ]; then - prefix="${idx}" - continue - else - cidr="${prefix}/${idx}" + if [ "${ban_autoblocksubnet}" = "1" ]; then + rdap_log="$("${ban_fetchcmd}" ${ban_rdapparm} "${ban_rdapfile}" "${ban_rdapurl}${ip}" 2>&1)" + rdap_rc="${?}" + if [ "${rdap_rc}" = "0" ] && [ -s "${ban_rdapfile}" ]; then + [ "${proto}" = ".v4" ] && rdap_idx="$("${ban_jsoncmd}" -i "${ban_rdapfile}" -qe '@.cidr0_cidrs[@.v4prefix].*' | "${ban_awkcmd}" '{ORS=" "; print}')" + [ "${proto}" = ".v6" ] && rdap_idx="$("${ban_jsoncmd}" -i "${ban_rdapfile}" -qe 
'@.cidr0_cidrs[@.v6prefix].*' | "${ban_awkcmd}" '{ORS=" "; print}')" + rdap_info="$("${ban_jsoncmd}" -l1 -i "${ban_rdapfile}" -qe '@.country' -qe '@.notices[@.title="Source"].description[1]' | "${ban_awkcmd}" 'BEGIN{RS="";FS="\n"}{printf "%s, %s",$1,$2}')" + [ -z "${rdap_info}" ] && rdap_info="$("${ban_jsoncmd}" -l1 -i "${ban_rdapfile}" -qe '@.notices[0].links[0].value' | "${ban_awkcmd}" 'BEGIN{FS="[/.]"}{printf"%s, %s","n/a",toupper($4)}')" + for idx in ${rdap_idx}; do + if [ -z "${base}" ]; then + base="${idx}" + continue + else + if [ -n "${base%%::*}" ] && [ "${base%%.*}" != "127" ] && [ "${base%%.*}" != "0" ]; then + cidr="${base}/${idx}" if "${ban_nftcmd}" add element inet banIP "blocklist${proto}" { ${cidr} ${nft_expiry} } >/dev/null 2>&1; then f_log "info" "add IP range '${cidr}' (source: ${rdap_info:-"n/a"} ::: expiry: ${ban_nftexpiry:-"-"}) to blocklist${proto} set" fi - prefix="" fi - done - else - f_log "info" "rdap request failed (rc: ${rdap_rc:-"-"}/log: ${rdap_log})" - fi - fi - if [ -z "${ban_nftexpiry}" ] && [ "${ban_autoblocklist}" = "1" ] && ! "${ban_grepcmd}" -q "^${ip}" "${ban_blocklist}"; then - printf "%-45s%s\n" "${ip}" "# added on $(date "+%Y-%m-%d %H:%M:%S")" >>"${ban_blocklist}" - f_log "info" "add IP '${ip}' to local blocklist" + base="" + fi + done + else + f_log "info" "rdap request failed (rc: ${rdap_rc:-"-"}/log: ${rdap_log})" fi fi + if [ -z "${ban_nftexpiry}" ] && [ "${ban_autoblocklist}" = "1" ] && ! 
"${ban_grepcmd}" -q "^${ip}" "${ban_blocklist}"; then + printf "%-45s%s\n" "${ip}" "# added on $(date "+%Y-%m-%d %H:%M:%S")" >>"${ban_blocklist}" + f_log "info" "add IP '${ip}' to local blocklist" + fi fi - done + fi + done else f_log "info" "start detached no-op banIP service" sleep infinity @@ -1746,7 +2421,6 @@ fi # ban_awkcmd="$(f_cmd gawk awk)" ban_catcmd="$(f_cmd cat)" -ban_fw4cmd="$(f_cmd fw4)" ban_grepcmd="$(f_cmd grep)" ban_jsoncmd="$(f_cmd jsonfilter)" ban_logcmd="$(f_cmd logger)" @@ -1754,10 +2428,14 @@ ban_lookupcmd="$(f_cmd nslookup)" ban_mailcmd="$(f_cmd msmtp optional)" ban_nftcmd="$(f_cmd nft)" ban_pgrepcmd="$(f_cmd pgrep)" +ban_xargscmd="$(f_cmd xargs)" ban_sedcmd="$(f_cmd sed)" ban_ubuscmd="$(f_cmd ubus)" ban_zcatcmd="$(f_cmd zcat)" ban_gzipcmd="$(f_cmd gzip)" +ban_sortcmd="$(f_cmd sort)" +ban_wccmd="$(f_cmd wc)" +ban_mvcmd="$(f_cmd mv)" f_system if [ "${ban_action}" != "stop" ]; then diff --git a/packages/banip/files/banip-service.sh b/packages/banip/files/banip-service.sh index 1f38e07ec..b59cbb5c3 100755 --- a/packages/banip/files/banip-service.sh +++ b/packages/banip/files/banip-service.sh @@ -1,6 +1,6 @@ #!/bin/sh # banIP main service script - ban incoming and outgoing IPs via named nftables Sets -# Copyright (c) 2018-2024 Dirk Brenken (dev@brenken.org) +# Copyright (c) 2018-2026 Dirk Brenken (dev@brenken.org) # This is free software, licensed under the GNU General Public License v3. # (s)hellcheck exceptions @@ -9,29 +9,28 @@ ban_action="${1}" ban_starttime="$(date "+%s")" ban_funlib="/usr/lib/banip-functions.sh" -[ -z "${ban_ver}" ] && . "${ban_funlib}" +[ -z "${ban_bver}" ] && . 
"${ban_funlib}" # load config and set banIP environment # [ "${ban_action}" = "boot" ] && sleep "$(uci_get banip global ban_triggerdelay "20")" f_conf -f_log "info" "start banIP processing (${ban_action}, ${ban_ver:-"n/a"})" +f_log "info" "start banIP processing (${ban_action}, ${ban_bver:-"n/a"})" f_genstatus "processing" f_tmp -f_getfetch +f_getdl f_getif f_getdev -f_getuplink +f_getup f_mkdir "${ban_backupdir}" f_mkfile "${ban_allowlist}" f_mkfile "${ban_blocklist}" +f_rmdir "${ban_errordir}" # firewall/fw4 pre-check # -if [ ! -x "${ban_fw4cmd}" ] || [ ! -x "/etc/init.d/firewall" ]; then - f_log "err" "firewall/fw4 not found" -elif ! /etc/init.d/firewall status >/dev/null 2>&1; then - f_log "info" "firewall/fw4 is not running" +if ! /etc/init.d/firewall status >/dev/null 2>&1; then + f_log "info" "the main firewall is not running" fi # init banIP nftables namespace @@ -40,28 +39,34 @@ if [ "${ban_action}" != "reload" ] || ! "${ban_nftcmd}" list chain inet banIP pr f_nftinit "${ban_tmpfile}".init.nft fi -# handle downloads +# start banIP processing # f_log "info" "start banIP download processes" -if [ "${ban_allowlistonly}" = "1" ]; then - ban_feed="" -else - f_getfeed -fi +f_getfeed [ "${ban_deduplicate}" = "1" ] && printf "\n" >"${ban_tmpfile}.deduplicate" +# handle downloads +# cnt="1" for feed in allowlist ${ban_feed} blocklist; do + # local feeds (sequential processing) # if [ "${feed}" = "allowlist" ] || [ "${feed}" = "blocklist" ]; then for proto in 4MAC 6MAC 4 6; do - [ "${feed}" = "blocklist" ] && wait - f_down "${feed}" "${proto}" + f_down "${feed}" "${proto}" "-" "-" "inout" done continue fi + # skip external feeds in allowlistonly mode + # + if [ "${ban_allowlistonly}" = "1" ] && + ! printf "%s" "${ban_feedin}" | "${ban_grepcmd}" -q "allowlist" && + ! printf "%s" "${ban_feedout}" | "${ban_grepcmd}" -q "allowlist"; then + continue + fi + # external feeds (parallel processing on multicore hardware) # if ! 
json_select "${feed}" >/dev/null 2>&1; then @@ -70,7 +75,7 @@ for feed in allowlist ${ban_feed} blocklist; do uci_commit "banip" continue fi - json_objects="url_4 rule_4 url_6 rule_6 flag" + json_objects="url_4 url_6 rule chain flag" for object in ${json_objects}; do eval json_get_var feed_"${object}" '${object}' >/dev/null 2>&1 done @@ -78,43 +83,53 @@ for feed in allowlist ${ban_feed} blocklist; do # skip incomplete feeds # - if { { [ -n "${feed_url_4}" ] && [ -z "${feed_rule_4}" ]; } || { [ -z "${feed_url_4}" ] && [ -n "${feed_rule_4}" ]; }; } || - { { [ -n "${feed_url_6}" ] && [ -z "${feed_rule_6}" ]; } || { [ -z "${feed_url_6}" ] && [ -n "${feed_rule_6}" ]; }; } || - { [ -z "${feed_url_4}" ] && [ -z "${feed_rule_4}" ] && [ -z "${feed_url_6}" ] && [ -z "${feed_rule_6}" ]; }; then + if { [ -z "${feed_url_4}" ] && [ -z "${feed_url_6}" ]; } || \ + { { [ -n "${feed_url_4}" ] || [ -n "${feed_url_6}" ]; } && [ -z "${feed_rule}" ]; }; then f_log "info" "skip incomplete feed '${feed}'" continue fi - # handle IPv4/IPv6 feeds with a single download URL + # handle IPv4 feeds # - if [ "${feed_url_4}" = "${feed_url_6}" ]; then - if [ "${ban_protov4}" = "1" ] && [ -n "${feed_url_4}" ] && [ -n "${feed_rule_4}" ]; then - (f_down "${feed}" "4" "${feed_url_4}" "${feed_rule_4}" "${feed_flag}") & - feed_url_6="local" - wait - fi - if [ "${ban_protov6}" = "1" ] && [ -n "${feed_url_6}" ] && [ -n "${feed_rule_6}" ]; then - (f_down "${feed}" "6" "${feed_url_6}" "${feed_rule_6}" "${feed_flag}") & - hold="$((cnt % ban_cores))" - [ "${hold}" = "0" ] && wait - cnt="$((cnt + 1))" + if [ "${ban_protov4}" = "1" ] && [ -n "${feed_url_4}" ] && [ -n "${feed_rule}" ]; then + feed_ipv="4" + if [ "${feed}" = "country" ] && [ "${ban_countrysplit}" = "1" ]; then + for country in ${ban_country}; do + f_down "${feed}.${country}" "${feed_ipv}" "${feed_url_4}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}" + done + elif [ "${feed}" = "asn" ] && [ "${ban_asnsplit}" = "1" ]; then + for asn in 
${ban_asn}; do + f_down "${feed}.${asn}" "${feed_ipv}" "${feed_url_4}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}" + done + else + if [ "${feed_url_4}" = "${feed_url_6}" ]; then + feed_url_6="local" + f_down "${feed}" "${feed_ipv}" "${feed_url_4}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}" + else + (f_down "${feed}" "${feed_ipv}" "${feed_url_4}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}") & + [ "${cnt}" -gt "${ban_cores}" ] && wait -n + cnt="$((cnt + 1))" + fi fi - continue fi - # handle IPv4/IPv6 feeds with separate download URLs + # handle IPv6 feeds # - if [ "${ban_protov4}" = "1" ] && [ -n "${feed_url_4}" ] && [ -n "${feed_rule_4}" ]; then - (f_down "${feed}" "4" "${feed_url_4}" "${feed_rule_4}" "${feed_flag}") & - hold="$((cnt % ban_cores))" - [ "${hold}" = "0" ] && wait - cnt="$((cnt + 1))" - fi - if [ "${ban_protov6}" = "1" ] && [ -n "${feed_url_6}" ] && [ -n "${feed_rule_6}" ]; then - (f_down "${feed}" "6" "${feed_url_6}" "${feed_rule_6}" "${feed_flag}") & - hold="$((cnt % ban_cores))" - [ "${hold}" = "0" ] && wait - cnt="$((cnt + 1))" + if [ "${ban_protov6}" = "1" ] && [ -n "${feed_url_6}" ] && [ -n "${feed_rule}" ]; then + feed_ipv="6" + if [ "${feed}" = "country" ] && [ "${ban_countrysplit}" = "1" ]; then + for country in ${ban_country}; do + f_down "${feed}.${country}" "${feed_ipv}" "${feed_url_6}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}" + done + elif [ "${feed}" = "asn" ] && [ "${ban_asnsplit}" = "1" ]; then + for asn in ${ban_asn}; do + f_down "${feed}.${asn}" "${feed_ipv}" "${feed_url_6}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}" + done + else + (f_down "${feed}" "${feed_ipv}" "${feed_url_6}" "${feed_rule}" "${feed_chain:-"in"}" "${feed_flag}") & + [ "${cnt}" -gt "${ban_cores}" ] && wait -n + cnt="$((cnt + 1))" + fi fi done wait @@ -128,14 +143,14 @@ f_log "info" "start banIP domain lookup" cnt="1" for list in allowlist blocklist; do (f_lookup "${list}") & - hold="$((cnt % ban_cores))" - [ "${hold}" = 
"0" ] && wait + [ "${cnt}" -gt "${ban_cores}" ] && wait -n cnt="$((cnt + 1))" done wait # end processing # +f_log "info" "finish banIP processing" ( sleep 5 if [ "${ban_mailnotification}" = "1" ] && [ -n "${ban_mailreceiver}" ] && [ -x "${ban_mailcmd}" ]; then diff --git a/packages/banip/files/banip.cgi b/packages/banip/files/banip.cgi index 975e3c5b2..770bce53e 100644 --- a/packages/banip/files/banip.cgi +++ b/packages/banip/files/banip.cgi @@ -1,6 +1,6 @@ #!/bin/sh # banIP cgi remote logging script - ban incoming and outgoing IPs via named nftables Sets -# Copyright (c) 2018-2024 Dirk Brenken (dev@brenken.org) +# Copyright (c) 2018-2026 Dirk Brenken (dev@brenken.org) # This is free software, licensed under the GNU General Public License v3. # (s)hellcheck exceptions @@ -19,7 +19,7 @@ request_decode() { value="${request#*=}" token="$(uci -q get banip.global.ban_remotetoken)" - if [ -n "${key}" ] && [ -n "${value}" ] && [ "${key}" = "${token}" ] && /etc/init.d/banip running; then + if [ -n "${token}" ] && [ -n "${key}" ] && [ -n "${value}" ] && [ "${key}" = "${token}" ] && /etc/init.d/banip running; then [ -r "/usr/lib/banip-functions.sh" ] && { . 
"/usr/lib/banip-functions.sh"; f_conf; } if [ "${ban_remotelog}" = "1" ] && [ -x "${ban_logreadcmd}" ] && [ -n "${ban_logterm%%??}" ] && [ "${ban_loglimit}" != "0" ]; then f_log "info" "received a suspicious remote IP '${value}'" diff --git a/packages/banip/files/banip.countries b/packages/banip/files/banip.countries index 5c0aa00b8..99e8dfc88 100644 --- a/packages/banip/files/banip.countries +++ b/packages/banip/files/banip.countries @@ -1,4 +1,5 @@ af APNIC Afghanistan +ap AFRINIC ARIPO ax RIPE Åland Islands al RIPE Albania dz AFRINIC Algeria @@ -28,7 +29,6 @@ bo LACNIC Bolivia bq LACNIC Bonaire ba RIPE Bosnia & Herzegowina bw AFRINIC Botswana -bv ARIN Bouvet Island br LACNIC Brazil io APNIC British Indian Ocean Territory bn APNIC Brunei @@ -44,7 +44,6 @@ cf AFRINIC Central African Republic td AFRINIC Chad cl LACNIC Chile cn APNIC China -cx APNIC Christmas Island cc APNIC Cocos Islands co LACNIC Colombia km AFRINIC Comoros @@ -70,6 +69,7 @@ er AFRINIC Eritrea ee RIPE Estonia sz AFRINIC Eswatini et AFRINIC Ethiopia +eu RIPE European Union fk LACNIC Falkland Islands fo RIPE Faroe Islands fj APNIC Fiji @@ -77,7 +77,6 @@ fi RIPE Finland fr RIPE France gf LACNIC French Guiana pf APNIC French Polynesia -tf APNIC French Southern Territories ga AFRINIC Gabon gm AFRINIC Gambia ge RIPE Georgia @@ -95,7 +94,6 @@ gn AFRINIC Guinea gw AFRINIC Guinea-Bissau gy LACNIC Guyana ht LACNIC Haiti -hm ARIN Heard & McDonald Islands hn LACNIC Honduras hk APNIC Hong Kong hu RIPE Hungary @@ -172,7 +170,6 @@ pg APNIC Papua New Guinea py LACNIC Paraguay pe LACNIC Peru ph APNIC Philippines -pn APNIC Pitcairn pl RIPE Poland pt RIPE Portugal pr ARIN Puerto Rico @@ -181,7 +178,6 @@ re AFRINIC Reunion ro RIPE Romania ru RIPE Russian Federation rw AFRINIC Rwanda -sh ARIN Saint Helena bl ARIN Saint Barthélemy kn ARIN Saint Kitts & Nevis lc ARIN Saint Lucia @@ -203,14 +199,12 @@ si RIPE Slovenia sb APNIC Solomon Islands so AFRINIC Somalia za AFRINIC South Africa -gs LACNIC South Georgia kr APNIC 
South Korea ss AFRINIC South Sudan es RIPE Spain lk APNIC Sri Lanka sd AFRINIC Sudan sr LACNIC Suriname -sj RIPE Svalbard & Jan Mayen Islands se RIPE Sweden ch RIPE Switzerland sy RIPE Syrian @@ -243,7 +237,6 @@ vn APNIC Vietnam vg ARIN Virgin Islands (British) vi ARIN Virgin Islands (U.S.) wf APNIC Wallis & Futuna Islands -eh AFRINIC Western Sahara ye RIPE Yemen zm AFRINIC Zambia zw AFRINIC Zimbabwe diff --git a/packages/banip/files/banip.feeds b/packages/banip/files/banip.feeds index 2c6fa8329..2f587aced 100644 --- a/packages/banip/files/banip.feeds +++ b/packages/banip/files/banip.feeds @@ -1,251 +1,259 @@ { + "asn":{ + "url_4": "https://asn.ipinfo.app/api/text/list/", + "url_6": "https://asn.ipinfo.app/api/text/list/", + "rule": "feed 1", + "chain": "in", + "descr": "ASN IP segments" + }, "backscatterer":{ "url_4": "http://wget-mirrors.uceprotect.net/rbldnsd-all/ips.backscatterer.org.gz", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "backscatterer IPs", "flag": "gz" }, "becyber":{ - "url_4": "https://raw.githubusercontent.com/duggytuxy/Intelligence_IPv4_Blocklists/refs/heads/main/agressive_ips_dst_fr_be_blocklist.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "url_4": "https://raw.githubusercontent.com/duggytuxy/Data-Shield_IPv4_Blocklist/refs/heads/main/prod_data-shield_ipv4_blocklist.txt", + "rule": "feed 1", "chain": "in", "descr": "malicious attacker IPs" }, "binarydefense":{ "url_4": "https://iplists.firehol.org/files/bds_atif.ipset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "binary defense banlist" }, 
"bogon":{ "url_4": "https://www.team-cymru.org/Services/Bogons/fullbogons-ipv4.txt", "url_6": "https://www.team-cymru.org/Services/Bogons/fullbogons-ipv6.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "rule_6": "/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "bogon prefixes" }, "bruteforceblock":{ "url_4": "https://danger.rulez.sk/projects/bruteforceblocker/blist.php", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[[:space:]]/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "bruteforceblocker IPs" }, "cinsscore":{ "url_4": "https://cinsscore.com/list/ci-badguys.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "suspicious attacker IPs" }, "country":{ "url_4": "https://www.ipdeny.com/ipblocks/data/aggregated/", "url_6": "https://www.ipdeny.com/ipv6/ipaddresses/aggregated/", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "rule_6": "/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "country blocks" }, "debl":{ "url_4": "https://lists.blocklist.de/lists/all.txt", "url_6": "https://lists.blocklist.de/lists/all.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "rule_6": 
"/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "fail2ban IP blocklist" }, + "dns":{ + "url_4": "https://public-dns.info/nameservers-all.txt", + "url_6": "https://public-dns.info/nameservers-all.txt", + "rule": "feed 1", + "chain": "out", + "descr": "public DNS-Server", + "flag": "tcp udp 53 853" + }, "doh":{ "url_4": "https://raw.githubusercontent.com/dibdot/DoH-IP-blocklists/master/doh-ipv4.txt", "url_6": "https://raw.githubusercontent.com/dibdot/DoH-IP-blocklists/master/doh-ipv6.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[[:space:]]/{printf \"%s,\\n\",$1}", - "rule_6": "/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)[[:space:]]/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "out", - "descr": "public DoH-Provider", + "descr": "public DoH-Server", "flag": "tcp udp 80 443" }, "drop":{ "url_4": "https://www.spamhaus.org/drop/drop.txt", "url_6": "https://www.spamhaus.org/drop/dropv6.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[[:space:]]/{printf \"%s,\\n\",$1}", - "rule_6": "/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)[[:space:]]/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "spamhaus drop compilation" }, "dshield":{ "url_4": "https://feeds.dshield.org/block.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[[:space:]]/{printf \"%s/%s,\\n\",$1,$3}", + "rule": "feed 13", "chain": "in", "descr": "dshield IP blocklist" }, "etcompromised":{ "url_4": "https://iplists.firehol.org/files/et_compromised.ipset", - "rule_4": 
"/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "ET compromised hosts" }, "feodo":{ "url_4": "https://feodotracker.abuse.ch/downloads/ipblocklist.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "feodo tracker" }, "firehol1":{ "url_4": "https://iplists.firehol.org/files/firehol_level1.netset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "firehol level 1 compilation" }, "firehol2":{ "url_4": "https://iplists.firehol.org/files/firehol_level2.netset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "firehol level 2 compilation" }, "firehol3":{ "url_4": "https://iplists.firehol.org/files/firehol_level3.netset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "firehol level 3 compilation" }, "firehol4":{ "url_4": "https://iplists.firehol.org/files/firehol_level4.netset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{if(!seen[$1]++)printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "firehol level 4 compilation" }, "greensnow":{ "url_4": "https://blocklist.greensnow.co/greensnow.txt", - "rule_4": 
"/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "suspicious server IPs" }, "hagezi":{ "url_4": "https://raw.githubusercontent.com/hagezi/dns-blocklists/refs/heads/main/ips/tif.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "out", "descr": "Threat IP blocklist", "flag": "tcp udp 80 443" }, "ipblackhole":{ - "url_4": "https://blackhole.s-e-r-v-e-r.pw/blackhole-today", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "url_4": "https://blackhole.monster/blackhole-today", + "rule": "feed 1", "chain": "in", "descr": "blackhole IP blocklist" }, + "ipexdbl":{ + "url_4": "https://raw.githubusercontent.com/ZEROF/ipextractor/main/ipexdbl.txt", + "rule": "feed 1", + "chain": "in", + "descr": "IPEX dynamic blocklists" + }, "ipsum":{ "url_4": "https://raw.githubusercontent.com/stamparm/ipsum/master/levels/3.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[-[:space:]]?/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "malicious IPs" }, "ipthreat":{ "url_4": "https://lists.ipthreat.net/file/ipthreat-lists/threat/threat-30.txt.gz", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[-[:space:]]?/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "hacker and botnet IPs", "flag": "gz" }, - "nixspam":{ - "url_4": "https://www.nixspam.net/download/nixspam-ip.dump.gz", - "rule_4": 
"/127\\./{next}/(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)[[:space:]]/{printf \"%s,\\n\",$2}", - "chain": "in", - "descr": "iX spam protection", - "flag": "gz" - }, - "pallebone":{ - "url_4": "https://raw.githubusercontent.com/pallebone/StrictBlockPAllebone/master/BlockIP.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "myip":{ + "url_4": "https://myip.ms/files/blacklist/general/latest_blacklist.txt", + "url_6": "https://myip.ms/files/blacklist/general/latest_blacklist.txt", + "rule": "feed 1", "chain": "in", - "descr": "curated IP blocklist" + "descr": "real-time IP blocklist" }, "proxy":{ "url_4": "https://iplists.firehol.org/files/proxylists.ipset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "open proxies" }, "threat":{ "url_4": "https://rules.emergingthreats.net/fwrules/emerging-Block-IPs.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "emerging threats" }, "threatview":{ "url_4": "https://threatview.io/Downloads/IP-High-Confidence-Feed.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "malicious IPs" }, "tor":{ "url_4": "https://www.dan.me.uk/torlist/?exit", "url_6": "https://www.dan.me.uk/torlist/?exit", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "rule_6": 
"/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "tor exit nodes" }, "turris":{ "url_4": "https://view.sentinel.turris.cz/greylist-data/greylist-latest.csv", "url_6": "https://view.sentinel.turris.cz/greylist-data/greylist-latest.csv", - "rule_4": "BEGIN{FS=\",\"}/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)/{printf \"%s,\\n\",$1}", - "rule_6": "BEGIN{FS=\",\"}/^(([0-9A-f]{0,4}:){1,7}[0-9A-f]{0,4}:?(\\/(1?[0-2][0-8]|[0-9][0-9]))?)/{printf \"%s,\\n\",$1}", + "rule": "feed 1 ,", "chain": "in", "descr": "turris sentinel blocklist" }, "uceprotect1":{ "url_4": "http://wget-mirrors.uceprotect.net/rbldnsd-all/dnsbl-1.uceprotect.net.gz", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "spam protection level 1", "flag": "gz" }, "uceprotect2":{ "url_4": "http://wget-mirrors.uceprotect.net/rbldnsd-all/dnsbl-2.uceprotect.net.gz", - "rule_4": "BEGIN{IGNORECASE=1}/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)([[:space:]]NET)/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "spam protection level 2", "flag": "gz" }, "uceprotect3":{ "url_4": "http://wget-mirrors.uceprotect.net/rbldnsd-all/dnsbl-3.uceprotect.net.gz", - "rule_4": "BEGIN{IGNORECASE=1}/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)([[:space:]]YOUR)/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "spam protection level 3", "flag": "gz" }, "urlhaus":{ "url_4": "https://urlhaus.abuse.ch/downloads/ids/", - "rule_4": 
"BEGIN{FS=\";\"}/content:\"127\\./{next}/(content:\"([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])\")/{printf \"%s,\\n\",substr($10,11,length($10)-11)}", + "rule": "suricata 1", "chain": "in", "descr": "urlhaus IDS IPs" }, "urlvir":{ "url_4": "https://iplists.firehol.org/files/urlvir.ipset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "malware related IPs" }, "voip":{ "url_4": "https://voipbl.org/update/", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "VoIP fraud blocklist" }, "vpn":{ "url_4": "https://raw.githubusercontent.com/X4BNet/lists_vpn/refs/heads/main/output/vpn/ipv4.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "vpn IPs" }, "vpndc":{ "url_4": "https://raw.githubusercontent.com/X4BNet/lists_vpn/refs/heads/main/output/datacenter/ipv4.txt", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "vpn datacenter IPs" }, "webclient":{ "url_4": "https://iplists.firehol.org/files/firehol_webclient.netset", - "rule_4": "/^127\\./{next}/^(([1-9][0-9]{0,2}\\.){1}([0-9]{1,3}\\.){2}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", + "rule": "feed 1", "chain": "in", "descr": "malware related IPs" } diff --git a/packages/banip/files/banip.init b/packages/banip/files/banip.init index acf2ffd06..8914e2487 100755 --- a/packages/banip/files/banip.init +++ 
b/packages/banip/files/banip.init @@ -1,6 +1,6 @@ #!/bin/sh /etc/rc.common # banIP init script - ban incoming and outgoing IPs via named nftables Sets -# Copyright (c) 2018-2024 Dirk Brenken (dev@brenken.org) +# Copyright (c) 2018-2026 Dirk Brenken (dev@brenken.org) # This is free software, licensed under the GNU General Public License v3. # (s)hellcheck exceptions @@ -9,10 +9,9 @@ START=95 USE_PROCD=1 -extra_command "report" "[text|json|mail] Print banIP related Set statistics" +extra_command "report" "[text|json|mail|gen] Print banIP related Set statistics" extra_command "search" "[|] Check if an element exists in a banIP Set" -extra_command "survey" "[] List all elements of a given banIP Set" -extra_command "lookup" "Lookup the IPs of domain names in the local lists and update them" +extra_command "content" "[] [true|false] Listing of all or only elements with hits of a given banIP Set" ban_init="/etc/init.d/banip" ban_service="/usr/bin/banip-service.sh" @@ -20,13 +19,21 @@ ban_funlib="/usr/lib/banip-functions.sh" ban_pidfile="/var/run/banip.pid" ban_lock="/var/run/banip.lock" -[ "${action}" = "boot" ] && "${ban_init}" running && exit 0 -{ [ "${action}" = "stop" ] || [ "${action}" = "report" ] || [ "${action}" = "search" ] || [ "${action}" = "survey" ] || [ "${action}" = "lookup" ]; } && ! "${ban_init}" running && exit 0 -[ ! -r "${ban_funlib}" ] && { [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "stop" ] || [ "${action}" = "report" ] || [ "${action}" = "search" ] || [ "${action}" = "survey" ] || [ "${action}" = "lookup" ] || [ "${action}" = "status" ]; } && exit 1 -[ -d "${ban_lock}" ] && { [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "lookup" ] || [ "${action}" = "search" ]; } && exit 1 -[ ! 
-d "${ban_lock}" ] && { [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "lookup" ] || [ "${action}" = "search" ]; } && mkdir -p "${ban_lock}" -{ [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "stop" ] || [ "${action}" = "report" ] || [ "${action}" = "search" ] || [ "${action}" = "survey" ] || [ "${action}" = "lookup" ] || [ "${action}" = "status" ]; } && . "${ban_funlib}" -[ ! -d "${ban_lock}" ] && { [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "lookup" ] || [ "${action}" = "search" ]; } && exit 1 +if [ -z "${IPKG_INSTROOT}" ]; then + if [ "${action}" = "boot" ] && "${ban_init}" running; then + exit 0 + elif { [ "${action}" = "stop" ] || [ "${action}" = "report" ] || [ "${action}" = "search" ] || [ "${action}" = "content" ]; } && ! "${ban_init}" running; then + exit 0 + fi + if [ ! -d "${ban_lock}" ] && + { [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "search" ]; }; then + mkdir -p "${ban_lock}" + elif [ -d "${ban_lock}" ] && + { [ "${action}" = "boot" ] || [ "${action}" = "start" ] || [ "${action}" = "restart" ] || [ "${action}" = "reload" ] || [ "${action}" = "search" ]; }; then + exit 1 + fi + . 
"${ban_funlib}" +fi boot() { : >"${ban_pidfile}" @@ -42,7 +49,7 @@ start_service() { procd_set_param nice "$(uci_get banip global ban_nicelimit "0")" procd_set_param limits nofile="$(uci_get banip global ban_filelimit "1024")" procd_set_param stdout 0 - procd_set_param stderr 0 + procd_set_param stderr 1 procd_close_instance else f_log "err" "banIP service autostart is disabled" @@ -84,21 +91,8 @@ search() { rm -rf "${ban_lock}" } -survey() { - f_survey "${1}" -} - -lookup() { - local list hold cnt="1" - - for list in allowlist blocklist; do - (f_lookup "${list}") & - hold="$((cnt % ban_cores))" - [ "${hold}" = "0" ] && wait - cnt="$((cnt + 1))" - done - wait - rm -rf "${ban_lock}" +content() { + f_content "${1}" "${2:-"false"}" } service_triggers() { @@ -111,7 +105,4 @@ service_triggers() { for iface in ${trigger}; do procd_add_interface_trigger "interface.*.up" "${iface}" "${ban_init}" start done - - PROCD_RELOAD_DELAY="$((2 * 1000))" - procd_add_reload_trigger banip } diff --git a/packages/banip/files/banip.tpl b/packages/banip/files/banip.tpl index 924ffe0b6..a542f6b0d 100644 --- a/packages/banip/files/banip.tpl +++ b/packages/banip/files/banip.tpl @@ -1,24 +1,22 @@ # banIP mail template/include - ban incoming and outgoing IPs via named nftables Sets -# Copyright (c) 2018-2024 Dirk Brenken (dev@brenken.org) +# Copyright (c) 2018-2026 Dirk Brenken (dev@brenken.org) # This is free software, licensed under the GNU General Public License v3. 
# info preparation # local banip_info report_info log_info system_info mail_text logread_cmd -if [ -f "${ban_logreadfile}" ]; then +if [ -f "${ban_logreadfile}" ] && [ -x "${ban_logreadcmd}" ] && [ "${ban_logreadcmd##*/}" = "tail" ]; then logread_cmd="${ban_logreadcmd} -qn ${ban_loglimit} ${ban_logreadfile} 2>/dev/null | ${ban_grepcmd} -e \"banIP/\" 2>/dev/null" -elif printf "%s" "${ban_packages}" | "${ban_grepcmd}" -q '"logd'; then - logread_cmd="${ban_logreadcmd} -l ${ban_loglimit} -e "banIP/" 2>/dev/null" +elif [ -x "${ban_logreadcmd}" ] && [ "${ban_logreadcmd##*/}" = "logread" ]; then + logread_cmd="${ban_logreadcmd} -l ${ban_loglimit} -e \"banIP/\" 2>/dev/null" fi banip_info="$(/etc/init.d/banip status 2>/dev/null)" report_info="$("${ban_catcmd}" "${ban_reportdir}/ban_report.txt" 2>/dev/null)" -log_info="$(${logread_cmd})" -system_info="$( - strings /etc/banner 2>/dev/null - "${ban_ubuscmd}" call system board | "${ban_awkcmd}" 'BEGIN{FS="[{}\"]"}{if($2=="kernel"||$2=="hostname"||$2=="system"||$2=="model"||$2=="description")printf " + %-12s: %s\n",$2,$4}' -)" +log_info="$(eval "${logread_cmd}" 2>/dev/null)" +system_info="$(strings /etc/banner 2>/dev/null; "${ban_ubuscmd}" call system board | \ + "${ban_awkcmd}" 'BEGIN{FS="[{}\"]"}{if($2=="kernel"||$2=="hostname"||$2=="system"||$2=="model"||$2=="description")printf " + %-12s: %s\n",$2,$4}')" # content header # diff --git a/packages/ns-api/README.md b/packages/ns-api/README.md index 34c7295c8..887298b76 100644 --- a/packages/ns-api/README.md +++ b/packages/ns-api/README.md @@ -5914,9 +5914,8 @@ Configure banip settings: - `enabled`: disable or enable banip (true or false). - `ban_logprerouting`: Log suspicious packets in the prerouting chain (true or false). -- `ban_loginput`: Log suspicious packets in the WAN-input chain (true or false). -- `ban_logforwardwan`: Log suspicious packets in the WAN-forward chain (true or false). -- `ban_logforwardlan`: Log suspicious packets in the LAN-forward chain (true or false). 
+- `ban_loginbound`: Log suspicious packets in inbound traffic (WAN-input chain) (true or false). +- `ban_logoutbound`: Log suspicious packets in outbound traffic (LAN-forward chain) (true or false). - `ban_loglimit`: Enable or disable scanning of logfiles (true or false). - `ban_logcount`: Specify how many times an IP must appear in the log to be considered suspicious (integer). - `ban_logterm`: List of regex entries for logfile parsing (list of strings). @@ -5927,7 +5926,7 @@ Configure banip settings: ```bash -api-cli ns.threatshield edit-settings --data '{"enabled": true, "ban_logprerouting": true, "ban_loginput": true, "ban_logforwardwan": true, "ban_logforwardlan": true, "ban_loglimit": false, "ban_logcount": 5, "ban_logterm": ["regex1", "regex2"], "ban_icmplimit": true, "ban_synlimit": true, "ban_udplimit": true, "ban_nftexpiry": "1d"}' +api-cli ns.threatshield edit-settings --data '{"enabled": true, "ban_logprerouting": true, "ban_loginbound": true, "ban_logoutbound": true, "ban_loglimit": false, "ban_logcount": 5, "ban_logterm": ["regex1", "regex2"], "ban_icmplimit": true, "ban_synlimit": true, "ban_udplimit": true, "ban_nftexpiry": "1d"}' ``` Response example: diff --git a/packages/ns-api/files/ns.threatshield b/packages/ns-api/files/ns.threatshield index be8ed3de1..916162b6a 100644 --- a/packages/ns-api/files/ns.threatshield +++ b/packages/ns-api/files/ns.threatshield @@ -182,9 +182,8 @@ def list_settings(e_uci): 'data': { 'enabled': e_uci.get('banip', 'global', 'ban_enabled') == '1', 'ban_logprerouting': e_uci.get('banip', 'global', 'ban_logprerouting', default=False) == '1', - 'ban_loginput': e_uci.get('banip', 'global', 'ban_loginput', default=False) == '1', - 'ban_logforwardwan': e_uci.get('banip', 'global', 'ban_logforwardwan', default=False) == '1', - 'ban_logforwardlan': e_uci.get('banip', 'global', 'ban_logforwardlan', default=False) == '1', + 'ban_loginbound': e_uci.get('banip', 'global', 'ban_loginbound', default=False) == '1', + 
'ban_logoutbound': e_uci.get('banip', 'global', 'ban_logoutbound', default=False) == '1', 'ban_loglimit': True if int(e_uci.get('banip', 'global', 'ban_loglimit', default=100)) > 0 else False, 'ban_logcount': e_uci.get('banip', 'global', 'ban_logcount', default=1), 'ban_logterm': e_uci.get('banip', 'global', 'ban_logterm', list=True, default=[]), @@ -232,18 +231,14 @@ def edit_settings(e_uci, payload): raise ValidationError('ban_logprerouting', 'required') if not isinstance(payload['ban_logprerouting'], bool): raise ValidationError('ban_logprerouting', 'invalid', payload['ban_logprerouting']) - if 'ban_loginput' not in payload: - raise ValidationError('ban_loginput', 'required') - if not isinstance(payload['ban_loginput'], bool): - raise ValidationError('ban_loginput', 'invalid', payload['ban_loginput']) - if 'ban_logforwardwan' not in payload: - raise ValidationError('ban_logforwardwan', 'required') - if not isinstance(payload['ban_logforwardwan'], bool): - raise ValidationError('ban_logforwardwan', 'invalid', payload['ban_logforwardwan']) - if 'ban_logforwardlan' not in payload: - raise ValidationError('ban_logforwardlan', 'required') - if not isinstance(payload['ban_logforwardlan'], bool): - raise ValidationError('ban_logforwardlan', 'invalid', payload['ban_logforwardlan']) + if 'ban_loginbound' not in payload: + raise ValidationError('ban_loginbound', 'required') + if not isinstance(payload['ban_loginbound'], bool): + raise ValidationError('ban_loginbound', 'invalid', payload['ban_loginbound']) + if 'ban_logoutbound' not in payload: + raise ValidationError('ban_logoutbound', 'required') + if not isinstance(payload['ban_logoutbound'], bool): + raise ValidationError('ban_logoutbound', 'invalid', payload['ban_logoutbound']) if 'ban_icmplimit' not in payload: raise ValidationError('ban_icmplimit', 'required') if not (isinstance(payload['ban_icmplimit'], bool) or isinstance(payload['ban_icmplimit'], int)): @@ -282,9 +277,8 @@ def edit_settings(e_uci, payload): 
set_default(e_uci, 'ban_protov6', '1') e_uci.set('banip', 'global', 'ban_logprerouting', payload['ban_logprerouting']) - e_uci.set('banip', 'global', 'ban_loginput', payload['ban_loginput']) - e_uci.set('banip', 'global', 'ban_logforwardwan', payload['ban_logforwardwan']) - e_uci.set('banip', 'global', 'ban_logforwardlan', payload['ban_logforwardlan']) + e_uci.set('banip', 'global', 'ban_loginbound', payload['ban_loginbound']) + e_uci.set('banip', 'global', 'ban_logoutbound', payload['ban_logoutbound']) e_uci.set('banip', 'global', 'ban_loglimit', 100 if payload['ban_loglimit'] else 0) if isinstance(payload['ban_icmplimit'], int): @@ -708,9 +702,8 @@ if cmd == 'list': 'edit-settings': { 'enabled': True, 'ban_logprerouting': True, - 'ban_loginput': True, - 'ban_logforwardwan': True, - 'ban_logforwardlan': True, + 'ban_loginbound': True, + 'ban_logoutbound': True, 'ban_loglimit': True, 'ban_logcount': 3, 'ban_logterm': ['string'], diff --git a/packages/ns-threat_shield/files/banip-defaults b/packages/ns-threat_shield/files/banip-defaults index f8e665414..52af4f6ae 100644 --- a/packages/ns-threat_shield/files/banip-defaults +++ b/packages/ns-threat_shield/files/banip-defaults @@ -1,10 +1,9 @@ -[ "$(uci -q get banip.global.ban_logforwardwan)" != "" ] && exit 0 +[ "$(uci -q get banip.global.ban_loginbound)" != "" ] && exit 0 uci -q batch << EOI -set banip.global.ban_logforwardwan="1" -set banip.global.ban_logforwardlan="1" +set banip.global.ban_loginbound="1" +set banip.global.ban_logoutbound="1" set banip.global.ban_logprerouting="0" -set banip.global.ban_loginput="0" set banip.global.ban_loglimit="100" set banip.global.ban_logcount="3" diff --git a/packages/ns-threat_shield/files/banip.nethesis.feeds b/packages/ns-threat_shield/files/banip.nethesis.feeds index e9f6d4db1..3ec2273a8 100644 --- a/packages/ns-threat_shield/files/banip.nethesis.feeds +++ b/packages/ns-threat_shield/files/banip.nethesis.feeds @@ -1,27 +1,32 @@ { - "yoroimallvl1": { - "url_4": 
"https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_malware_level1.ipset", - "rule_4": "/^(([0-9]{1,3}\\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "descr": "Yoroi malware - Level 1" - }, - "yoroimallvl2": { - "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_malware_level2.ipset", - "rule_4": "/^(([0-9]{1,3}\\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "descr": "Yoroi malware - Level 2" - }, - "yoroisusplvl1": { - "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_souspicious_level1.ipset", - "rule_4": "/^(([0-9]{1,3}\\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "descr": "Yoroi suspicious - Level 1" - }, - "yoroisusplvl2": { - "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_souspicious_level2.ipset", - "rule_4": "/^(([0-9]{1,3}\\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "descr": "Yoroi suspicious - Level 2" - }, - "nethesislvl3": { - "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/nethesis_level3.netset", - "rule_4": "/^(([0-9]{1,3}\\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])(\\/(1?[0-9]|2?[0-9]|3?[0-2]))?)$/{printf \"%s,\\n\",$1}", - "descr": "Nethesis suspicious - Level 3" - } + "yoroimallvl1": { + "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_malware_level1.ipset", + "rule": "feed 1", + "chain": "in", + "descr": "Yoroi malware - Level 1" + }, + "yoroimallvl2": { + "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_malware_level2.ipset", + "rule": "feed 1", + "chain": "in", + "descr": "Yoroi malware - Level 2" + }, + "yoroisusplvl1": 
{ + "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_souspicious_level1.ipset", + "rule": "feed 1", + "chain": "in", + "descr": "Yoroi suspicious - Level 1" + }, + "yoroisusplvl2": { + "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/yoroi_souspicious_level2.ipset", + "rule": "feed 1", + "chain": "in", + "descr": "Yoroi suspicious - Level 2" + }, + "nethesislvl3": { + "url_4": "https://__USER__:__PASSWORD__@bl.nethesis.it/plain/__TYPE__/nethesis-blacklists/nethesis_level3.netset", + "rule": "feed 1", + "chain": "in", + "descr": "Nethesis suspicious - Level 3" + } } From 2973e28cbb4f938e58e0b6fa9ffeed51ed84f74c Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Mon, 30 Mar 2026 14:14:50 +0200 Subject: [PATCH 22/39] separated telegraf parsers --- packages/telegraf/Makefile | 5 + packages/telegraf/files/telegraf.conf | 285 ------------------ .../telegraf/files/telegraf.conf.d/os.conf | 278 +++++++++++++++++ 3 files changed, 283 insertions(+), 285 deletions(-) create mode 100644 packages/telegraf/files/telegraf.conf.d/os.conf diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index d7de7ab81..3c3eeab2d 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -33,7 +33,9 @@ GO_PKG_TAGS:= \ inputs.disk \ inputs.diskio \ inputs.ethtool \ + inputs.exec \ inputs.file \ + inputs.http_listener_v2 \ inputs.iptables \ inputs.kernel \ inputs.kernel_vmstat \ @@ -47,7 +49,9 @@ GO_PKG_TAGS:= \ inputs.processes \ inputs.sensors \ inputs.system \ + inputs.tail \ outputs.influxdb \ + parsers.grok \ parsers.json_v2 include $(INCLUDE_DIR)/package.mk @@ -76,6 +80,7 @@ define Package/telegraf/install $(INSTALL_DIR) $(1)/etc/telegraf $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf.conf $(INSTALL_DIR) $(1)/etc/telegraf.conf.d + $(INSTALL_DATA) ./files/telegraf.conf.d/os.conf $(1)/etc/telegraf.conf.d/os.conf $(INSTALL_DATA) 
./files/telegraf.conf.d/netifyd.conf $(1)/etc/telegraf.conf.d/netifyd.conf $(INSTALL_DIR) $(1)/etc/netifyd $(INSTALL_DATA) ./files/netifyd/netify-sink-log-telegraf.json $(1)/etc/netifyd/netify-sink-log-telegraf.json diff --git a/packages/telegraf/files/telegraf.conf b/packages/telegraf/files/telegraf.conf index 151e90832..e9f0d9aef 100644 --- a/packages/telegraf/files/telegraf.conf +++ b/packages/telegraf/files/telegraf.conf @@ -220,288 +220,3 @@ ## what you want as it can lead to data points captured at different times ## getting omitted due to similar data. # influx_omit_timestamp = false - - -############################################################################### -# INPUT PLUGINS # -############################################################################### - -# OS and system metrics collection -# Includes CPU, memory, disk, network, and kernel statistics -# All metrics from this section are tagged with influxdb_db=os-metrics - -# Read metrics about cpu usage -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - ## NOTE: The resulting 'time_active' field INCLUDES 'iowait'! - report_active = false - ## If true and the info is available then add core_id and physical_id tags - core_tags = false - [inputs.cpu.tags] - influxdb_db = "os-metrics" - - -# Read metrics about disk usage by mount point -[[inputs.disk]] - ## By default stats will be gathered for all mount points. - ## Set mount_points will restrict the stats to only the specified mount points. - # mount_points = ["/"] - - ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] - - ## Ignore mount points by mount options. - ## The 'mount' command reports options of all mounts in parathesis. 
- ## Bind mounts can be ignored with the special 'bind' option. - # ignore_mount_opts = [] - [inputs.disk.tags] - influxdb_db = "os-metrics" - - -# Read metrics about disk IO by device -[[inputs.diskio]] - ## Devices to collect stats for - ## Wildcards are supported except for disk synonyms like '/dev/disk/by-id'. - ## ex. devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] - # devices = ["*"] - - ## Skip gathering of the disk's serial numbers. - # skip_serial_number = true - - ## Device metadata tags to add on systems supporting it (Linux only) - ## Use 'udevadm info -q property -n ' to get a list of properties. - ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] - - ## Using the same metadata source as device_tags, you can also customize the - ## name of the device via templates. - ## The 'name_templates' parameter is a list of templates to try and apply to - ## the device. The template may contain variables in the form of '$PROPERTY' or - ## '${PROPERTY}'. The first template which does not contain any variables not - ## present for the device is used as the device name tag. - ## The typical use case is for LVM volumes, to get the VG/LV name instead of - ## the near-meaningless DM-0 name. - # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] - [inputs.diskio.tags] - influxdb_db = "os-metrics" - - -# Plugin to collect various Linux kernel statistics. 
-# This plugin ONLY supports Linux -[[inputs.kernel]] - ## Additional gather options - ## Possible options include: - ## * ksm - kernel same-page merging - ## * psi - pressure stall information - # collect = [] - [inputs.kernel.tags] - influxdb_db = "os-metrics" - - -# Read metrics about memory usage -[[inputs.mem]] - # no configuration - [inputs.mem.tags] - influxdb_db = "os-metrics" - - -# Get the number of processes and group them by status -# This plugin ONLY supports non-Windows -[[inputs.processes]] - ## Use sudo to run ps command on *BSD systems. Linux systems will read - ## /proc, so this does not apply there. - # use_sudo = false - [inputs.processes.tags] - influxdb_db = "os-metrics" - - -# Read metrics about system load & uptime -[[inputs.system]] - # no configuration - [inputs.system.tags] - influxdb_db = "os-metrics" - - -# Collect bond interface status, slaves statuses and failures count -[[inputs.bond]] - ## Sets 'proc' directory path - ## If not specified, then default is /proc - # host_proc = "/proc" - - ## Sets 'sys' directory path - ## If not specified, then default is /sys - # host_sys = "/sys" - - ## By default, telegraf gather stats for all bond interfaces - ## Setting interfaces will restrict the stats to the specified - ## bond interfaces. - # bond_interfaces = ["bond0"] - - ## Tries to collect additional bond details from /sys/class/net/{bond} - ## currently only useful for LACP (mode 4) bonds - # collect_sys_details = false - [inputs.bond.tags] - influxdb_db = "os-metrics" - - -# Returns ethtool statistics for given interfaces -# This plugin ONLY supports Linux -#[[inputs.ethtool]] - ## List of interfaces to pull metrics for - # interface_include = ["eth0"] - - ## List of interfaces to ignore when pulling metrics. 
- # interface_exclude = ["eth1"] - - ## Plugin behavior for downed interfaces - ## Available choices: - ## - expose: collect & report metrics for down interfaces - ## - skip: ignore interfaces that are marked down - # down_interfaces = "expose" - - ## Reading statistics from interfaces in additional namespaces is also - ## supported, so long as the namespaces are named (have a symlink in - ## /var/run/netns). The telegraf process will also need the CAP_SYS_ADMIN - ## permission. - ## By default, only the current namespace will be used. For additional - ## namespace support, at least one of `namespace_include` and - ## `namespace_exclude` must be provided. - ## To include all namespaces, set `namespace_include` to `["*"]`. - ## The initial namespace (if anonymous) can be specified with the empty - ## string (""). - - ## List of namespaces to pull metrics for - # namespace_include = [] - - ## List of namespace to ignore when pulling metrics. - # namespace_exclude = [] - - ## Some drivers declare statistics with extra whitespace, different spacing, - ## and mix cases. This list, when enabled, can be used to clean the keys. - ## Here are the current possible normalizations: - ## * snakecase: converts fooBarBaz to foo_bar_baz - ## * trim: removes leading and trailing whitespace - ## * lower: changes all capitalized letters to lowercase - ## * underscore: replaces spaces with underscores - # normalize_keys = ["snakecase", "trim", "lower", "underscore"] - - -# Gather packets and bytes throughput from iptables -# This plugin ONLY supports Linux -[[inputs.iptables]] - ## iptables require root access on most systems. - ## Setting 'use_sudo' to true will make use of sudo to run iptables. - ## Users must configure sudo to allow telegraf user to run iptables with - ## no password. - ## iptables can be restricted to only list command "iptables -nvL". - # use_sudo = false - - ## Setting 'use_lock' to true runs iptables with the "-w" option. 
- ## Adjust your sudo settings appropriately if using this option - ## ("iptables -w 5 -nvl") - # use_lock = false - - ## Define an alternate executable, such as "ip6tables". Default is "iptables". - # binary = "ip6tables" - ## defines the table to monitor: - table = "filter" - - ## defines the chains to monitor. - ## NOTE: iptables rules without a comment will not be monitored. - ## Read the plugin documentation for more information. - chains = [ "INPUT" ] - [inputs.iptables.tags] - influxdb_db = "os-metrics" - - -# Get kernel statistics from /proc/vmstat -# This plugin ONLY supports Linux -[[inputs.kernel_vmstat]] - # no configuration - [inputs.kernel_vmstat.tags] - influxdb_db = "os-metrics" - - -# Provides Linux CPU metrics -# This plugin ONLY supports Linux -[[inputs.linux_cpu]] - ## Path for sysfs filesystem. - ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt - ## Defaults: - # host_sys = "/sys" - - ## CPU metrics collected by the plugin. - ## Supported options: - ## "cpufreq", "thermal" - ## Defaults: - # metrics = ["cpufreq"] - [inputs.linux_cpu.tags] - influxdb_db = "os-metrics" - - -# Provides Linux sysctl fs metrics -[[inputs.linux_sysctl_fs]] - # no configuration - [inputs.linux_sysctl_fs.tags] - influxdb_db = "os-metrics" - - -# Gather metrics about network interfaces -[[inputs.net]] - ## By default, telegraf gathers stats from any up interface (excluding loopback) - ## Setting interfaces will tell it to gather these explicit interfaces, - ## regardless of status. When specifying an interface, glob-style - ## patterns are also supported. - # interfaces = ["eth*", "enp0s[0-1]", "lo"] - - ## On linux systems telegraf also collects protocol stats. - ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. - ## - ## DEPRECATION NOTICE: A value of 'false' is deprecated and discouraged! - ## Please set this to `true` and use the 'inputs.nstat' - ## plugin instead. 
- # ignore_protocol_stats = false - [inputs.net.tags] - influxdb_db = "os-metrics" - - -# Read TCP metrics such as established, time wait and sockets counts. -[[inputs.netstat]] - # no configuration - [inputs.netstat.tags] - influxdb_db = "os-metrics" - - -# Collect kernel snmp counters and network interface statistics -[[inputs.nstat]] - ## file paths for proc files. If empty default paths will be used: - ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 - ## These can also be overridden with env variables, see README. - proc_net_netstat = "/proc/net/netstat" - proc_net_snmp = "/proc/net/snmp" - proc_net_snmp6 = "/proc/net/snmp6" - ## dump metrics with 0 values too - dump_zeros = true - [inputs.nstat.tags] - influxdb_db = "os-metrics" - - -# Monitor sensors, requires lm-sensors package -# This plugin ONLY supports Linux -[[inputs.sensors]] - ## Remove numbers from field names. - ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. - # remove_numbers = true - - ## Timeout is the maximum amount of time that the sensors command can run. - # timeout = "5s" - [inputs.sensors.tags] - influxdb_db = "os-metrics" - diff --git a/packages/telegraf/files/telegraf.conf.d/os.conf b/packages/telegraf/files/telegraf.conf.d/os.conf new file mode 100644 index 000000000..18ed9f251 --- /dev/null +++ b/packages/telegraf/files/telegraf.conf.d/os.conf @@ -0,0 +1,278 @@ +# OS and system metrics collection +# Includes CPU, memory, disk, network, and kernel statistics +# All metrics from this section are tagged with influxdb_db=os-metrics + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + ## NOTE: The resulting 'time_active' field INCLUDES 'iowait'! 
+ report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false + [inputs.cpu.tags] + influxdb_db = "os-metrics" + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] + [inputs.disk.tags] + influxdb_db = "os-metrics" + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## Devices to collect stats for + ## Wildcards are supported except for disk synonyms like '/dev/disk/by-id'. + ## ex. devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] + # devices = ["*"] + + ## Skip gathering of the disk's serial numbers. + # skip_serial_number = true + + ## Device metadata tags to add on systems supporting it (Linux only) + ## Use 'udevadm info -q property -n ' to get a list of properties. + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. 
+ ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] + [inputs.diskio.tags] + influxdb_db = "os-metrics" + + +# Plugin to collect various Linux kernel statistics. +# This plugin ONLY supports Linux +[[inputs.kernel]] + ## Additional gather options + ## Possible options include: + ## * ksm - kernel same-page merging + ## * psi - pressure stall information + # collect = [] + [inputs.kernel.tags] + influxdb_db = "os-metrics" + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + [inputs.mem.tags] + influxdb_db = "os-metrics" + + +# Get the number of processes and group them by status +# This plugin ONLY supports non-Windows +[[inputs.processes]] + ## Use sudo to run ps command on *BSD systems. Linux systems will read + ## /proc, so this does not apply there. + # use_sudo = false + [inputs.processes.tags] + influxdb_db = "os-metrics" + + +# Read metrics about system load & uptime +[[inputs.system]] + # no configuration + [inputs.system.tags] + influxdb_db = "os-metrics" + + +# Collect bond interface status, slaves statuses and failures count +[[inputs.bond]] + ## Sets 'proc' directory path + ## If not specified, then default is /proc + # host_proc = "/proc" + + ## Sets 'sys' directory path + ## If not specified, then default is /sys + # host_sys = "/sys" + + ## By default, telegraf gather stats for all bond interfaces + ## Setting interfaces will restrict the stats to the specified + ## bond interfaces. 
+ # bond_interfaces = ["bond0"] + + ## Tries to collect additional bond details from /sys/class/net/{bond} + ## currently only useful for LACP (mode 4) bonds + # collect_sys_details = false + [inputs.bond.tags] + influxdb_db = "os-metrics" + + +# Returns ethtool statistics for given interfaces +# This plugin ONLY supports Linux +#[[inputs.ethtool]] + ## List of interfaces to pull metrics for + # interface_include = ["eth0"] + + ## List of interfaces to ignore when pulling metrics. + # interface_exclude = ["eth1"] + + ## Plugin behavior for downed interfaces + ## Available choices: + ## - expose: collect & report metrics for down interfaces + ## - skip: ignore interfaces that are marked down + # down_interfaces = "expose" + + ## Reading statistics from interfaces in additional namespaces is also + ## supported, so long as the namespaces are named (have a symlink in + ## /var/run/netns). The telegraf process will also need the CAP_SYS_ADMIN + ## permission. + ## By default, only the current namespace will be used. For additional + ## namespace support, at least one of `namespace_include` and + ## `namespace_exclude` must be provided. + ## To include all namespaces, set `namespace_include` to `["*"]`. + ## The initial namespace (if anonymous) can be specified with the empty + ## string (""). + + ## List of namespaces to pull metrics for + # namespace_include = [] + + ## List of namespace to ignore when pulling metrics. + # namespace_exclude = [] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. 
+ ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] + + +# Gather packets and bytes throughput from iptables +# This plugin ONLY supports Linux +[[inputs.iptables]] + ## iptables require root access on most systems. + ## Setting 'use_sudo' to true will make use of sudo to run iptables. + ## Users must configure sudo to allow telegraf user to run iptables with + ## no password. + ## iptables can be restricted to only list command "iptables -nvL". + # use_sudo = false + + ## Setting 'use_lock' to true runs iptables with the "-w" option. + ## Adjust your sudo settings appropriately if using this option + ## ("iptables -w 5 -nvl") + # use_lock = false + + ## Define an alternate executable, such as "ip6tables". Default is "iptables". + # binary = "ip6tables" + ## defines the table to monitor: + table = "filter" + + ## defines the chains to monitor. + ## NOTE: iptables rules without a comment will not be monitored. + ## Read the plugin documentation for more information. + chains = [ "INPUT" ] + [inputs.iptables.tags] + influxdb_db = "os-metrics" + + +# Get kernel statistics from /proc/vmstat +# This plugin ONLY supports Linux +[[inputs.kernel_vmstat]] + # no configuration + [inputs.kernel_vmstat.tags] + influxdb_db = "os-metrics" + + +# Provides Linux CPU metrics +# This plugin ONLY supports Linux +[[inputs.linux_cpu]] + ## Path for sysfs filesystem. + ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt + ## Defaults: + # host_sys = "/sys" + + ## CPU metrics collected by the plugin. 
+ ## Supported options: + ## "cpufreq", "thermal" + ## Defaults: + # metrics = ["cpufreq"] + [inputs.linux_cpu.tags] + influxdb_db = "os-metrics" + + +# Provides Linux sysctl fs metrics +[[inputs.linux_sysctl_fs]] + # no configuration + [inputs.linux_sysctl_fs.tags] + influxdb_db = "os-metrics" + + +# Gather metrics about network interfaces +[[inputs.net]] + ## By default, telegraf gathers stats from any up interface (excluding loopback) + ## Setting interfaces will tell it to gather these explicit interfaces, + ## regardless of status. When specifying an interface, glob-style + ## patterns are also supported. + # interfaces = ["eth*", "enp0s[0-1]", "lo"] + + ## On linux systems telegraf also collects protocol stats. + ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. + ## + ## DEPRECATION NOTICE: A value of 'false' is deprecated and discouraged! + ## Please set this to `true` and use the 'inputs.nstat' + ## plugin instead. + # ignore_protocol_stats = false + [inputs.net.tags] + influxdb_db = "os-metrics" + + +# Read TCP metrics such as established, time wait and sockets counts. +[[inputs.netstat]] + # no configuration + [inputs.netstat.tags] + influxdb_db = "os-metrics" + + +# Collect kernel snmp counters and network interface statistics +[[inputs.nstat]] + ## file paths for proc files. If empty default paths will be used: + ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 + ## These can also be overridden with env variables, see README. + proc_net_netstat = "/proc/net/netstat" + proc_net_snmp = "/proc/net/snmp" + proc_net_snmp6 = "/proc/net/snmp6" + ## dump metrics with 0 values too + dump_zeros = true + [inputs.nstat.tags] + influxdb_db = "os-metrics" + + +# Monitor sensors, requires lm-sensors package +# This plugin ONLY supports Linux +[[inputs.sensors]] + ## Remove numbers from field names. + ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. 
+ # remove_numbers = true + + ## Timeout is the maximum amount of time that the sensors command can run. + # timeout = "5s" + [inputs.sensors.tags] + influxdb_db = "os-metrics" From bb805038a86a74990bea00f879859ebf14d9abd7 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Mon, 30 Mar 2026 16:47:43 +0200 Subject: [PATCH 23/39] updated netifyd ingester with http --- packages/telegraf/Makefile | 2 +- .../netify-proc-aggregator-telegraf.json | 2 +- .../netifyd/netify-sink-http-telegraf.json | 12 +++++ .../netifyd/netify-sink-log-telegraf.json | 10 ---- .../netifyd/plugins.d/10-netify-telegraf.conf | 8 ++-- .../files/telegraf.conf.d/netifyd.conf | 46 ++++++++----------- 6 files changed, 36 insertions(+), 44 deletions(-) create mode 100644 packages/telegraf/files/netifyd/netify-sink-http-telegraf.json delete mode 100644 packages/telegraf/files/netifyd/netify-sink-log-telegraf.json diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 3c3eeab2d..4208e7c4d 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -83,7 +83,7 @@ define Package/telegraf/install $(INSTALL_DATA) ./files/telegraf.conf.d/os.conf $(1)/etc/telegraf.conf.d/os.conf $(INSTALL_DATA) ./files/telegraf.conf.d/netifyd.conf $(1)/etc/telegraf.conf.d/netifyd.conf $(INSTALL_DIR) $(1)/etc/netifyd - $(INSTALL_DATA) ./files/netifyd/netify-sink-log-telegraf.json $(1)/etc/netifyd/netify-sink-log-telegraf.json + $(INSTALL_DATA) ./files/netifyd/netify-sink-http-telegraf.json $(1)/etc/netifyd/netify-sink-http-telegraf.json $(INSTALL_DATA) ./files/netifyd/netify-proc-aggregator-telegraf.json $(1)/etc/netifyd/netify-proc-aggregator-telegraf.json $(INSTALL_DIR) $(1)/etc/netifyd/plugins.d $(INSTALL_DATA) ./files/netifyd/plugins.d/10-netify-telegraf.conf $(1)/etc/netifyd/plugins.d/10-netify-telegraf.conf diff --git a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json index 
c561b4d48..17ab825c4 100644 --- a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json +++ b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json @@ -7,7 +7,7 @@ "nested_mode": false, "privacy_mode": false, "sinks": { - "sink-log-telegraf": { + "sink-http-telegraf": { "telegraf": { } } } diff --git a/packages/telegraf/files/netifyd/netify-sink-http-telegraf.json b/packages/telegraf/files/netifyd/netify-sink-http-telegraf.json new file mode 100644 index 000000000..073023bcc --- /dev/null +++ b/packages/telegraf/files/netifyd/netify-sink-http-telegraf.json @@ -0,0 +1,12 @@ +{ + "timeout_connect": 30, + "timeout_transfer": 300, + "tls_verify": true, + "tls_version1": false, + "channels": { + "telegraf": { + "enable": true, + "url": "http://127.0.0.1:8087/netifyd" + } + } +} \ No newline at end of file diff --git a/packages/telegraf/files/netifyd/netify-sink-log-telegraf.json b/packages/telegraf/files/netifyd/netify-sink-log-telegraf.json deleted file mode 100644 index fb661e42b..000000000 --- a/packages/telegraf/files/netifyd/netify-sink-log-telegraf.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "overwrite": false, - "channels": { - "telegraf": { - "log_path": "/var/run/netifyd", - "log_name": "telegraf", - "overwrite": true - } - } -} diff --git a/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf b/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf index 81495d07c..87f34f593 100644 --- a/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf +++ b/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf @@ -1,4 +1,4 @@ -# Netify Aggregator Processor Plugin Loader +# Netify plugins config for Telegraf - Local and Sede # ############################################################################## @@ -7,9 +7,9 @@ enable = yes plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 conf_filename = ${path_state_persistent}/netify-proc-aggregator-telegraf.json 
-[sink-log-telegraf] +[sink-http-telegraf] enable = yes -plugin_library = ${path_plugin_libdir}/libnetify-sink-log.so.0.0.0 -conf_filename = ${path_state_persistent}/netify-sink-log-telegraf.json +plugin_library = ${path_plugin_libdir}/libnetify-sink-http.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-sink-http-telegraf.json # vim: set ft=dosini : diff --git a/packages/telegraf/files/telegraf.conf.d/netifyd.conf b/packages/telegraf/files/telegraf.conf.d/netifyd.conf index a163e23a4..8cb15316d 100644 --- a/packages/telegraf/files/telegraf.conf.d/netifyd.conf +++ b/packages/telegraf/files/telegraf.conf.d/netifyd.conf @@ -1,17 +1,19 @@ -# Parse netifyd metrics from JSON file -[[inputs.file]] - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = ["/var/run/netifyd/telegraf.json"] +# Parse netifyd metrics from HTTP sink +# The netifyd service sends metrics via HTTP POST to this endpoint +# See packages/telegraf/files/netifyd/netify-sink-http-telegraf.json for sink configuration +[[inputs.http_listener_v2]] + ## Address and port to listen on + service_address = "127.0.0.1:8087" - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" + ## Paths this server serves + paths = ["/netifyd"] + + ## Use TLS for https, enter the certfile and keyfile path + # tls_cert = "" + # tls_key = "" + + ## HTTP methods to accept + methods = ["POST"] ## Data format to consume. 
## Each data format has its own unique set of configuration options, read @@ -19,27 +21,15 @@ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json_v2" - ## Please use caution when using the following options: when file name - ## variation is high, this can increase the cardinality significantly. Read - ## more about cardinality here: - ## https://docs.influxdb.com/influxdb/cloud/reference/glossary/#series-cardinality - - ## Name of tag to store the name of the file. Disabled if not set. - # file_tag = "" - - ## Name of tag to store the absolute path and name of the file. Disabled if - ## not set. - # file_path_tag = "" - ## Global tags to add to all metrics from this input - [inputs.file.tags] + [inputs.http_listener_v2.tags] influxdb_db = "netifyd" - [[inputs.file.json_v2]] + [[inputs.http_listener_v2.json_v2]] measurement_name = "netifyd" timestamp_path = "log_time_end" timestamp_format = "unix" - [[inputs.file.json_v2.object]] + [[inputs.http_listener_v2.json_v2.object]] path = "stats" tags = ["detected_application_name", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] excluded_keys = ["digests"] From ed341e8a183de70fdc28869339e152c8c3e3f8b0 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Tue, 31 Mar 2026 09:43:35 +0200 Subject: [PATCH 24/39] added outputs.sql --- packages/telegraf/Makefile | 1 + packages/telegraf/files/telegraf.conf.d/netifyd.conf | 3 +++ 2 files changed, 4 insertions(+) diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 4208e7c4d..e43ca3e89 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -51,6 +51,7 @@ GO_PKG_TAGS:= \ inputs.system \ inputs.tail \ outputs.influxdb \ + outputs.sql \ parsers.grok \ parsers.json_v2 diff --git a/packages/telegraf/files/telegraf.conf.d/netifyd.conf 
b/packages/telegraf/files/telegraf.conf.d/netifyd.conf index 8cb15316d..089e4e382 100644 --- a/packages/telegraf/files/telegraf.conf.d/netifyd.conf +++ b/packages/telegraf/files/telegraf.conf.d/netifyd.conf @@ -15,6 +15,9 @@ ## HTTP methods to accept methods = ["POST"] + ## Response code + http_success_code = 200 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: From b64518c99bb51639ec733e76c551a774a510600e Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 1 Apr 2026 09:02:55 +0200 Subject: [PATCH 25/39] synced banip --- packages/banip/Makefile | 2 +- packages/banip/files/95-banip-housekeeping | 2 +- packages/banip/files/README.md | 454 ++++++++++++++++++++- 3 files changed, 455 insertions(+), 3 deletions(-) diff --git a/packages/banip/Makefile b/packages/banip/Makefile index fb7e45ff2..e279d3642 100644 --- a/packages/banip/Makefile +++ b/packages/banip/Makefile @@ -51,7 +51,7 @@ define Package/banip/install $(INSTALL_BIN) ./files/banip.init $(1)/etc/init.d/banip $(INSTALL_DIR) $(1)/usr/lib - $(INSTALL_BIN) ./files/banip-functions.sh $(1)/usr/lib + $(INSTALL_DATA) ./files/banip-functions.sh $(1)/usr/lib $(INSTALL_DIR) $(1)/etc/config $(INSTALL_CONF) ./files/banip.conf $(1)/etc/config/banip diff --git a/packages/banip/files/95-banip-housekeeping b/packages/banip/files/95-banip-housekeeping index 9ef25b616..5dbc8648e 100644 --- a/packages/banip/files/95-banip-housekeeping +++ b/packages/banip/files/95-banip-housekeeping @@ -35,7 +35,7 @@ done [ -n "$(uci -q changes "${config}")" ] && uci -q commit "${config}" custom_feed="/etc/banip/banip.custom.feeds" -if grep -q '"rule_4"' "${custom_feed}" 2>/dev/null; then +if grep -q '"rule_4"' "${custom_feed}"; then mv -f "${custom_feed}" "${custom_feed}.backup.$(date "+%Y%m%d%H%M%S")" : > "${custom_feed}" fi diff --git a/packages/banip/files/README.md b/packages/banip/files/README.md index d3d5a496f..79887f762 100644 --- 
a/packages/banip/files/README.md
+++ b/packages/banip/files/README.md
@@ -105,4 +105,456 @@ IP address blocking is commonly used to protect against brute force attacks, pre
 * Devices with less than 256MB of RAM are **_not_** supported
 * After system upgrades it's recommended to start with a fresh banIP default config
-For more information and documentation, please visit the official [banIP GitHub repository](https://github.com/openwrt/packages/tree/master/net/banip).
+
+## Installation and Usage
+* Update your router's apk repository (apk update)
+* Install the LuCI companion package 'luci-app-banip' which also installs the main 'banip' package as a dependency
+* Enable the banIP system service (System -> Startup) and enable banIP itself (banIP -> General Settings)
+* It's strongly recommended to use the LuCI frontend to easily configure all aspects of banIP, the application is located in LuCI under the 'Services' menu
+* It's also recommended to configure a 'Reload Trigger Interface' to depend on your WAN ifup events during boot or restart of your router
+* To be able to use banIP in a meaningful way, you must activate the service and possibly also activate a few blocklist feeds
+* If you're using a complex network setup, e.g. special tunnel interfaces, then untick the 'Auto Detection' option under the 'General Settings' tab and set the required options manually
+* Start the service with '/etc/init.d/banip start' and check everything is working by running '/etc/init.d/banip status', also check the 'Processing Log' tab
+
+
+## banIP CLI interface
+* All important banIP functions are accessible via CLI, too. If you're going to configure banIP via CLI, edit the config file '/etc/config/banip' and enable the service, add pre-configured feeds and add/change other options to your needs, see the options reference table below.
+ +```sh +~# /etc/init.d/banip +Syntax: /etc/init.d/banip [command] + +Available commands: + start Start the service + stop Stop the service + restart Restart the service + reload Reload configuration files (or restart if service does not implement reload) + enable Enable service autostart + disable Disable service autostart + enabled Check if service is started on boot + report [text|json|mail|gen] Print banIP related Set statistics + search [|] Check if an element exists in a banIP Set + content [] [true|false] Listing of all or only elements with hits of a given banIP Set + running Check if service is running + status Service status + trace Start with syscall trace + info Dump procd service info +``` + + +## banIP config options + +| Option | Type | Default | Description | +| :---------------------- | :----- | :---------------------------- | :---------------------------------------------------------------------------------------------------------------- | +| ban_enabled | option | 0 | enable the banIP service | +| ban_nicelimit | option | 0 | ulimit nice level of the banIP service (range 0-19) | +| ban_filelimit | option | 1024 | ulimit max open/number of files (range 1024-4096) | +| ban_loglimit | option | 100 | scan only the last n log entries permanently. A value of '0' disables the monitor | +| ban_logcount | option | 1 | how many times the IP must appear in the log to be considered as suspicious | +| ban_logterm | list | regex | various regex for logfile parsing (default: dropbear, sshd, luci, nginx, asterisk and cgi-remote events) | +| ban_logreadfile | option | /var/log/messages | alternative location for parsing a log file via tail, to deactivate the standard parsing via logread | +| ban_autodetect | option | 1 | auto-detect wan interfaces, devices and subnets | +| ban_debug | option | 0 | enable banIP related debug logging | +| ban_icmplimit | option | 25 | threshold in number of packets to detect icmp DoS in prerouting chain. 
A value of '0' disables this safeguard | +| ban_synlimit | option | 10 | threshold in number of packets to detect syn DoS in prerouting chain. A value of '0' disables this safeguard | +| ban_udplimit | option | 100 | threshold in number of packets to detect udp DoS in prerouting chain. A value of '0' disables this safeguard | +| ban_logprerouting | option | 0 | log suspicious packets in the prerouting chain | +| ban_loginbound | option | 0 | log suspicious packets in the inbound chain (wan-input and wan-forward) | +| ban_logoutbound | option | 0 | log suspicious packets in the outbound chain (lan-forward) | +| ban_autoallowlist | option | 1 | add wan IPs/subnets and resolved domains automatically to the local allowlist (not only to the Sets) | +| ban_autoblocklist | option | 1 | add suspicious attacker IPs and resolved domains automatically to the local blocklist (not only to the Sets) | +| ban_autoblocksubnet | option | 0 | add entire subnets to the blocklist Sets based on an additional RDAP request with the suspicious IP | +| ban_autoallowuplink | option | subnet | limit the uplink autoallow function to: 'subnet', 'ip' or 'disable' it at all | +| ban_allowlistonly | option | 0 | restrict the internet access only to specific, explicitly allowed IP segments | +| ban_allowflag | option | - | always allow certain protocols(tcp or udp) plus destination ports or port ranges, e.g.: 'tcp 80 443-445' | +| ban_allowurl | list | - | external allowlist feed URLs, one or more references to simple remote IP lists | +| ban_basedir | option | /tmp | base working directory while banIP processing | +| ban_reportdir | option | /tmp/banIP-report | directory where banIP stores report files | +| ban_backupdir | option | /tmp/banIP-backup | directory where banIP stores compressed backup files | +| ban_errordir | option | /tmp/banIP-error | directory where banIP stores processing error files | +| ban_protov4 | option | - / autodetect | enable IPv4 support | +| ban_protov6 | option | - / 
autodetect | enable IPv6 support | +| ban_ifv4 | list | - / autodetect | logical wan IPv4 interfaces, e.g. 'wan' | +| ban_ifv6 | list | - / autodetect | logical wan IPv6 interfaces, e.g. 'wan6' | +| ban_dev | list | - / autodetect | wan device(s), e.g. 'eth2' | +| ban_vlanallow | list | - | always allow certain VLAN forwards, e.g. br-lan.20 | +| ban_vlanblock | list | - | always block certain VLAN forwards, e.g. br-lan.10 | +| ban_trigger | list | - | logical reload trigger interface(s), e.g. 'wan' | +| ban_triggerdelay | option | 20 | trigger timeout during interface reload and boot | +| ban_deduplicate | option | 1 | deduplicate IP addresses across all active Sets (see optional feed flag 'dup' below) | +| ban_splitsize | option | 0 | split the processing/loading of Sets in chunks of n lines/members (saves RAM) | +| ban_cores | option | - / autodetect | limit the cpu cores used by banIP (saves RAM) | +| ban_nftloglevel | option | warn | nft loglevel, values: emerg, alert, crit, err, warn, notice, info, debug | +| ban_nftpriority | option | -100 | nft priority for the banIP table (the prerouting table is fixed to priority -150) | +| ban_nftpolicy | option | memory | nft policy for banIP-related Sets, values: memory, performance | +| ban_nftexpiry | option | - | expiry time (ms|s|m|h|d|w) for auto added blocklist members, e.g. '5m', '2h' or '1d' | +| ban_nftretry | option | 3 | number of Set load attempts in case of an error | +| ban_nftcount | option | 0 | enable nft counter for every Set element | +| ban_bcp38 | option | 0 | block packets with spoofed source IP addresses in all supported chains | +| ban_map | option | 0 | enable a GeoIP Map with suspicious Set elements | +| ban_feed | list | - | external download feeds, e.g. 
'yoyo', 'doh', 'country' or 'talos' (see feed table) | +| ban_asn | list | - | ASNs for the 'asn' feed, e.g.'32934' | +| ban_asnsplit | option | - | the selected ASNs are stored in separate Sets | +| ban_region | list | - | Regional Internet Registry (RIR) country selection. Supported regions are: AFRINIC, ARIN, APNIC, LACNIC and RIPE | +| ban_country | list | - | country iso codes for the 'country' feed, e.g. 'ru' | +| ban_countrysplit | option | - | the selected countries are stored in separate Sets | +| ban_blockpolicy | option | drop | 'drop' packets silently on input and forwardwan chains or actively 'reject' the traffic | +| ban_feedin | list | - | limit the selected feeds to the inbound chain (wan-input and wan-forward) | +| ban_feedout | list | - | limit the selected feeds to the outbound chain (lan-forward) | +| ban_feedinout | list | - | set the selected feeds to the inbound and outbound chain (lan-forward) | +| ban_feedreset | list | - | override the default feed configuration and remove existing port/protocol limitations | +| ban_feedcomplete | list | - | opt out the selected feeds from the deduplication process | +| ban_fetchcmd | option | - / autodetect | 'uclient-fetch', 'wget' or 'curl' | +| ban_fetchparm | option | - / autodetect | set the config options for the selected download utility | +| ban_fetchretry | option | 5 | number of download attempts in case of an error (not supported by uclient-fetch) | +| ban_fetchinsecure | option | 0 | don't check SSL server certificates during download | +| ban_mailreceiver | option | - | receiver address for banIP related notification E-Mails | +| ban_mailsender | option | no-reply@banIP | sender address for banIP related notification E-Mails | +| ban_mailtopic | option | banIP notification | topic for banIP related notification E-Mails | +| ban_mailprofile | option | ban_notify | mail profile used in 'msmtp' for banIP related notification E-Mails | +| ban_mailnotification | option | 0 | receive E-Mail 
notifications with every banIP run | +| ban_resolver | option | - | external resolver used for DNS lookups, by default the local resolver/forwarder will be used | +| ban_remotelog | option | 0 | enable the cgi interface to receive remote logging events | +| ban_remotetoken | option | - | unique token to communicate with the cgi interface | + + +## Examples +**banIP report information** + +``` +~# /etc/init.d/banip report +::: +::: banIP Set Statistics +::: + Timestamp: 2026-01-12 19:33:11 + ------------------------------ + blocked syn-flood packets : 0 + blocked udp-flood packets : 10 + blocked icmp-flood packets : 11480 + blocked invalid ct packets : 1653 + blocked invalid tcp packets: 0 + blocked bcp38 packets : 0 + --- + auto-added IPs to allowlist: 0 + auto-added IPs to blocklist: 0 + + Set | Count | Inbound (packets) | Outbound (packets) | Port/Protocol | Elements (max. 50) + ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------ + allowlist.v4 | 1 | ON: 0 | ON: 0 | - | + allowlist.v4MAC | 0 | - | ON: 0 | - | + allowlist.v6 | 1 | ON: 0 | ON: 0 | - | + allowlist.v6MAC | 0 | - | ON: 0 | - | + blocklist.v4 | 7 | ON: 358 | ON: 812 | - | 5.187.35.0, 20.160.0.0, + | | | | | 45.135.232.0, 91.202.233 + | | | | | .0 + blocklist.v4MAC | 0 | - | ON: 0 | - | + blocklist.v6 | 0 | ON: 4 | ON: 0 | - | + blocklist.v6MAC | 0 | - | ON: 0 | - | + dns.v4 | 95493 | - | ON: 2039 | tcp, udp: 53, 853 | 8.8.8.8 + dns.v6 | 251 | - | ON: 0 | tcp, udp: 53, 853 | + doh.v4 | 1663 | - | ON: 0 | tcp, udp: 80, 443 | + doh.v6 | 1204 | - | ON: 0 | tcp, udp: 80, 443 | + hagezi.v4 | 39535 | - | ON: 0 | tcp, udp: 80, 443 | + ---------------------+--------------+-----------------------+-----------------------+-----------------------+------------------------ + 13 | 138155 | 4 (362) | 13 (2851) | 10 | 5 +``` + +**banIP runtime information** + +```sh +~# /etc/init.d/banip status +::: banIP runtime information + + status : 
active (nft: ✔, monitor: ✔) + + frontend_ver : 1.8.0-r1 + + backend_ver : 1.8.0-r1 + + element_count : 138 148 (chains: 7, sets: 13, rules: 50) + + active_feeds : allowlist.v4MAC, allowlist.v6MAC, allowlist.v4, allowlist.v6, dns.v4, blocklist.v4MAC, blocklist.v6MAC, doh.v6, blocklist.v4, doh.v4, blocklist.v6, dns.v6, hagezi.v4 + + active_devices : wan: pppoe-wan / wan-if: wan, wan_6 / vlan-allow: - / vlan-block: - + + active_uplink : 5.73.162.23, 2a13:4800:204:319e:b26d:238b:d7fe:8213 + + nft_info : ver: 1.1.6-r1, priority: -100, policy: performance, loglevel: warn, expiry: 2h, limit (icmp/syn/udp): 25/10/100 + + run_info : base: /mnt/data/banIP, backup: /mnt/data/banIP/backup, report: /mnt/data/banIP/report, error: /mnt/data/banIP/error + + run_flags : auto: ✔, proto (4/6): ✔/✔, bcp38: ✔, log (pre/in/out): ✘/✘/✔, count: ✔, dedup: ✔, split: ✘, custom feed: ✘, allowed only: ✘ + + last_run : mode: restart, 2026-01-12 06:16:19, duration: 0m 36s, memory: 1446.84 MB available + + system_info : cores: 4, log: logread, fetch: curl, Bananapi BPI-R3, mediatek/filogic, OpenWrt SNAPSHOT (r32542-bf46d119a2) +``` + +**banIP search information** + +```sh +~# /etc/init.d/banip search 8.8.8.8 +::: +::: banIP Search +::: + Looking for IP '8.8.8.8' on 2025-01-13 22:13:36 + --- + IP found in Set 'country.v4' + IP found in Set 'doh.v4' +``` + +**banIP Set content information** +List all elements of a given Set with hit counters, e.g.: + +```sh +~# /etc/init.d/banip content turris.v4 +::: +::: banIP Set Content +::: + List elements of the Set 'turris.v4' on 2025-06-08 23:28:55 + --- +1.4.228.135, packets: 0 +1.23.16.3, packets: 0 +1.33.35.42, packets: 0 +1.33.231.132, packets: 0 +1.34.29.158, packets: 0 +1.34.231.106, packets: 0 +1.52.91.174, packets: 0 +1.64.149.142, packets: 0 +1.69.243.13, packets: 0 +1.70.139.250, packets: 0 +1.70.171.246, packets: 0 +1.82.191.114, packets: 0 +[...] 
+```
+
+List only elements with hits of a given Set with hit counters, e.g.:
+```sh
+~# /etc/init.d/banip content turris.v4 true
+:::
+::: banIP Set Content
+:::
+    List elements of the Set 'turris.v4' on 2025-06-08 23:30:59
+    ---
+74.50.211.178, packets: 1
+109.205.213.115, packets: 18
+109.205.213.123, packets: 35
+109.205.213.248, packets: 29
+109.205.213.250, packets: 20
+109.205.213.252, packets: 30
+122.222.152.65, packets: 1
+186.91.25.141, packets: 2
+190.203.106.113, packets: 2
+200.123.238.20, packets: 1
+```
+
+
+## Best practice and tweaks
+**Recommendation for low memory systems**
+nftables supports the atomic loading of firewall rules (incl. elements), which is cool but unfortunately is also very memory intensive. To reduce the memory pressure on low memory systems (i.e. those with 256-512MB RAM), you should optimize your configuration with the following options:
+
+* point 'ban_basedir', 'ban_reportdir', 'ban_backupdir' and 'ban_errordir' to an external usb drive or ssd
+* set 'ban_cores' to '1' (only useful on a multicore system) to force sequential feed processing
+* set 'ban_splitsize' e.g. to '1024' to split the load of an external Set after every 1024 lines/elements
+* set 'ban_nftcount' to '0' to deactivate the CPU- and memory-intensive creation of counter elements at Set level
+
+**Sensible choice of blocklists**
+The following feeds are just my personal recommendation as an initial setup:
+* cinsscore, debl, turris and doh in their default chains
+
+In total, this feed selection blocks about 20K IP addresses. It may also be useful to include some countries to the country feed.
+Please note: don't just blindly activate (too) many feeds at once, sooner or later this will lead to OOM conditions.
+
+**Log Terms for logfile parsing**
+Like fail2ban and crowdsec, banIP supports logfile scanning and automatic blocking of suspicious attacker IPs.
+In the default config only the log terms to detect failed login attempts via dropbear and LuCI are in place. The following search pattern has been tested as well: + +``` +dropbear : 'Exit before auth from' +LuCI : 'luci: failed login' +sshd1 : 'error: maximum authentication attempts exceeded' +sshd2 : 'sshd.*Connection closed by.*\[preauth\]' +asterisk : 'SecurityEvent=\"InvalidAccountID\".*RemoteAddress=' +nginx : 'received a suspicious remote IP .*' +openvpn : 'TLS Error: could not determine wrapping from \[AF_INET\]' +AdGuard : 'AdGuardHome.*\[error\].*/control/login: from ip' +``` + +You find the 'Log Terms' option in LuCI under the 'Log Settings' tab. Feel free to add more log terms to meet your needs and protect additional services. + +**Allow-/Blocklist handling** +banIP supports local allow- and block-lists, MAC/IPv4/IPv6 addresses (incl. ranges in CIDR notation) or domain names. These files are located in /etc/banip/banip.allowlist and /etc/banip/banip.blocklist. +Unsuccessful login attempts or suspicious requests will be tracked and added to the local blocklist (see the 'ban_autoblocklist' option). The blocklist behaviour can be further tweaked with the 'ban_nftexpiry' option. +Depending on the options 'ban_autoallowlist' and 'ban_autoallowuplink' the uplink subnet or the uplink IP will be added automatically to local allowlist. +Furthermore, you can reference external Allowlist URLs with additional IPv4 and IPv6 feeds (see 'ban_allowurl'). +Both local lists also accept domain names as input to allow IP filtering based on these names. The corresponding IPs (IPv4 & IPv6) will be extracted and added to the Sets. + +**Allowlist-only mode** +banIP supports an "allowlist only" mode. This option restricts Internet access only to certain, explicitly permitted IP segments - and blocks access to the rest of the Internet. All IPs that are _not_ listed in the allowlist or in the external allowlist URLs are blocked. 
In this mode it might be useful to limit the allowlist feed to the inbound chain, to still allow outbound communication to the rest of the world.
+
+**MAC/IP-binding**
+banIP supports concatenation of local MAC addresses/ranges with IPv4/IPv6 addresses, e.g. to enforce dhcp assignments or to free connected clients from outbound blocking.
+The following notations in the local allow- and block-list are supported:
+
+```
+MAC-address only:
+C8:C2:9B:F7:80:12 => this will be populated to the v4MAC- and v6MAC-Sets with the IP-wildcards 0.0.0.0/0 and ::/0
+
+MAC-address range:
+C8:C2:9B:F7:80:12/24 => this will populate the MAC-range C8:C2:9B:00:00:00", "C8:C2:9B:FF:FF:FF to the v4MAC- and v6MAC-Sets with the IP-wildcards 0.0.0.0/0 and ::/0
+
+MAC-address with IPv4 concatenation:
+C8:C2:9B:F7:80:12 192.168.1.10 => this will be populated only to v4MAC-Set with the certain IP, no entry in the v6MAC-Set
+
+MAC-address with IPv6 concatenation:
+C8:C2:9B:F7:80:12 2a02:810c:0:80:a10e:62c3:5af:f3f => this will be populated only to v6MAC-Set with the certain IP, no entry in the v4MAC-Set
+
+MAC-address with IPv4 and IPv6 concatenation:
+C8:C2:9B:F7:80:12 192.168.1.10 => this will be populated to v4MAC-Set with the certain IP
+C8:C2:9B:F7:80:12 2a02:810c:0:80:a10e:62c3:5af:f3f => this will be populated to v6MAC-Set with the certain IP
+
+MAC-address with IPv4 and IPv6 wildcard concatenation:
+C8:C2:9B:F7:80:12 192.168.1.10 => this will be populated to v4MAC-Set with the certain IP
+C8:C2:9B:F7:80:12 => this will be populated to v6MAC-Set with the IP-wildcard ::/0
+```
+
+**MAC-address logging in nftables**
+The MAC-address logging format in nftables is a little bit unusual. It is generated by the kernel's NF_LOG module and places all MAC-related data into one flat field, without separators or labels.
For example, the field MAC=7e:1a:2f:fc:ee:29:68:34:21:1f:a7:b1:08:00 is actually a concatenation of the following: + +``` +[Source MAC (6 bytes)] + [Destination MAC (6 bytes)] + [EtherType (2 bytes)] +7e:1a:2f:fc:ee:29 → the source MAC address +68:34:21:1f:a7:b1 → the destination MAC address +08:00 → the EtherType for IPv4 (0x0800) +``` +**BCP38** +BCP38 (**B**est **C**urrent **P**ractice, RFC 2827) defines ingress filtering to prevent IP address spoofing. In practice, this means: +* dropping packets arriving on the WAN whose source address is not valid or routable via that interface +* dropping packets leaving LAN => WAN whose source address does not belong to the local/internal prefixes + +In banIP, the BCP38 implementation uses nftables’ FIB lookup to enforce this. It checks whether the packet’s source address is not valid for the incoming interface or whether the routing table reports no route for this source on this interface. Packets that fail this check are dropped. + +**Set reporting, enable the GeoIP Map** +banIP includes a powerful reporting tool on the Set Reporting tab which shows the latest NFT banIP Set statistics. To get the latest statistics always press the "Refresh" button. +In addition to a tabular overview banIP reporting includes a GeoIP map in a modal popup window/iframe that shows the geolocation of your own uplink addresses (in green) and the locations of potential attackers (in red). 
To enable the GeoIP Map set the following options (in "Feed/Set Settings" config tab): + + * set 'ban_nftcount' to '1' to enable the nft counter for every Set element + * set 'ban_map' to '1' to include the external components listed below and activate the GeoIP map + +To make this work, banIP uses the following external components: +* [Leaflet](https://leafletjs.com/) is a lightweight open-source JavaScript library for interactive maps +* [OpenStreetMap](https://www.openstreetmap.org/) provides the map data under an open-source license +* [CARTO basemap styles](https://github.com/CartoDB/basemap-styles) based on [OpenMapTiles](https://openmaptiles.org/schema) +* The free and quite fast [IP Geolocation API](https://ip-api.com/) to resolve the required IP/geolocation information + +**CGI interface to receive remote logging events** +banIP ships a basic cgi interface in '/www/cgi-bin/banip' to receive remote logging events (disabled by default). The cgi interface evaluates logging events via GET or POST request (see examples below). To enable the cgi interface set the following options: + + * set 'ban_remotelog' to '1' to enable the cgi interface + * set 'ban_remotetoken' to a secret transfer token, allowed token characters consist of '[A-Za-z]', '[0-9]', '.' and ':' + + Examples to transfer remote logging events from an internal server to banIP via cgi interface: + + * POST request: curl --insecure --data "=" https://192.168.1.1/cgi-bin/banip + * GET request: wget --no-check-certificate https://192.168.1.1/cgi-bin/banip?= + +Please note: for security reasons use this cgi interface only internally and only encrypted via https transfer protocol. 
+ +**Download options** +By default banIP uses the following pre-configured download options: + +``` + * curl: --connect-timeout 20 --retry-delay 10 --retry 5 --retry-all-errors --fail --silent --show-error --location -o + * wget: --no-cache --no-cookies --timeout=20 --waitretry=10 --tries=5 --retry-connrefused --max-redirect=0 -O + * uclient-fetch: --timeout=20 -O +``` + +To override the default set 'ban_fetchretry', 'ban_fetchinsecure' or globally 'ban_fetchparm' to your needs. + +**Configure E-Mail notifications via 'msmtp'** +To use the email notification you must install and configure the package 'msmtp'. +Modify the file '/etc/msmtprc', e.g.: + +``` +[...] +defaults +auth on +tls on +tls_certcheck off +timeout 5 +syslog LOG_MAIL +[...] +account ban_notify +host smtp.gmail.com +port 587 +from <address>@gmail.com
+user <user> +password <password> +``` + +Finally add a valid E-Mail receiver address in banIP. + +**Send status E-Mails and update the banIP lists via cron job** +For a regular, automatic status mailing and update of the used lists on a daily basis set up a cron job, e.g. + +``` +55 03 * * * /etc/init.d/banip report mail +00 04 * * * /etc/init.d/banip reload +``` + +**Redirect asterisk security logs to logd/logread** +By default banIP scans the logfile via logread, so to monitor attacks on asterisk, its security log must be available via logread. To do this, edit '/etc/asterisk/logger.conf' and add the line 'syslog.local0 = security', then run 'asterisk -rx reload logger' to update the running asterisk configuration. + +**Change/add banIP feeds and set optional feed flags** +The banIP default blocklist feeds are stored in an external JSON file '/etc/banip/banip.feeds'. All custom changes should be stored in an external JSON file '/etc/banip/banip.custom.feeds' (empty by default). It's recommended to use the LuCI based Custom Feed Editor to make changes to this file. +A valid JSON source object contains the following information, e.g.: + +```json + [...] + "doh":{ + "url_4": "https://raw.githubusercontent.com/dibdot/DoH-IP-blocklists/master/doh-ipv4.txt", + "url_6": "https://raw.githubusercontent.com/dibdot/DoH-IP-blocklists/master/doh-ipv6.txt", + "rule": "feed 1", + "chain": "out", + "descr": "public DoH-Server", + "flag": "tcp udp 80 443" + }, + [...] +``` + +Add a unique feed name (no spaces, no special chars) and make the required changes: adapt at least the URL, check/change the rule, the size and the description for a new feed. +The rule consists of max. 4 individual, space separated parameters: +1. type: 'feed' or 'suricata' (required) +2. prefix: an optional search term (a string literal, no regex) to identify valid IP list entries +3. column: the IP column within the feed file, e.g. '1' (required) +4. 
separator: an optional field separator, default is the character class '[[:space:]]' + +Please note: the flag field is optional, it's a space separated list of options: supported are 'gz' as an archive format and protocols 'tcp' or 'udp' with port numbers/port ranges for destination port limitations. + +**Debug options** +banIP provides an optional debug mode that writes diagnostic information to the system log and captures internal error output in a dedicated error logfile - by default located in the banIP base directory as '/tmp/ban_error.log'. The log file is automatically cleared at the beginning of each run. Under normal conditions, all error messages are discarded to keep regular runs clean and silent. + +Whenever you encounter banIP related processing problems, please enable "Verbose Debug Logging", restart banIP and check the "Processing Log" tab. +Typical symptoms: +* The nftables initialization failed: untick the 'Auto Detection' option in the 'General Settings' config section and set the required device and tools options manually +* A blocklist feed does not work: maybe a temporary server problem or the download URL has been changed. In the latter case, just use the Custom Feed Editor to point this feed to a new URL + +In case of a nft processing error, banIP creates an error directory (by default '/tmp/banIP-error') with the faulty nft load files. +For further troubleshooting, you can try to load such an error file manually to determine the exact cause of the error, e.g.: 'nft -f error.file.nft'. + +Whenever you encounter firewall problems, enable the logging of certain chains in the "Log Settings" config section, restart banIP and check the "Firewall Log" tab. +Typical symptoms: +* A feed blocks a legit IP: disable the entire feed or add this IP to your local allowlist and reload banIP +* A feed (e.g. 
doh) interrupts almost all client connections: check the feed table above for reference and reset the feed to the defaults in the "Feed/Set Settings" config tab section +* The allowlist doesn't free a certain IP/MAC address: check the current content of the allowlist with the "Set Content" under the "Set Reporting" tab to make sure that the desired IP/MAC is listed - if not, reload banIP + + +## Support +Please join the banIP discussion in this [forum thread](https://forum.openwrt.org/t/banip-support-thread/16985) or contact me by mail <dev@brenken.org> +If you want to report an error, please describe it in as much detail as possible - with (debug) logs, the current banIP status, your banIP configuration, etc. + + +## Removal +Stop all banIP related services with _/etc/init.d/banip stop_ and remove the banip package if necessary. + + +## Donations +You like this project - is there a way to donate? Generally speaking "No" - I have a well-paying full-time job and my OpenWrt projects are just a hobby of mine in my spare time. + +If you still insist to donate some bucks ... +* I would be happy if you put your money in kind into other, social projects in your area, e.g. a children's hospice +* Let's meet and invite me for a coffee if you are in my area, the “Markgräfler Land” in southern Germany or in Switzerland (Basel) +* Send your money to my [PayPal account](https://www.paypal.me/DirkBrenken) and I will collect your donations over the year to support various social projects in my area + +No matter what you decide - thank you very much for your support! + +Have fun! 
+Dirk From 3a2fc00e02f8d04fb5b9286032a663fa0f909068 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Thu, 2 Apr 2026 11:45:08 +0200 Subject: [PATCH 26/39] updated log interval to 60s --- .../telegraf/files/netifyd/netify-proc-aggregator-telegraf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json index 17ab825c4..4135386c0 100644 --- a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json +++ b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json @@ -1,7 +1,7 @@ { "aggregator": 3, "batched_rows": 0, - "log_interval": 15, + "log_interval": 60, "compressor": "none", "format": "json", "nested_mode": false, From c0ccfe1b1abfc1de6e3bbd0d04a258a9f4bc5e05 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Fri, 3 Apr 2026 13:40:12 +0200 Subject: [PATCH 27/39] updated sink with a testing env --- .../netify-proc-aggregator-sqlite.json | 14 ++++ .../files/etc/netifyd/netify-sink-sqlite.json | 79 ++++++++++--------- .../netifyd/plugins.d/10-netify-sqlite.conf | 15 ++++ 3 files changed, 70 insertions(+), 38 deletions(-) create mode 100644 packages/netifyd/files/etc/netifyd/netify-proc-aggregator-sqlite.json create mode 100644 packages/netifyd/files/etc/netifyd/plugins.d/10-netify-sqlite.conf diff --git a/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-sqlite.json b/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-sqlite.json new file mode 100644 index 000000000..1a1da1eff --- /dev/null +++ b/packages/netifyd/files/etc/netifyd/netify-proc-aggregator-sqlite.json @@ -0,0 +1,14 @@ +{ + "aggregator": 6, + "batched_rows": 0, + "log_interval": 60, + "compressor": "none", + "format": "json", + "nested_mode": false, + "privacy_mode": false, + "sinks": { + "sink-sqlite": { + "default": { } + } + } +} \ No newline at end of file diff --git 
a/packages/netifyd/files/etc/netifyd/netify-sink-sqlite.json b/packages/netifyd/files/etc/netifyd/netify-sink-sqlite.json index 1bbe92973..ac18182dd 100644 --- a/packages/netifyd/files/etc/netifyd/netify-sink-sqlite.json +++ b/packages/netifyd/files/etc/netifyd/netify-sink-sqlite.json @@ -1,86 +1,89 @@ { "db_path": "${path_state_persistent}/db", - "purge_in_days": 30, "channels": { "default": { "enable": true, "db_name": "stats.db", "data_source": "aggregator", - "purge_in_days": 14, + "purge_in_days": 2, "tables": [ { "name": "stats", "columns": [ + { + "name": "application_category_id", + "mapped": "application_category", + "type": "INTEGER" + }, { "name": "application_id", "mapped": "detected_application", "type": "INTEGER" }, + { + "name": "application_name", + "mapped": "detected_application_name", + "type": "TEXT" + }, { "name": "protocol_id", "mapped": "detected_protocol", "type": "INTEGER" }, { - "name": "application_category_id", - "mapped": "application_category", - "type": "INTEGER" + "name": "protocol_name", + "mapped": "detected_protocol_name", + "type": "TEXT" }, { - "name": "protocol_category_id", - "mapped": "protocol_category", + "name": "internal", + "mapped": "internal", + "type": "TEXT" + }, + { + "name": "ip_version", + "mapped": "ip_version", "type": "INTEGER" }, { - "name": "overlay_tags", - "mapped": "tags", - "type": "TEXT" + "name": "local_bytes", + "mapped": "local_bytes", + "type": "INTEGER" }, { - "name": "tag", - "mapped": "tags", - "split": 0, + "name": "local_ip", + "mapped": "local_ip", "type": "TEXT" }, { - "name": "tag_group", - "mapped": "tags", - "split": 1, + "name": "local_origin", + "mapped": "local_origin", "type": "TEXT" }, { - "name": "download_bytes", + "name": "other_bytes", "mapped": "other_bytes", - "type": "INTEGER", - "default": 0 - }, - { - "name": "download_packets", - "mapped": "other_packets", - "type": "INTEGER", - "default": 0 + "type": "INTEGER" }, { - "name": "upload_bytes", - "mapped": "local_bytes", - 
"type": "INTEGER", - "default": 0 + "name": "other_ip", + "mapped": "other_ip", + "type": "TEXT" }, { - "name": "upload_packets", - "mapped": "local_packets", - "type": "INTEGER", - "default": 0 + "name": "other_port", + "mapped": "other_port", + "type": "INTEGER" }, { - "name": "local_ip", - "mapped": "local_ip", + "name": "other_type", + "mapped": "other_type", "type": "TEXT" }, { - "name": "local_mac", - "mapped": "local_mac", - "type": "TEXT" + "name": "protocol_category_id", + "mapped": "protocol_category", + "type": "INTEGER" } ] } diff --git a/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-sqlite.conf b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-sqlite.conf new file mode 100644 index 000000000..1cba2949c --- /dev/null +++ b/packages/netifyd/files/etc/netifyd/plugins.d/10-netify-sqlite.conf @@ -0,0 +1,15 @@ +# Netify Aggregator Processor Plugin Loader +# +############################################################################## + +[proc-aggregator-sqlite] +enable = no +plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-proc-aggregator-sqlite.json + +[sink-sqlite] +enable = no +plugin_library = ${path_plugin_libdir}/libnetify-sink-sqlite.so.0.0.0 +conf_filename = ${path_state_persistent}/netify-sink-sqlite.json + +# vim: set ft=dosini : From 47223696d013778e38a7c54d93eec272b48fb170 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Tue, 14 Apr 2026 11:38:02 +0200 Subject: [PATCH 28/39] adjusting telegraf inputs --- packages/telegraf/Makefile | 7 -- .../telegraf/files/telegraf.conf.d/os.conf | 105 +----------------- 2 files changed, 2 insertions(+), 110 deletions(-) diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index e43ca3e89..026197296 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -31,16 +31,10 @@ GO_PKG_TAGS:= \ inputs.bond \ inputs.cpu \ inputs.disk \ - inputs.diskio \ inputs.ethtool \ inputs.exec \ 
inputs.file \ inputs.http_listener_v2 \ - inputs.iptables \ - inputs.kernel \ - inputs.kernel_vmstat \ - inputs.linux_cpu \ - inputs.linux_sysctl_fs \ inputs.mem \ inputs.net \ inputs.netstat \ @@ -51,7 +45,6 @@ GO_PKG_TAGS:= \ inputs.system \ inputs.tail \ outputs.influxdb \ - outputs.sql \ parsers.grok \ parsers.json_v2 diff --git a/packages/telegraf/files/telegraf.conf.d/os.conf b/packages/telegraf/files/telegraf.conf.d/os.conf index 18ed9f251..b7a45221b 100644 --- a/packages/telegraf/files/telegraf.conf.d/os.conf +++ b/packages/telegraf/files/telegraf.conf.d/os.conf @@ -36,47 +36,6 @@ influxdb_db = "os-metrics" -# Read metrics about disk IO by device -[[inputs.diskio]] - ## Devices to collect stats for - ## Wildcards are supported except for disk synonyms like '/dev/disk/by-id'. - ## ex. devices = ["sda", "sdb", "vd*", "/dev/disk/by-id/nvme-eui.00123deadc0de123"] - # devices = ["*"] - - ## Skip gathering of the disk's serial numbers. - # skip_serial_number = true - - ## Device metadata tags to add on systems supporting it (Linux only) - ## Use 'udevadm info -q property -n ' to get a list of properties. - ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] - - ## Using the same metadata source as device_tags, you can also customize the - ## name of the device via templates. - ## The 'name_templates' parameter is a list of templates to try and apply to - ## the device. The template may contain variables in the form of '$PROPERTY' or - ## '${PROPERTY}'. The first template which does not contain any variables not - ## present for the device is used as the device name tag. - ## The typical use case is for LVM volumes, to get the VG/LV name instead of - ## the near-meaningless DM-0 name. 
- # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] - [inputs.diskio.tags] - influxdb_db = "os-metrics" - - -# Plugin to collect various Linux kernel statistics. -# This plugin ONLY supports Linux -[[inputs.kernel]] - ## Additional gather options - ## Possible options include: - ## * ksm - kernel same-page merging - ## * psi - pressure stall information - # collect = [] - [inputs.kernel.tags] - influxdb_db = "os-metrics" - - # Read metrics about memory usage [[inputs.mem]] # no configuration @@ -125,12 +84,12 @@ # Returns ethtool statistics for given interfaces # This plugin ONLY supports Linux -#[[inputs.ethtool]] +[[inputs.ethtool]] ## List of interfaces to pull metrics for # interface_include = ["eth0"] ## List of interfaces to ignore when pulling metrics. - # interface_exclude = ["eth1"] + interface_exclude = ["wg*", "ipsec*", "tun*"] ## Plugin behavior for downed interfaces ## Available choices: @@ -165,66 +124,6 @@ # normalize_keys = ["snakecase", "trim", "lower", "underscore"] -# Gather packets and bytes throughput from iptables -# This plugin ONLY supports Linux -[[inputs.iptables]] - ## iptables require root access on most systems. - ## Setting 'use_sudo' to true will make use of sudo to run iptables. - ## Users must configure sudo to allow telegraf user to run iptables with - ## no password. - ## iptables can be restricted to only list command "iptables -nvL". - # use_sudo = false - - ## Setting 'use_lock' to true runs iptables with the "-w" option. - ## Adjust your sudo settings appropriately if using this option - ## ("iptables -w 5 -nvl") - # use_lock = false - - ## Define an alternate executable, such as "ip6tables". Default is "iptables". - # binary = "ip6tables" - ## defines the table to monitor: - table = "filter" - - ## defines the chains to monitor. - ## NOTE: iptables rules without a comment will not be monitored. - ## Read the plugin documentation for more information. 
- chains = [ "INPUT" ] - [inputs.iptables.tags] - influxdb_db = "os-metrics" - - -# Get kernel statistics from /proc/vmstat -# This plugin ONLY supports Linux -[[inputs.kernel_vmstat]] - # no configuration - [inputs.kernel_vmstat.tags] - influxdb_db = "os-metrics" - - -# Provides Linux CPU metrics -# This plugin ONLY supports Linux -[[inputs.linux_cpu]] - ## Path for sysfs filesystem. - ## See https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt - ## Defaults: - # host_sys = "/sys" - - ## CPU metrics collected by the plugin. - ## Supported options: - ## "cpufreq", "thermal" - ## Defaults: - # metrics = ["cpufreq"] - [inputs.linux_cpu.tags] - influxdb_db = "os-metrics" - - -# Provides Linux sysctl fs metrics -[[inputs.linux_sysctl_fs]] - # no configuration - [inputs.linux_sysctl_fs.tags] - influxdb_db = "os-metrics" - - # Gather metrics about network interfaces [[inputs.net]] ## By default, telegraf gathers stats from any up interface (excluding loopback) From 101d11a60c1c30d54dcd8a37d31a5ce78f923ff1 Mon Sep 17 00:00:00 2001 From: Tommaso Bailetti Date: Wed, 15 Apr 2026 11:10:53 +0200 Subject: [PATCH 29/39] removed netifyd logging, too much data --- packages/telegraf/Makefile | 9 +---- .../netify-proc-aggregator-telegraf.json | 14 ------- .../netifyd/netify-sink-http-telegraf.json | 12 ------ .../netifyd/plugins.d/10-netify-telegraf.conf | 15 -------- .../files/telegraf.conf.d/netifyd.conf | 38 ------------------- 5 files changed, 1 insertion(+), 87 deletions(-) delete mode 100644 packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json delete mode 100644 packages/telegraf/files/netifyd/netify-sink-http-telegraf.json delete mode 100644 packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf delete mode 100644 packages/telegraf/files/telegraf.conf.d/netifyd.conf diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 026197296..317ae8319 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ 
-59,8 +59,7 @@ define Package/telegraf DEPENDS:= \ $(GO_ARCH_DEPENDS) \ +lm-sensors \ - +victoria-metrics \ - +netifyd + +victoria-metrics endef define Package/telegraf/description @@ -75,12 +74,6 @@ define Package/telegraf/install $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf.conf $(INSTALL_DIR) $(1)/etc/telegraf.conf.d $(INSTALL_DATA) ./files/telegraf.conf.d/os.conf $(1)/etc/telegraf.conf.d/os.conf - $(INSTALL_DATA) ./files/telegraf.conf.d/netifyd.conf $(1)/etc/telegraf.conf.d/netifyd.conf - $(INSTALL_DIR) $(1)/etc/netifyd - $(INSTALL_DATA) ./files/netifyd/netify-sink-http-telegraf.json $(1)/etc/netifyd/netify-sink-http-telegraf.json - $(INSTALL_DATA) ./files/netifyd/netify-proc-aggregator-telegraf.json $(1)/etc/netifyd/netify-proc-aggregator-telegraf.json - $(INSTALL_DIR) $(1)/etc/netifyd/plugins.d - $(INSTALL_DATA) ./files/netifyd/plugins.d/10-netify-telegraf.conf $(1)/etc/netifyd/plugins.d/10-netify-telegraf.conf endef define Package/telegraf/postinst diff --git a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json b/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json deleted file mode 100644 index 4135386c0..000000000 --- a/packages/telegraf/files/netifyd/netify-proc-aggregator-telegraf.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "aggregator": 3, - "batched_rows": 0, - "log_interval": 60, - "compressor": "none", - "format": "json", - "nested_mode": false, - "privacy_mode": false, - "sinks": { - "sink-http-telegraf": { - "telegraf": { } - } - } -} \ No newline at end of file diff --git a/packages/telegraf/files/netifyd/netify-sink-http-telegraf.json b/packages/telegraf/files/netifyd/netify-sink-http-telegraf.json deleted file mode 100644 index 073023bcc..000000000 --- a/packages/telegraf/files/netifyd/netify-sink-http-telegraf.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "timeout_connect": 30, - "timeout_transfer": 300, - "tls_verify": true, - "tls_version1": false, - "channels": { - "telegraf": { - "enable": true, - "url": 
"http://127.0.0.1:8087/netifyd" - } - } -} \ No newline at end of file diff --git a/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf b/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf deleted file mode 100644 index 87f34f593..000000000 --- a/packages/telegraf/files/netifyd/plugins.d/10-netify-telegraf.conf +++ /dev/null @@ -1,15 +0,0 @@ -# Netify plugins config for Telegraf - Local and Sede -# -############################################################################## - -[proc-aggregator-telegraf] -enable = yes -plugin_library = ${path_plugin_libdir}/libnetify-proc-aggregator.so.0.0.0 -conf_filename = ${path_state_persistent}/netify-proc-aggregator-telegraf.json - -[sink-http-telegraf] -enable = yes -plugin_library = ${path_plugin_libdir}/libnetify-sink-http.so.0.0.0 -conf_filename = ${path_state_persistent}/netify-sink-http-telegraf.json - -# vim: set ft=dosini : diff --git a/packages/telegraf/files/telegraf.conf.d/netifyd.conf b/packages/telegraf/files/telegraf.conf.d/netifyd.conf deleted file mode 100644 index 089e4e382..000000000 --- a/packages/telegraf/files/telegraf.conf.d/netifyd.conf +++ /dev/null @@ -1,38 +0,0 @@ -# Parse netifyd metrics from HTTP sink -# The netifyd service sends metrics via HTTP POST to this endpoint -# See packages/telegraf/files/netifyd/netify-sink-http-telegraf.json for sink configuration -[[inputs.http_listener_v2]] - ## Address and port to listen on - service_address = "127.0.0.1:8087" - - ## Paths this server serves - paths = ["/netifyd"] - - ## Use TLS for https, enter the certfile and keyfile path - # tls_cert = "" - # tls_key = "" - - ## HTTP methods to accept - methods = ["POST"] - - ## Response code - http_success_code = 200 - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json_v2" - - ## Global tags to add to all metrics from this input - [inputs.http_listener_v2.tags] - influxdb_db = "netifyd" - - [[inputs.http_listener_v2.json_v2]] - measurement_name = "netifyd" - timestamp_path = "log_time_end" - timestamp_format = "unix" - [[inputs.http_listener_v2.json_v2.object]] - path = "stats" - tags = ["detected_application_name", "detected_protocol_name", "interface", "internal", "ip_protocol", "ip_version", "local_ip", "local_mac", "local_origin", "other_ip", "other_port", "other_type"] - excluded_keys = ["digests"] From a04979f4d4c51a8d382b4bbacc1e7dc41595bb9e Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Wed, 15 Apr 2026 13:51:49 +0200 Subject: [PATCH 30/39] feat: add mimir alerting --- packages/ns-plug/Makefile | 1 + packages/ns-plug/README.md | 29 ++ packages/ns-plug/files/30_ns-plug_alerts | 61 +-- packages/ns-plug/files/config | 3 + .../ns-plug/files/health_alarm_notify.conf | 15 + packages/ns-plug/files/ns-plug-alert | 427 ++++++++++++++++++ packages/ns-plug/files/send-mwan-alert | 27 ++ 7 files changed, 512 insertions(+), 51 deletions(-) create mode 100644 packages/ns-plug/files/ns-plug-alert diff --git a/packages/ns-plug/Makefile b/packages/ns-plug/Makefile index b1715109b..4e2958c23 100644 --- a/packages/ns-plug/Makefile +++ b/packages/ns-plug/Makefile @@ -103,6 +103,7 @@ define Package/ns-plug/install $(INSTALL_CONF) files/health_alarm_notify.conf $(1)/etc/netdata $(INSTALL_BIN) ./files/send-mwan-alert $(1)/usr/libexec/mwan-hooks $(INSTALL_BIN) ./files/backup-encryption-alert $(1)/usr/libexec + $(INSTALL_BIN) ./files/ns-plug-alert $(1)/usr/sbin $(INSTALL_BIN) ./files/mwan-hooks $(1)/usr/libexec/ns-plug $(INSTALL_BIN) ./files/ns-plug-rsyslog-fixup.uci-default $(1)/etc/uci-defaults/rsyslog-fixup endef diff --git 
a/packages/ns-plug/README.md b/packages/ns-plug/README.md index 263ea7a7b..7c4f4f4be 100644 --- a/packages/ns-plug/README.md +++ b/packages/ns-plug/README.md @@ -141,9 +141,38 @@ Alerts are also logged to `/var/log/messages` and are visible within the netdata Only the following alerts are sent to the remote system: +- all of them repeat every 30 minutes while active - disk space occupation - WAN down events +To emulate these alerts manually with `ns-plug-alert`, use: + +``` +# Disk usage alert +ns-plug-alert fire --alertname DiskSpaceCritical --severity critical \ + --labels service=storage mountpoint=/mnt/data \ + --annotations \ + "summary_en=Disk space critical" \ + "summary_it=Spazio disco critico" \ + "description_en=Disk usage above 90% on /mnt/data" \ + "description_it=Utilizzo disco sopra 90% su /mnt/data" + +ns-plug-alert resolve --alertname DiskSpaceCritical --severity critical \ + --labels service=storage mountpoint=/mnt/data + +# WAN down alert +ns-plug-alert fire --alertname WanDown --severity critical \ + --labels service=network interface=wan0 \ + --annotations \ + "summary_en=WAN interface is down" \ + "summary_it=Interfaccia non disponibile" \ + "description_en=WAN interface wan0 is down. Internet connectivity could be affected." \ + "description_it=Interfaccia WAN wan0 non disponibile. Connettivita Internet potrebbe essere compromessa." + +ns-plug-alert resolve --alertname WanDown --severity critical \ + --labels service=network interface=wan0 +``` + When an alert is resolved, netdata will also send a clear command to remote server. ### MultiWAN alerts diff --git a/packages/ns-plug/files/30_ns-plug_alerts b/packages/ns-plug/files/30_ns-plug_alerts index dfb62a848..717161395 100644 --- a/packages/ns-plug/files/30_ns-plug_alerts +++ b/packages/ns-plug/files/30_ns-plug_alerts @@ -2,8 +2,7 @@ # Custom disk alerts disks_f="/etc/netdata/health.d/disks.conf" -if [ ! 
-f "$disks_f" ]; then - cat << EOF > "$disks_f" +cat << EOF > "$disks_f" template: disk_space_usage on: disk.space class: Utilization @@ -12,53 +11,13 @@ component: Disk os: linux freebsd hosts: * families: !/dev !/dev/* !/run !/run/* !/overlay * - calc: \$used * 100 / (\$avail + \$used) - units: % - every: 1m - warn: \$this > ((\$status >= \$WARNING ) ? (80) : (90)) - crit: \$this > ((\$status == \$CRITICAL) ? (90) : (98)) - delay: up 1m down 15m multiplier 1.5 max 1h - info: disk $family space utilization - to: sysadmin -EOF -fi - -# Disable unwanted alerts -files="cpu disks entropy ipc load memory net netfilter processes ram softnet tcp_conn tcp_listen tcp_mem tcp_orphans tcp_resets timex udp_errors" -for f in $files -do - file="/etc/netdata/health.d/${f}.conf" - if [ ! -f $file ]; then - > $file - fi -done - -# Enable mwan chart -sed -i 's/python.d = no/python.d = yes/' /etc/netdata/netdata.conf -python_f="/etc/netdata/python.d.conf" -if [ ! -f "$python_f" ]; then - cat << EOF > "$python_f" -enabled: yes -gc_run: yes -gc_interval: 300 -apache_cache: no -chrony: no -example: no -go_expvar: no -gunicorn_log: no -hpssa: no -logind: no -nginx_log: no -EOF -fi - -# Create mwan alert -cat << EOF > /etc/netdata/health.d/mwan.conf -template: wan_status - on: mwan.score -lookup: min -1m foreach * - every: 1m - warn: \$this < 5 - crit: \$this <= 1 - info: The score of the WAN, 0 means down + calc: \$used * 100 / (\$avail + \$used) + units: % + every: 1m + warn: \$this > ((\$status >= \$WARNING ) ? (80) : (90)) + crit: \$this > ((\$status == \$CRITICAL) ? 
(90) : (98)) + delay: up 1m down 15m multiplier 1.5 max 1h + info: disk \$family space utilization + to: sysadmin + repeat: critical 5m warning 5m EOF diff --git a/packages/ns-plug/files/config b/packages/ns-plug/files/config index 9f32f0262..b84a17933 100644 --- a/packages/ns-plug/files/config +++ b/packages/ns-plug/files/config @@ -9,3 +9,6 @@ config main 'config' option channel '' option tun_mtu '' option mssfix '' + option my_url '' + option my_system_key '' + option my_system_secret '' diff --git a/packages/ns-plug/files/health_alarm_notify.conf b/packages/ns-plug/files/health_alarm_notify.conf index 00852cfa9..31b54a0e0 100644 --- a/packages/ns-plug/files/health_alarm_notify.conf +++ b/packages/ns-plug/files/health_alarm_notify.conf @@ -45,6 +45,11 @@ custom_sender() { secret=$(uci -q get ns-plug.config.secret) url=$(uci -q get ns-plug.config.alerts_url)"alerts/store" alert_id=${name} + + logger -t alert "Alert: name=${name} severity=${severity} value=${value} chart=${chart} info=${info} src=${src}" + + # Preserve original netdata status before remapping for legacy API + netdata_status="${status}" if [ "${status}" == "CRITICAL" ]; then status="FAILURE" elif [ "${status}" == "CLEAR" ]; then @@ -72,4 +77,14 @@ custom_sender() { --header "Authorization: token ${secret}" --header "Content-Type: application/json" --header "Accept: application/json" \ --data-raw "${payload}" ${url} fi + + # Also forward to MY alertmanager if my_url is configured + if [ -n "$(uci -q get ns-plug.config.my_url)" ]; then + /usr/sbin/ns-plug-alert netdata \ + --alertname "${name}" \ + --status "${netdata_status}" \ + --chart "${chart}" \ + --family "${family}" \ + --value "${value}" 2>/dev/null & + fi } diff --git a/packages/ns-plug/files/ns-plug-alert b/packages/ns-plug/files/ns-plug-alert new file mode 100644 index 000000000..a0b3e0709 --- /dev/null +++ b/packages/ns-plug/files/ns-plug-alert @@ -0,0 +1,427 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2026 Nethesis S.r.l. 
+# SPDX-License-Identifier: GPL-2.0-only +# +""" +NethSecurity mimir integration for ns-plug. + +Reads configuration from /etc/config/ns-plug (UCI): + ns-plug.config.my_url Base URL of the mimir proxy + ns-plug.config.my_system_key HTTP Basic Auth username (new-format key) + ns-plug.config.my_system_secret HTTP Basic Auth password (new-format secret) + +Usage: + ns-plug-alert fire --alertname NAME --severity {critical,warning,info} + [--labels k=v ...] [--annotations k=v ...] + ns-plug-alert resolve --alertname NAME --severity {critical,warning,info} + [--labels k=v ...] + ns-plug-alert list [--state STATE] [--severity SEV] + ns-plug-alert netdata --alertname NAME --status STATUS + --chart CHART --family FAMILY [--value VALUE] + +Examples: + # Fire a critical disk alert + ns-plug-alert fire --alertname DiskSpaceCritical --severity critical \\ + --labels service=storage mountpoint=/data \\ + --annotations "description_en=Disk usage above 90%" + + # Resolve it + ns-plug-alert resolve --alertname DiskSpaceCritical --severity critical \\ + --labels service=storage mountpoint=/data + + # List all active alerts + ns-plug-alert list + + # Called internally from netdata custom_sender (health_alarm_notify.conf) + ns-plug-alert netdata --alertname disk_space_usage --status CRITICAL \\ + --chart disk_space._data --family /data --value 95.2 +""" + +import argparse +import base64 +import json +import sys +import urllib.error +import urllib.request +from datetime import datetime, timedelta, timezone +from euci import EUci + + +# --------------------------------------------------------------------------- +# Mapping from netdata alarm names / statuses to the NethSecurity alert catalog. +# Keys are netdata alarm names (${name} in health_alarm_notify.conf). +# Each entry maps a netdata status to the corresponding mimir alert definition. 
+# --------------------------------------------------------------------------- +NETDATA_ALERT_MAP = { + "disk_space_usage": { + "WARNING": { + "alertname": "DiskSpaceLow", + "service": "storage", + "severity": "warning", + "summary_en": "Disk space low on {mountpoint}", + "summary_it": "Spazio disco in esaurimento su {mountpoint}", + "description_en": "Disk usage on {mountpoint} is above 80%. Free space is running low.", + "description_it": "Utilizzo del disco su {mountpoint} superiore all'80%. Lo spazio libero sta esaurendosi.", + }, + "CRITICAL": { + "alertname": "DiskSpaceCritical", + "service": "storage", + "severity": "critical", + "summary_en": "Disk space critical on {mountpoint}", + "summary_it": "Spazio disco critico su {mountpoint}", + "description_en": "Disk usage on {mountpoint} is above 90%. Immediate action required.", + "description_it": "Utilizzo del disco su {mountpoint} superiore al 90%. Intervento immediato richiesto.", + }, + } +} + +# Netdata statuses that mean the alert is firing +NETDATA_FIRE_STATUSES = {"WARNING", "CRITICAL"} + +# Netdata statuses that mean the alert is resolved +NETDATA_RESOLVE_STATUSES = {"CLEAR", "REMOVED", "UNDEFINED"} + +# Map netdata severity to mimir severity (used for generic/unmapped alerts) +NETDATA_SEVERITY_MAP = { + "WARNING": "warning", + "CRITICAL": "critical", +} + + +# --------------------------------------------------------------------------- +# UCI / configuration helpers +# --------------------------------------------------------------------------- + +def load_config(): + """ + Return (url, key, secret) from UCI config or CLI overrides. + CLI overrides (--url / --key / --secret) take precedence over UCI values. 
+ """ + uci = EUci() + url = uci.get("ns-plug", "config", "my_url", default=None) + key = uci.get("ns-plug", "config", "my_system_key", default=None) + secret = uci.get("ns-plug", "config", "my_system_secret", default=None) + return url, key, secret + + +# --------------------------------------------------------------------------- +# HTTP helper +# --------------------------------------------------------------------------- + +def http_request(method, url, data=None, key=None, secret=None): + """ + Perform an HTTP request with Basic Auth. + Returns (status_code, response_body_str). + Exits with code 1 on connection error. + """ + credentials = base64.b64encode(f"{key}:{secret}".encode()).decode() + headers = { + "Authorization": f"Basic {credentials}", + "Accept": "application/json", + "Content-Type": "application/json", + } + body = json.dumps(data).encode() if data is not None else None + req = urllib.request.Request(url, data=body, headers=headers, method=method) + try: + with urllib.request.urlopen(req, timeout=30) as resp: + return resp.status, resp.read().decode() + except urllib.error.HTTPError as exc: + return exc.code, exc.read().decode() + except urllib.error.URLError as exc: + print(f"Connection error: {exc.reason}", file=sys.stderr) + sys.exit(1) + + +# --------------------------------------------------------------------------- +# Shared helpers +# --------------------------------------------------------------------------- + +def parse_kv(pairs): + """Parse ['key=value', ...] 
into a dict.""" + result = {} + for pair in pairs or []: + if "=" not in pair: + print(f"Error: invalid key=value pair: {pair!r}", file=sys.stderr) + sys.exit(1) + k, v = pair.split("=", 1) + result[k] = v + return result + + +def alerts_endpoint(url): + return f"{url.rstrip('/')}/collect/api/services/mimir/alertmanager/api/v2/alerts" + + +def now_utc(): + return datetime.now(timezone.utc) + + +def fmt(dt): + return dt.strftime("%Y-%m-%dT%H:%M:%SZ") + + +# --------------------------------------------------------------------------- +# Commands +# --------------------------------------------------------------------------- + +def cmd_fire(args): + """Fire an alert.""" + url, key, secret = load_config() + if not (url and key and secret): + print("Error: my_url, my_system_key and my_system_secret must be configured in ns-plug UCI config.", file=sys.stderr) + sys.exit(1) + + labels = {"alertname": args.alertname, "severity": args.severity} + labels.update(parse_kv(args.labels)) + + annotations = parse_kv(args.annotations) + + payload = [{ + "labels": labels, + "annotations": annotations, + "generatorURL": f"http://nethsecurity/alert/{args.alertname}", + "startsAt": fmt(now_utc()), + "endsAt": "0001-01-01T00:00:00Z", + }] + + status, body = http_request("POST", alerts_endpoint(url), data=payload, key=key, secret=secret) + if 200 <= status < 300: + print(json.dumps({"status": "success", "message": f"Alert '{args.alertname}' fired"})) + else: + print(f"Failed to fire alert (HTTP {status}): {body}", file=sys.stderr) + sys.exit(1) + + +def cmd_resolve(args): + """Resolve an alert by sending it with endsAt in the past.""" + url, key, secret = load_config() + if not (url and key and secret): + print("Error: my_url, my_system_key and my_system_secret must be configured in ns-plug UCI config.", file=sys.stderr) + sys.exit(1) + + labels = {"alertname": args.alertname, "severity": args.severity} + labels.update(parse_kv(args.labels)) + + now = now_utc() + annotations = 
parse_kv(args.annotations) if hasattr(args, "annotations") and args.annotations else { + "summary": "resolved", + "description": f"Alert {args.alertname} resolved at {fmt(now)}", + } + + payload = [{ + "labels": labels, + "annotations": annotations, + "generatorURL": f"http://nethsecurity/alert/{args.alertname}", + "startsAt": fmt(now - timedelta(hours=1)), + "endsAt": fmt(now), + }] + + status, body = http_request("POST", alerts_endpoint(url), data=payload, key=key, secret=secret) + if 200 <= status < 300: + print(json.dumps({"status": "success", "message": f"Alert '{args.alertname}' resolved"})) + else: + print(f"Failed to resolve alert (HTTP {status}): {body}", file=sys.stderr) + sys.exit(1) + + +def cmd_list(args): + """List active alerts.""" + url, key, secret = load_config() + if not (url and key and secret): + print("Error: my_url, my_system_key and my_system_secret must be configured in ns-plug UCI config.", file=sys.stderr) + sys.exit(1) + + status, body = http_request("GET", alerts_endpoint(url), key=key, secret=secret) + if not (200 <= status < 300): + print(f"Failed to list alerts (HTTP {status}): {body}", file=sys.stderr) + sys.exit(1) + + alerts = json.loads(body) + + if args.state: + alerts = [a for a in alerts if a.get("status", {}).get("state") == args.state] + if args.severity: + alerts = [a for a in alerts if a.get("labels", {}).get("severity") == args.severity] + + print(json.dumps(alerts, indent=2)) + + +def cmd_netdata(args): + """ + Handle a netdata alert notification. + + Called from health_alarm_notify.conf custom_sender with: + --alertname ${name} (netdata alarm name) + --status ${status} (CRITICAL, WARNING, CLEAR, REMOVED, ...) + --chart ${chart} (e.g. disk_space._overlay, mwan.score) + --family ${family} (e.g. /, /boot, wan0) + --value ${value} (metric value, may be empty) + """ + url, key, secret = load_config() + if not (url and key and secret): + # Mimir not configured — silently skip so existing flow is unaffected. 
+ sys.exit(0) + + netdata_status = (args.status or "").upper() + netdata_name = args.alertname or "" + family = args.family or "" + + if netdata_status in NETDATA_RESOLVE_STATUSES: + _netdata_resolve(url, key, secret, netdata_name, family) + elif netdata_status in NETDATA_FIRE_STATUSES: + _netdata_fire(url, key, secret, netdata_name, netdata_status, family, args.value) + # Any other status (e.g. UNDEFINED at startup) is silently ignored. + + +def _build_netdata_labels_annotations(netdata_name, netdata_status, family, value): + """ + Map a netdata alarm + status to mimir alertname, labels and annotations. + Falls back to a generic mapping when the alarm name is not in the catalog. + """ + mapping = NETDATA_ALERT_MAP.get(netdata_name, {}).get(netdata_status, {}) + + if mapping: + alertname = mapping["alertname"] + severity = mapping["severity"] + service = mapping.get("service") + + # Build label substitution context + ctx = {"mountpoint": family, "interface": family, "value": value or ""} + annotations = {} + for key in ("summary_en", "summary_it", "description_en", "description_it"): + if key in mapping: + annotations[key] = mapping[key].format_map(ctx) + if value: + annotations.setdefault("description_en", annotations.get("description_en", "") + f" Current value: {value}.") + else: + # Generic fallback: pass the netdata name directly as alertname + alertname = netdata_name + severity = NETDATA_SEVERITY_MAP.get(netdata_status, "warning") + service = None + annotations = { + "summary": f"Netdata alert {netdata_name} is {netdata_status.lower()}", + } + if value: + annotations["description"] = f"Current value: {value}." 
+ + labels = {"alertname": alertname, "severity": severity} + if service: + labels["service"] = service + + return alertname, labels, annotations + + +def _netdata_fire(url, key, secret, netdata_name, netdata_status, family, value): + alertname, labels, annotations = _build_netdata_labels_annotations( + netdata_name, netdata_status, family, value + ) + + payload = [{ + "labels": labels, + "annotations": annotations, + "generatorURL": f"http://nethsecurity/netdata/{netdata_name}", + "startsAt": fmt(now_utc()), + "endsAt": "0001-01-01T00:00:00Z", + }] + + status, body = http_request("POST", alerts_endpoint(url), data=payload, key=key, secret=secret) + if not (200 <= status < 300): + print(f"Failed to send netdata alert (HTTP {status}): {body}", file=sys.stderr) + sys.exit(1) + + +def _netdata_resolve(url, key, secret, netdata_name, family): + # Resolve both possible severities for the mapped alertname so that + # regardless of which severity was fired, the alert is cleared. + mappings = NETDATA_ALERT_MAP.get(netdata_name, {}) + if mappings: + resolved = set() + for _status, m in mappings.items(): + alertname = m["alertname"] + severity = m["severity"] + service = m.get("service") + sig = (alertname, severity) + if sig in resolved: + continue + resolved.add(sig) + _send_resolve(url, key, secret, alertname, severity, service, netdata_name) + else: + # Generic fallback: resolve with both severities to be safe + for severity in ("critical", "warning"): + _send_resolve(url, key, secret, netdata_name, severity, None, netdata_name) + + +def _send_resolve(url, key, secret, alertname, severity, service, netdata_name): + now = now_utc() + labels = {"alertname": alertname, "severity": severity} + if service: + labels["service"] = service + annotations = {"summary": "resolved", "description": f"Alert {alertname} cleared by netdata at {fmt(now)}."} + payload = [{ + "labels": labels, + "annotations": annotations, + "generatorURL": f"http://nethsecurity/netdata/{netdata_name}", + 
"startsAt": fmt(now - timedelta(hours=1)), + "endsAt": fmt(now), + }] + http_request("POST", alerts_endpoint(url), data=payload, key=key, secret=secret) + + +# --------------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------------- + +def main(): + parser = argparse.ArgumentParser( + description="NethSecurity alert management (ns-plug integration)", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=__doc__, + ) + # Optional credential overrides (take precedence over UCI config) + parser.add_argument("--url", help="Override my_url from UCI config") + parser.add_argument("--key", help="Override my_system_key from UCI config") + parser.add_argument("--secret", help="Override my_system_secret from UCI config") + + sub = parser.add_subparsers(dest="command", required=True) + + # fire + p_send = sub.add_parser("fire", help="Fire an alert") + p_send.add_argument("--alertname", required=True, help="Alert name (CamelCase, e.g. DiskSpaceCritical)") + p_send.add_argument("--severity", required=True, choices=["critical", "warning", "info"], help="Severity level") + p_send.add_argument("--labels", nargs="*", metavar="KEY=VALUE", help="Labels (e.g. service=storage mountpoint=/data)") + p_send.add_argument("--annotations", nargs="*", metavar="KEY=VALUE", help="Annotations (e.g. 
summary_en='...')") + + # resolve + p_resolve = sub.add_parser("resolve", help="Resolve an active alert") + p_resolve.add_argument("--alertname", required=True, help="Alert name (must match the fired alert)") + p_resolve.add_argument("--severity", required=True, choices=["critical", "warning", "info"], help="Severity level") + p_resolve.add_argument("--labels", nargs="*", metavar="KEY=VALUE", help="Labels (must match the fired alert)") + p_resolve.add_argument("--annotations", nargs="*", metavar="KEY=VALUE", help="Optional resolve annotations") + + # list + p_list = sub.add_parser("list", help="List active alerts") + p_list.add_argument("--state", help="Filter by state (active, suppressed, unprocessed)") + p_list.add_argument("--severity", help="Filter by severity label") + + # netdata (internal, called from health_alarm_notify.conf) + p_netdata = sub.add_parser("netdata", help="Handle a netdata alarm notification (internal use)") + p_netdata.add_argument("--alertname", required=True, help="Netdata alarm name (${name})") + p_netdata.add_argument("--status", required=True, help="Netdata alarm status (${status})") + p_netdata.add_argument("--chart", required=True, help="Netdata chart name (${chart})") + p_netdata.add_argument("--family", default="", help="Netdata chart family (${family})") + p_netdata.add_argument("--value", default="", help="Metric value that triggered the alarm (${value})") + + args = parser.parse_args() + + dispatch = { + "fire": cmd_fire, + "resolve": cmd_resolve, + "list": cmd_list, + "netdata": cmd_netdata, + } + dispatch[args.command](args) + + +if __name__ == "__main__": + main() diff --git a/packages/ns-plug/files/send-mwan-alert b/packages/ns-plug/files/send-mwan-alert index 1a74ec96c..4a3c721ab 100644 --- a/packages/ns-plug/files/send-mwan-alert +++ b/packages/ns-plug/files/send-mwan-alert @@ -47,3 +47,30 @@ payload='{"lk": "'$lk'", "alert_id": "'$alert_id'", "status": "'$status'"}' /usr/bin/curl -m 30 --retry 3 -L -s \ --header 
"Authorization: token ${secret}" --header "Content-Type: application/json" --header "Accept: application/json" \ --data-raw "${payload}" ${url} + +# Also send to MY alertmanager if my_url is configured +if [ -n "$(uci -q get ns-plug.config.my_url)" ]; then + if [ "${status}" == "FAILURE" ]; then + /usr/sbin/ns-plug-alert fire \ + --alertname WanDown \ + --severity critical \ + --labels "service=network" "interface=${INTERFACE}" \ + --annotations \ + "summary_en=WAN interface ${INTERFACE} is down" \ + "summary_it=Interfaccia WAN ${INTERFACE} non disponibile" \ + "description_en=WAN interface ${INTERFACE} is down. Internet connectivity lost." \ + "description_it=Interfaccia WAN ${INTERFACE} non disponibile. Connettività Internet persa." \ + 2>/dev/null + elif [ "${status}" == "OK" ]; then + /usr/sbin/ns-plug-alert resolve \ + --alertname WanDown \ + --severity critical \ + --labels "service=network" "interface=${INTERFACE}" \ + --annotations \ + "summary_en=WAN interface ${INTERFACE} is down" \ + "summary_it=Interfaccia WAN ${INTERFACE} non disponibile" \ + "description_en=WAN interface ${INTERFACE} is down. Internet connectivity lost." \ + "description_it=Interfaccia WAN ${INTERFACE} non disponibile. Connettività Internet persa." 
\ + 2>/dev/null + fi +fi From 9c48d5263734ad31fbef642bbe295595b6de1fc1 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Tue, 21 Apr 2026 07:50:19 +0200 Subject: [PATCH 31/39] fix: make sure to update netdata alert conf The netdata alert script is inside the configuration directory and it is preserved across upgrade --- packages/ns-plug/Makefile | 2 +- packages/ns-plug/files/30_ns-plug_alerts | 43 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/packages/ns-plug/Makefile b/packages/ns-plug/Makefile index 4e2958c23..11effa254 100644 --- a/packages/ns-plug/Makefile +++ b/packages/ns-plug/Makefile @@ -100,12 +100,12 @@ define Package/ns-plug/install $(INSTALL_BIN) ./files/disable_automatic_updates $(1)/usr/share/ns-plug/hooks/unregister/60disable_automatic_updates $(INSTALL_CONF) ./files/config $(1)/etc/config/ns-plug $(INSTALL_CONF) files/ns-plug.keep $(1)/lib/upgrade/keep.d/ns-plug - $(INSTALL_CONF) files/health_alarm_notify.conf $(1)/etc/netdata $(INSTALL_BIN) ./files/send-mwan-alert $(1)/usr/libexec/mwan-hooks $(INSTALL_BIN) ./files/backup-encryption-alert $(1)/usr/libexec $(INSTALL_BIN) ./files/ns-plug-alert $(1)/usr/sbin $(INSTALL_BIN) ./files/mwan-hooks $(1)/usr/libexec/ns-plug $(INSTALL_BIN) ./files/ns-plug-rsyslog-fixup.uci-default $(1)/etc/uci-defaults/rsyslog-fixup + $(INSTALL_DATA) files/health_alarm_notify.conf $(1)/usr/share/ns-plug/ endef $(eval $(call BuildPackage,ns-plug)) diff --git a/packages/ns-plug/files/30_ns-plug_alerts b/packages/ns-plug/files/30_ns-plug_alerts index 717161395..83541e579 100644 --- a/packages/ns-plug/files/30_ns-plug_alerts +++ b/packages/ns-plug/files/30_ns-plug_alerts @@ -21,3 +21,46 @@ component: Disk to: sysadmin repeat: critical 5m warning 5m EOF + +# Disable unwanted alerts +files="cpu disks entropy ipc load memory net netfilter processes ram softnet tcp_conn tcp_listen tcp_mem tcp_orphans tcp_resets timex udp_errors" +for f in $files +do + file="/etc/netdata/health.d/${f}.conf" 
+ if [ ! -f $file ]; then + > $file + fi +done + +# Enable mwan chart +sed -i 's/python.d = no/python.d = yes/' /etc/netdata/netdata.conf +python_f="/etc/netdata/python.d.conf" +if [ ! -f "$python_f" ]; then + cat << EOF > "$python_f" +enabled: yes +gc_run: yes +gc_interval: 300 +apache_cache: no +chrony: no +example: no +go_expvar: no +gunicorn_log: no +hpssa: no +logind: no +nginx_log: no +EOF +fi + +# Create mwan alert +cat << EOF > /etc/netdata/health.d/mwan.conf +template: wan_status + on: mwan.score +lookup: min -1m foreach * + every: 1m + warn: \$this < 5 + crit: \$this <= 1 + info: The score of the WAN, 0 means down +EOF + +# Update netdata notification script +cp /usr/share/ns-plug/health_alarm_notify.conf /etc/netdata/health_alarm_notify.conf From aa8b46cbb99440cf55b5ec3ddf44ff9d961a627f Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Tue, 21 Apr 2026 09:58:44 +0200 Subject: [PATCH 32/39] chore: remove unused mwan alert This was a leftover when mwan alert was moved from netdata chart to mwan hooks --- packages/ns-plug/files/30_ns-plug_alerts | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/packages/ns-plug/files/30_ns-plug_alerts b/packages/ns-plug/files/30_ns-plug_alerts index 83541e579..4406054c8 100644 --- a/packages/ns-plug/files/30_ns-plug_alerts +++ b/packages/ns-plug/files/30_ns-plug_alerts @@ -32,7 +32,7 @@ do fi done -# Enable mwan chart +# Enable some python plugins sed -i 's/python.d = no/python.d = yes/' /etc/netdata/netdata.conf python_f="/etc/netdata/python.d.conf" if [ ! 
-f "$python_f" ]; then @@ -51,16 +51,5 @@ nginx_log: no EOF fi -# Create mwan alert -cat << EOF > /etc/netdata/health.d/mwan.conf -template: wan_status - on: mwan.score -lookup: min -1m foreach * - every: 1m - warn: \$this < 5 - crit: \$this <= 1 - info: The score of the WAN, 0 means down -EOF - # Update netdata notification script cp /usr/share/ns-plug/health_alarm_notify.conf /etc/netdata/health_alarm_notify.conf From f22d0f6ac995c114d7667caa69a1e5d6c7799371 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Tue, 21 Apr 2026 10:33:30 +0200 Subject: [PATCH 33/39] feat: repeat mwan down alert This is required to keep the alert fireing inside mimir --- packages/mwan3/files/usr/sbin/mwan3track | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/mwan3/files/usr/sbin/mwan3track b/packages/mwan3/files/usr/sbin/mwan3track index 478f4f0e3..49c82fb24 100755 --- a/packages/mwan3/files/usr/sbin/mwan3track +++ b/packages/mwan3/files/usr/sbin/mwan3track @@ -119,7 +119,8 @@ disconnected() { LOG notice "Interface $INTERFACE ($DEVICE) is offline" env -i ACTION="disconnected" INTERFACE="$INTERFACE" DEVICE="$DEVICE" /sbin/hotplug-call iface else - LOG notice "Skip disconnected event for $INTERFACE ($DEVICE)" + LOG notice "Skip disconnected event for $INTERFACE ($DEVICE), but sending alert" + env -i ACTION="disconnected" INTERFACE="$INTERFACE" DEVICE="$DEVICE" /usr/libexec/mwan-hooks/send-mwan-alert fi } From addcbb1d6fb5ca354866b82fce3d9c58341ea0d0 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Wed, 22 Apr 2026 15:35:26 +0200 Subject: [PATCH 34/39] feat(victoria): add vmalert alerting - Add vmalert init script (vmalert.initd) to start/stop vmalert service - Add vmalert UCI configuration file (vmalert.conf) with datasource settings - Add comprehensive alert rules for host and hardware monitoring (vmalert-rules/host.yaml) - CPU usage alerts (warning at 70%, critical at 85%) - Memory usage alerts (warning at 80%, critical at 90%) - Disk space 
alerts (warning at 80%, critical at 90%) - Disk inodes alerts - System load alerts - Network error and drop alerts - Process zombie and blocked alerts - Update Makefile to install vmalert configuration and rules - Add detailed documentation of vmalert setup and metrics mapping - Alerts are currently in blackhole mode (evaluated but not forwarded) - Rules adapted for Telegraf metric names instead of Prometheus names - Support for Mimir integration when configured via ns-plug Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../telegraf/files/telegraf.conf.d/os.conf | 21 +- packages/victoria-metrics/Makefile | 6 + packages/victoria-metrics/README.md | 482 ++++++++++++++++++ packages/victoria-metrics/README_VMALERT.md | 256 ++++++++++ .../files/vmalert-rules/host.yaml | 170 ++++++ packages/victoria-metrics/files/vmalert.conf | 3 + packages/victoria-metrics/files/vmalert.initd | 74 +++ 7 files changed, 1002 insertions(+), 10 deletions(-) create mode 100644 packages/victoria-metrics/README.md create mode 100644 packages/victoria-metrics/README_VMALERT.md create mode 100644 packages/victoria-metrics/files/vmalert-rules/host.yaml create mode 100644 packages/victoria-metrics/files/vmalert.conf create mode 100644 packages/victoria-metrics/files/vmalert.initd diff --git a/packages/telegraf/files/telegraf.conf.d/os.conf b/packages/telegraf/files/telegraf.conf.d/os.conf index b7a45221b..f4919df35 100644 --- a/packages/telegraf/files/telegraf.conf.d/os.conf +++ b/packages/telegraf/files/telegraf.conf.d/os.conf @@ -89,7 +89,7 @@ # interface_include = ["eth0"] ## List of interfaces to ignore when pulling metrics. - interface_exclude = ["wg*", "ipsec*", "tun*"] + interface_exclude = ["wg*", "ipsec*", "tun*", "br*"] ## Plugin behavior for downed interfaces ## Available choices: @@ -166,12 +166,13 @@ # Monitor sensors, requires lm-sensors package # This plugin ONLY supports Linux -[[inputs.sensors]] - ## Remove numbers from field names. 
- ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. - # remove_numbers = true - - ## Timeout is the maximum amount of time that the sensors command can run. - # timeout = "5s" - [inputs.sensors.tags] - influxdb_db = "os-metrics" +# DISABLED: lm-sensors utility is not available on NethSecurity systems +# [[inputs.sensors]] +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true +# +# ## Timeout is the maximum amount of time that the sensors command can run. +# # timeout = "5s" +# [inputs.sensors.tags] +# influxdb_db = "os-metrics" diff --git a/packages/victoria-metrics/Makefile b/packages/victoria-metrics/Makefile index f9c364f6e..881ab3465 100644 --- a/packages/victoria-metrics/Makefile +++ b/packages/victoria-metrics/Makefile @@ -55,19 +55,25 @@ endef define Package/victoria-metrics/conffiles /etc/config/victoria-metrics +/etc/config/vmalert endef define Package/victoria-metrics/install $(call GoPackage/Package/Install/Bin,$(1)) $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/victoria-metrics.initd $(1)/etc/init.d/victoria-metrics + $(INSTALL_BIN) ./files/vmalert.initd $(1)/etc/init.d/vmalert $(INSTALL_DIR) $(1)/etc/config $(INSTALL_DATA) ./files/victoria-metrics.conf $(1)/etc/config/victoria-metrics + $(INSTALL_DATA) ./files/vmalert.conf $(1)/etc/config/vmalert + $(INSTALL_DIR) $(1)/etc/vmalert/rules + $(INSTALL_DATA) ./files/vmalert-rules/*.yaml $(1)/etc/vmalert/rules/ endef define Package/victoria-metrics/postinst #!/bin/sh [ -z "$${IPKG_INSTROOT}" ] && /etc/init.d/victoria-metrics restart +[ -z "$${IPKG_INSTROOT}" ] && /etc/init.d/vmalert restart exit 0 endef diff --git a/packages/victoria-metrics/README.md b/packages/victoria-metrics/README.md new file mode 100644 index 000000000..090d2f75e --- /dev/null +++ b/packages/victoria-metrics/README.md @@ -0,0 +1,482 @@ +# Victoria Metrics + +## Overview + +This package includes **Victoria 
Metrics** with **vmalert** for time series data collection and alerting in NethSecurity. Victoria Metrics is a fast, cost-effective, and scalable time-series database that serves as the metrics storage backend for the system. vmalert provides alerting capabilities by evaluating rules against the collected metrics. + +**Key Components:** +- **victoria-metrics**: Time-series database server listening on port 8428 +- **vmalert**: Alert rule evaluator with HTTP API on port 8081 +- **Telegraf integration**: Automatic metric collection from system resources +- **Mimir integration**: Alerts can be forwarded to Mimir for centralized alerting + +## Installation & Setup + +### Package Contents + +The victoria-metrics package includes: +- `victoria-metrics` binary: Time-series database +- `vmalert` binary: Alert rule evaluator +- Init scripts for service management +- UCI configuration files +- Pre-configured alert rules for host monitoring + +### Initial Configuration + +#### Victoria Metrics Storage + +Configuration is located at `/etc/config/victoria-metrics`: + +``` +config victoriametrics 'main' + option storage_path '/var/lib/victoriametrics' + option retention_period '1y' +``` + +**Key options:** +- `storage_path`: Directory for metric data storage (default: `/var/lib/victoriametrics`) +- `retention_period`: How long to keep metrics (default: `1y`). 
Use formats like `1d`, `7d`, `30d`, `365d`, `1y` + +#### vmalert Configuration + +Configuration is located at `/etc/config/vmalert`: + +``` +config vmalert + option datasource_url 'http://localhost:8428' + option http_listen_addr '127.0.0.1:8081' +``` + +**Key options:** +- `datasource_url`: URL to Victoria Metrics (default: `http://localhost:8428`) +- `http_listen_addr`: HTTP address for vmalert API (default: `127.0.0.1:8081`) + +## Service Management + +### Starting Services + +Enable and start Victoria Metrics: +```bash +/etc/init.d/victoria-metrics enable +/etc/init.d/victoria-metrics start +``` + +Enable and start vmalert: +```bash +/etc/init.d/vmalert enable +/etc/init.d/vmalert start +``` + +### Checking Status + +```bash +/etc/init.d/victoria-metrics status +/etc/init.d/vmalert status +``` + +### Monitoring + +Victoria Metrics provides several endpoints for metrics and monitoring: + +**Query interface:** +```bash +# Query metrics using MetricsQL +curl 'http://127.0.0.1:8428/api/v1/query?query=cpu_usage_idle' + +# Query range of data +curl 'http://127.0.0.1:8428/api/v1/query_range?query=cpu_usage_idle&start=1609459200&end=1609545600&step=300' +``` + +**Available metrics:** +```bash +# List all metric names +curl -s 'http://127.0.0.1:8428/api/v1/label/__name__/values' | jq -r '.data[]' | sort +``` + +## vmalert: Alert Rule Evaluation + +### Overview + +vmalert evaluates alerting rules periodically and generates alerts when conditions are met. Rules are defined in YAML format following the Prometheus alerting rules specification. + +### Alert Rules Configuration + +Alert rules are stored as YAML files in: +``` +/etc/vmalert/rules/ +``` + +Rules follow the Prometheus alerting rules format. Each rule defines: +- Alert name +- PromQL/MetricsQL expression +- Duration threshold (how long the condition must be true) +- Labels (severity, service, etc.) 
+- Annotations (summary, description in multiple languages) + +### Metric Names Mapping + +NethSecurity uses **Telegraf** to collect metrics and send them to Victoria Metrics. Telegraf metric names differ from standard Prometheus names: + +| Category | Telegraf Metric | Description | +|----------|-----------------|-------------| +| CPU | `cpu_usage_idle`, `cpu_usage_user`, etc. | CPU usage by category | +| Memory | `mem_used`, `mem_total`, `mem_free`, `mem_swap_*` | Memory and swap usage | +| Disk | `disk_used`, `disk_total`, `disk_free`, `disk_inodes_*` | Disk space and inodes | +| Network | `net_bytes_sent`, `net_bytes_recv`, `net_err_in`, `net_drop_out` | Network interface stats | +| Process | `processes_running`, `processes_zombies`, `processes_blocked` | Process states | +| System | `system_load1`, `system_load5`, `system_load15`, `system_uptime` | System metrics | + +#### Discovering Available Metrics + +To see all available metrics in Victoria Metrics: +```bash +curl -s 'http://127.0.0.1:8428/api/v1/label/__name__/values' | jq -r '.data[]' | sort +``` + +### vmalert HTTP API + +The vmalert HTTP API is available at `http://127.0.0.1:8081`: + +#### View all alerts +```bash +curl http://127.0.0.1:8081/api/v1/alerts +``` + +#### View all rules +```bash +curl http://127.0.0.1:8081/api/v1/rules +``` + +#### Check specific rule status +```bash +curl 'http://127.0.0.1:8081/api/v1/rules?type=alert' | jq '.data.groups[0].rules[] | select(.name == "DiskSpaceWarning")' +``` + +#### View rule group +```bash +curl 'http://127.0.0.1:8081/api/v1/rules?type=alert' | jq '.data.groups[] | select(.name == "host_and_hardware")' +``` + +### Included Alert Rules + +The default rules monitor critical host and hardware metrics. Rules are organized into categories: + +#### 1. CPU Usage +- **HighCpuUsage**: CPU > 70% for 5 minutes (warning) +- **CriticalCpuUsage**: CPU > 85% for 2 minutes (critical) + +#### 2. 
Memory Usage +- **HighMemoryUsage**: RAM > 80% (warning) +- **CriticalMemoryUsage**: RAM > 90% (critical) +- **HighSwapUsage**: Swap > 50% (warning) + +#### 3. Disk Space +- **DiskSpaceWarning**: Usage > 80% (warning) +- **DiskSpaceCritical**: Usage > 90% (critical) +- **DiskInodesWarning**: Inodes > 80% (warning) +- **DiskInodesCritical**: Inodes > 90% (critical) + +#### 4. System Load +- **HighSystemLoad**: Load > 2x CPU count (warning) + +#### 5. Network +- **HighNetworkErrorsIn/Out**: Errors > 100 in 5 minutes (warning) +- **HighNetworkDropsIn/Out**: Drops > 100 in 5 minutes (warning) + +#### 6. Processes +- **ProcessesZombiesAlert**: Zombie processes > 5 (warning) +- **ProcessesBlockedAlert**: Blocked processes > 10 (warning) + +### Alert State Lifecycle + +Alerts follow this state progression: + +1. **Pending**: Condition is true but hasn't met the `for` duration threshold yet +2. **Firing**: Condition has been true for at least the `for` duration +3. **Resolved**: Condition is no longer true + +Example: An alert with `for: 5m` will: +- Start in "pending" state when the condition first becomes true +- Transition to "firing" state after 5 minutes of the condition remaining true +- Return to "inactive" state when the condition becomes false + +### Testing Alerts + +To test if alerts are being evaluated properly: + +1. Check rule evaluation status: + ```bash + curl 'http://127.0.0.1:8081/api/v1/rules?type=alert' | jq '.data.groups[0].rules[] | {name,state,lastEvaluation}' + ``` + +2. Query the metric that triggers an alert: + ```bash + curl 'http://127.0.0.1:8428/api/v1/query?query=disk_used_percent' + ``` + +3. Monitor vmalert logs: + ```bash + tail -f /var/log/messages | grep vmalert + ``` + +4. 
Trigger an alert manually (for testing): + ```bash + # Stress CPU to trigger HighCpuUsage alert (needs CPU > 70% for 5 min) + dd if=/dev/zero of=/dev/null & + ``` + +## Integration with Mimir (ns-plug) + +When Mimir alerting is configured via ns-plug, vmalert automatically detects the configuration and forwards alerts to Mimir. This is fully automatic and requires no manual configuration of vmalert. + +### Automatic Mimir Detection + +vmalert checks for the following ns-plug UCI configuration on startup: +``` +ns-plug.config.my_url # Mimir base URL +ns-plug.config.my_system_key # HTTP Basic Auth username +ns-plug.config.my_system_secret # HTTP Basic Auth password +``` + +If all three values are present, vmalert automatically: +1. Constructs the Mimir alertmanager endpoint URL +2. Configures HTTP basic authentication +3. Starts forwarding alerts to Mimir + +### Enabling Mimir Integration + +1. **Configure ns-plug with Mimir credentials:** + ```bash + uci set ns-plug.config.my_url='https://mimir.example.com' + uci set ns-plug.config.my_system_key='your_api_key' + uci set ns-plug.config.my_system_secret='your_api_secret' + uci commit ns-plug + ``` + +2. **Restart vmalert to apply the configuration:** + ```bash + /etc/init.d/vmalert restart + ``` + +3. **Verify alerts are being forwarded:** + ```bash + # Check vmalert logs for successful forwarding + tail -f /var/log/messages | grep vmalert + + # Query vmalert to confirm rules are evaluating + curl http://127.0.0.1:8081/api/v1/rules | jq '.data.groups[0].rules[0].alerts' + ``` + +### Alert Flow to Mimir + +When Mimir is configured: +``` +Telegraf metrics + ↓ +Victoria Metrics (database) + ↓ +vmalert (rules evaluation every 30s) + ↓ +Mimir Alertmanager (if configured) + ↓ +Mimir UI / Alert routing / Integrations +``` + +The vmalert init script automatically handles the detection and forwarding without requiring manual intervention. 
+ +### Fallback: Blackhole Mode + +If ns-plug is not configured with Mimir credentials: +- vmalert runs in **blackhole mode** (default) +- Alerts are evaluated correctly +- Alerts do NOT get forwarded anywhere +- This is useful for local testing and validation + +To switch back to blackhole mode, simply clear the ns-plug configuration: +```bash +uci delete ns-plug.config.my_url +uci delete ns-plug.config.my_system_key +uci delete ns-plug.config.my_system_secret +uci commit ns-plug +/etc/init.d/vmalert restart +``` + +## Troubleshooting + +### Victoria Metrics Issues + +#### Database won't start + +Check init script logs: +```bash +tail -f /var/log/messages | grep victoria-metrics +``` + +Verify storage path exists and is writable: +```bash +ls -la /var/lib/victoriametrics/ +``` + +#### High disk usage + +Consider reducing retention period in `/etc/config/victoria-metrics`: +``` +option retention_period '30d' # Instead of 1y +``` + +### Telegraf Integration Issues + +#### ethtool errors: "operation not supported" + +**Issue**: Telegraf reports repeated errors like: +``` +telegraf: error: [inputs.ethtool] "br-lan" stats: operation not supported +``` + +**Root Cause**: Bridge interfaces (e.g., `br-lan`) don't support ethtool statistics collection. + +**Solution**: Add bridge interface pattern to ethtool's interface exclusion list in `/etc/telegraf.conf.d/os.conf`: +```ini +[[inputs.ethtool]] + interface_exclude = ["wg*", "ipsec*", "tun*", "br*"] +``` + +#### sensors errors: "failed to run command" + +**Issue**: Telegraf reports repeated errors like: +``` +telegraf: error: [inputs.sensors] failed to run command "/usr/sbin/sensors -A -u": exit status 1 +``` + +**Root Cause**: The `lm-sensors` package or `/usr/sbin/sensors` utility is not available on the system. 
+ +**Solution**: Disable the sensors input plugin by commenting it out in `/etc/telegraf.conf.d/os.conf`: +```ini +# [[inputs.sensors]] +# # Configuration disabled - sensors utility not available +``` + +### vmalert Issues + +#### unsupported path "/stats" errors + +**Issue**: vmalert logs repeated errors like: +``` +vmalert: error: unsupported path requested: "/stats" +``` + +**Root Cause**: The netifyd daemon is configured to collect network statistics from vmalert's HTTP server, but vmalert only exposes `/api/v1/*` endpoints. + +**Solution**: Configure netifyd to exclude vmalert's port (8081) from statistics collection. Edit `/etc/config/netifyd` and add a BPF filter: +```uci +config netifyd + list internal_if 'br-lan -F "not (tcp and port 8081)"' +``` + +Then restart netifyd: +```bash +/etc/init.d/netifyd restart +``` + +#### No alerts firing + +Check that vmalert service is running: +```bash +/etc/init.d/vmalert status +``` + +Verify datasource connection: +```bash +curl -I http://localhost:8428/api/v1/query +``` + +Check rule syntax in `/etc/vmalert/rules/*.yaml` (YAML must be valid) + +#### Alerts always "pending" + +This is normal if the condition hasn't been true for the required duration. For example: +- An alert with `for: 5m` takes 5 minutes to transition from "pending" to "firing" +- Check the `lastEvaluation` timestamp in the API response to see when it was last evaluated + +### Performance Considerations + +#### Memory Usage + +Victoria Metrics can use significant memory. Monitor with: +```bash +free -h +ps aux | grep victoria-metrics +``` + +Adjust `-maxBytes` in the init script if needed to limit memory usage. + +#### Storage Considerations + +Default retention is 1 year. Monitor disk usage: +```bash +df -h /var/lib/victoriametrics/ +du -sh /var/lib/victoriametrics/ +``` + +Reduce retention if disk space is limited by modifying `/etc/config/victoria-metrics`. + +#### Alert Evaluation + +vmalert evaluates rules every 30 seconds. 
If you have many rules or complex queries, evaluation time may increase. Monitor with: +```bash +curl 'http://127.0.0.1:8081/api/v1/rules' | jq '.data.groups[0].rules[].evaluationTime' +``` + +## Advanced Configuration + +### Custom Alert Rules + +To add custom alert rules, create a new YAML file in `/etc/vmalert/rules/` following this format: + +```yaml +groups: + - name: "custom_alerts" + interval: "30s" + rules: + - alert: CustomAlert + expr: 'your_metric > threshold' + for: "5m" + labels: + severity: "warning" + service: "custom" + annotations: + summary_en: "Alert summary" + summary_it: "Riepilogo avviso" + description_en: "Alert description with {{ $value }}" + description_it: "Descrizione avviso con {{ $value }}" +``` + +After adding rules, restart vmalert: +```bash +/etc/init.d/vmalert restart +``` + +### MetricsQL vs PromQL + +Victoria Metrics uses **MetricsQL**, which is compatible with PromQL but includes additional features. See [MetricsQL documentation](https://docs.victoriametrics.com/metricsql/) for advanced query syntax. 
+ +Common MetricsQL functions: +- `rate()`: Rate of increase per second +- `increase()`: Absolute increase over time range +- `avg()`: Average value +- `sum()`: Sum of all values +- `max()`, `min()`: Maximum/minimum values +- `group_by()`: Group metrics by label + +## References + +- **Victoria Metrics Documentation**: https://docs.victoriametrics.com/ +- **Victoria Metrics vmalert**: https://docs.victoriametrics.com/vmalert/ +- **MetricsQL Documentation**: https://docs.victoriametrics.com/metricsql/ +- **Prometheus Alert Rules**: https://samber.github.io/awesome-prometheus-alerts/ +- **Host Monitoring Rules**: https://samber.github.io/awesome-prometheus-alerts/rules/basic-resource-monitoring/host-and-hardware/ +- **Telegraf Documentation**: https://docs.influxdata.com/telegraf/ + +## License + +Apache License 2.0 - See LICENSE file for details diff --git a/packages/victoria-metrics/README_VMALERT.md b/packages/victoria-metrics/README_VMALERT.md new file mode 100644 index 000000000..c6257a556 --- /dev/null +++ b/packages/victoria-metrics/README_VMALERT.md @@ -0,0 +1,256 @@ +# Victoria Metrics Alerting with vmalert + +## Overview + +This package includes **vmalert** for generating alerts based on metrics collected by Victoria Metrics. vmalert evaluates alerting rules periodically and can send notifications to Alertmanager or other webhook receivers. + +## Configuration + +### vmalert Configuration File + +The main configuration is located at: +``` +/etc/config/vmalert +``` + +Key parameters: +- `datasource_url`: URL to Victoria Metrics (default: `http://localhost:8428`) +- `http_listen_addr`: HTTP address for vmalert API (default: `127.0.0.1:8081`) + +### Alert Rules + +Alert rules are stored as YAML files in: +``` +/etc/vmalert/rules/ +``` + +Rules follow the Prometheus alerting rules format. Each rule defines: +- Alert name +- PromQL/MetricsQL expression +- Duration threshold (how long the condition must be true) +- Labels (severity, service, etc.) 
+- Annotations (summary, description in multiple languages) + +## Metric Names Mapping + +NethSecurity uses **Telegraf** to collect metrics and send them to Victoria Metrics. Telegraf metric names differ from standard Prometheus names: + +| Category | Telegraf Metric | Description | +|----------|-----------------|-------------| +| CPU | `cpu_usage_idle`, `cpu_usage_user`, etc. | CPU usage by category | +| Memory | `mem_used`, `mem_total`, `mem_free`, `mem_swap_*` | Memory and swap usage | +| Disk | `disk_used`, `disk_total`, `disk_free`, `disk_inodes_*` | Disk space and inodes | +| Network | `net_bytes_sent`, `net_bytes_recv`, `net_err_in`, `net_drop_out` | Network interface stats | +| Process | `processes_running`, `processes_zombies`, `processes_blocked` | Process states | +| System | `system_load1`, `system_load5`, `system_load15`, `system_uptime` | System metrics | + +### Discovering Available Metrics + +To see all available metrics in Victoria Metrics: +```bash +curl -s 'http://127.0.0.1:8428/api/v1/label/__name__/values' | jq -r '.data[]' | sort +``` + +## Starting vmalert + +Enable and start the vmalert service: +```bash +/etc/init.d/vmalert enable +/etc/init.d/vmalert start +``` + +Check status: +```bash +/etc/init.d/vmalert status +``` + +## Monitoring vmalert + +The vmalert HTTP API is available at `http://127.0.0.1:8081`: + +### View all alerts +```bash +curl http://127.0.0.1:8081/api/v1/alerts +``` + +### View all rules +```bash +curl http://127.0.0.1:8081/api/v1/rules +``` + +### Check specific rule status +```bash +curl 'http://127.0.0.1:8081/api/v1/rules?type=alert' | jq '.data.groups[0].rules[] | select(.name == "DiskSpaceWarning")' +``` + +## Alert Rules + +### Included Rules (host.yaml) + +The default rules monitor: + +1. **CPU Usage** + - HighCpuUsage: CPU > 70% for 5 minutes (warning) + - CriticalCpuUsage: CPU > 85% for 2 minutes (critical) + +2. 
**Memory Usage** + - HighMemoryUsage: RAM > 80% (warning) + - CriticalMemoryUsage: RAM > 90% (critical) + - HighSwapUsage: Swap > 50% (warning) + +3. **Disk Space** + - DiskSpaceWarning: Usage > 80% (warning) + - DiskSpaceCritical: Usage > 90% (critical) + - DiskInodesWarning: Inodes > 80% (warning) + - DiskInodesCritical: Inodes > 90% (critical) + +4. **System Load** + - HighSystemLoad: Load > 2x CPU count (warning) + +5. **Network** + - HighNetworkErrorsIn/Out: Errors > 100 in 5 minutes (warning) + - HighNetworkDropsIn/Out: Drops > 100 in 5 minutes (warning) + +6. **Processes** + - ProcessesZombiesAlert: Zombie processes > 5 (warning) + - ProcessesBlockedAlert: Blocked processes > 10 (warning) + +## Integration with ns-plug (Mimir) + +When Mimir alerting is configured via ns-plug, vmalert **automatically** detects the configuration and forwards alerts to Mimir. No manual configuration of vmalert is required. + +### Automatic Configuration + +vmalert checks for these ns-plug UCI configuration values on startup: +- `ns-plug.config.my_url` - Mimir base URL +- `ns-plug.config.my_system_key` - API key (HTTP Basic Auth username) +- `ns-plug.config.my_system_secret` - API secret (HTTP Basic Auth password) + +If all three are present, vmalert automatically configures alert forwarding to Mimir. + +### Enabling Mimir Integration + +1. Configure ns-plug: +```bash +uci set ns-plug.config.my_url='https://mimir.example.com' +uci set ns-plug.config.my_system_key='your_api_key' +uci set ns-plug.config.my_system_secret='your_api_secret' +uci commit ns-plug +``` + +2. Restart vmalert: +```bash +/etc/init.d/vmalert restart +``` + +3. 
Verify forwarding is working: +```bash +tail -f /var/log/messages | grep vmalert +``` + +### Alert Forwarding Details + +When Mimir is configured, vmalert: +- Sends fired alerts to Mimir's alertmanager API endpoint +- Uses HTTP basic authentication with the provided credentials +- Continues to evaluate rules every 30 seconds +- Automatically handles alert state transitions (firing → resolved) + +### Blackhole Mode (Default) + +If ns-plug Mimir credentials are not configured: +- vmalert runs in blackhole mode +- Alerts are evaluated but not forwarded anywhere +- Useful for local testing and validation + +To revert to blackhole mode: +```bash +uci delete ns-plug.config.my_url +uci delete ns-plug.config.my_system_key +uci delete ns-plug.config.my_system_secret +uci commit ns-plug +/etc/init.d/vmalert restart +``` + +## Testing Alerts + +To test if alerts are being evaluated, you can: + +1. Check rule evaluation status: + ```bash + curl 'http://127.0.0.1:8081/api/v1/rules?type=alert' | jq '.data.groups[0].rules[] | {name,state,lastEvaluation}' + ``` + +2. Query the metric that triggers an alert: + ```bash + curl 'http://127.0.0.1:8428/api/v1/query?query=disk_used_percent' + ``` + +3. Monitor vmalert logs: + ```bash + tail -f /var/log/messages | grep vmalert + ``` + +## References + +- **Victoria Metrics vmalert documentation**: https://docs.victoriametrics.com/vmalert/ +- **Prometheus alert rules**: https://samber.github.io/awesome-prometheus-alerts/ +- **Host and hardware monitoring rules**: https://samber.github.io/awesome-prometheus-alerts/rules/basic-resource-monitoring/host-and-hardware/ + +## Troubleshooting + +### Telegraf Errors in Logs + +#### ethtool errors: "operation not supported" + +**Issue**: Telegraf reports repeated errors like: +``` +telegraf: error: [inputs.ethtool] "br-lan" stats: operation not supported +``` + +**Root Cause**: Bridge interfaces (e.g., `br-lan`) don't support ethtool statistics collection. 
+ +**Solution**: Add bridge interface pattern to ethtool's interface exclusion list in `/etc/telegraf.conf.d/os.conf`: +```ini +[[inputs.ethtool]] + interface_exclude = ["wg*", "ipsec*", "tun*", "br*"] +``` + +#### sensors errors: "failed to run command" + +**Issue**: Telegraf reports repeated errors like: +``` +telegraf: error: [inputs.sensors] failed to run command "/usr/sbin/sensors -A -u": exit status 1 +``` + +**Root Cause**: The `lm-sensors` package or `/usr/sbin/sensors` utility is not available on the system. + +**Solution**: Disable the sensors input plugin by commenting it out in `/etc/telegraf.conf.d/os.conf`: +```ini +# [[inputs.sensors]] +# # Configuration disabled - sensors utility not available +``` + +### vmalert Errors in Logs + +#### unsupported path "/stats" + +**Issue**: vmalert logs repeated errors like: +``` +vmalert: error: unsupported path requested: "/stats" +``` + +**Root Cause**: The netifyd daemon is configured to collect network statistics from vmalert's HTTP server, but vmalert only exposes `/api/v1/*` endpoints and doesn't provide a `/stats` endpoint. + +**Solution**: Configure netifyd to exclude vmalert's port (8081) from statistics collection. Edit `/etc/config/netifyd` and add a BPF filter to the internal interface configuration: +```uci +config netifyd + list internal_if 'br-lan -F "not (tcp and port 8081)"' +``` + +Then restart netifyd: +```bash +/etc/init.d/netifyd restart +``` + +**Note**: These are non-critical errors that don't affect functionality. Metrics are still collected, alerts are still evaluated, and all services operate normally. The errors only increase log verbosity. 
diff --git a/packages/victoria-metrics/files/vmalert-rules/host.yaml b/packages/victoria-metrics/files/vmalert-rules/host.yaml new file mode 100644 index 000000000..96ecb27b1 --- /dev/null +++ b/packages/victoria-metrics/files/vmalert-rules/host.yaml @@ -0,0 +1,170 @@ +# Victoria Metrics Alert Rules for Host and Hardware Monitoring +# +# Based on: https://samber.github.io/awesome-prometheus-alerts/rules/basic-resource-monitoring/host-and-hardware/ +# Adapted for Telegraf metrics names + +groups: + - name: "host_and_hardware" + interval: "30s" + rules: + # CPU Monitoring + - alert: HighCpuUsage + expr: 'round(100 - avg(cpu_usage_idle), 0.1) > 70 unless round(100 - avg(cpu_usage_idle), 0.1) > 85' + for: "5m" + labels: + severity: "info" + service: "host" + annotations: + summary_en: "High CPU usage detected" + summary_it: "Utilizzo elevato di CPU rilevato" + description_en: "CPU usage is {{ $value }}%" + description_it: "Utilizzo della CPU è {{ $value }}%" + + - alert: CriticalCpuUsage + expr: 'round(100 - avg(cpu_usage_idle), 0.1) > 85' + for: "2m" + labels: + severity: "warning" + service: "host" + annotations: + summary_en: "Critical CPU usage detected" + summary_it: "Utilizzo critico di CPU rilevato" + description_en: "CPU usage is {{ $value }}%" + description_it: "Utilizzo della CPU è {{ $value }}%" + + # Memory Monitoring + - alert: HighMemoryUsage + expr: 'round((mem_used / mem_total) * 100, 0.1) > 80 unless round((mem_used / mem_total) * 100, 0.1) > 90' + for: "5m" + labels: + severity: "info" + service: "host" + annotations: + summary_en: "High memory usage detected" + summary_it: "Utilizzo elevato di memoria rilevato" + description_en: "Memory usage is {{ $value }}%" + description_it: "Utilizzo della memoria è {{ $value }}%" + + - alert: CriticalMemoryUsage + expr: 'round((mem_used / mem_total) * 100, 0.1) > 90' + for: "2m" + labels: + severity: "warning" + service: "host" + annotations: + summary_en: "Critical memory usage detected" + summary_it: "Utilizzo 
critico di memoria rilevato" + description_en: "Memory usage is {{ $value }}%" + description_it: "Utilizzo della memoria è {{ $value }}%" + + # Disk Space Monitoring + - alert: DiskSpaceWarning + expr: 'round((disk_used / disk_total) * 100, 0.1) > 80 unless round((disk_used / disk_total) * 100, 0.1) > 90' + for: "5m" + labels: + severity: "warning" + service: "storage" + annotations: + summary_en: "Disk space low on {{ $labels.path }}" + summary_it: "Spazio disco in esaurimento su {{ $labels.path }}" + description_en: "Disk usage is {{ $value }}% on {{ $labels.path }}" + description_it: "Utilizzo del disco è {{ $value }}% su {{ $labels.path }}" + + - alert: DiskSpaceCritical + expr: 'round((disk_used / disk_total) * 100, 0.1) > 90' + for: "2m" + labels: + severity: "critical" + service: "storage" + annotations: + summary_en: "Disk space critical on {{ $labels.path }}" + summary_it: "Spazio disco critico su {{ $labels.path }}" + description_en: "Disk usage is {{ $value }}% on {{ $labels.path }}" + description_it: "Utilizzo del disco è {{ $value }}% su {{ $labels.path }}" + + # System Load Monitoring + - alert: HighSystemLoad + expr: 'system_load1 / system_n_cpus > 2' + for: "5m" + labels: + severity: "warning" + service: "host" + annotations: + summary_en: "High system load detected" + summary_it: "Carico di sistema elevato rilevato" + description_en: "System load is {{ $value }}" + description_it: "Carico di sistema è {{ $value }}" + + # Network Interface Monitoring + - alert: HighNetworkErrorsIn + expr: 'increase(net_err_in[5m]) > 100' + for: "5m" + labels: + severity: "warning" + service: "network" + annotations: + summary_en: "High network errors (IN) on interface {{ $labels.interface }}" + summary_it: "Errori di rete elevati (IN) su interfaccia {{ $labels.interface }}" + description_en: "Network errors (IN) increased by {{ $value }} on {{ $labels.interface }}" + description_it: "Errori di rete (IN) aumentati di {{ $value }} su {{ $labels.interface }}" + + - 
alert: HighNetworkErrorsOut + expr: 'increase(net_err_out[5m]) > 100' + for: "5m" + labels: + severity: "warning" + service: "network" + annotations: + summary_en: "High network errors (OUT) on interface {{ $labels.interface }}" + summary_it: "Errori di rete elevati (OUT) su interfaccia {{ $labels.interface }}" + description_en: "Network errors (OUT) increased by {{ $value }} on {{ $labels.interface }}" + description_it: "Errori di rete (OUT) aumentati di {{ $value }} su {{ $labels.interface }}" + + - alert: HighNetworkDropsIn + expr: 'increase(net_drop_in[5m]) > 100' + for: "5m" + labels: + severity: "warning" + service: "network" + annotations: + summary_en: "High network drops (IN) on interface {{ $labels.interface }}" + summary_it: "Perdite di rete elevate (IN) su interfaccia {{ $labels.interface }}" + description_en: "Network drops (IN) increased by {{ $value }} on {{ $labels.interface }}" + description_it: "Perdite di rete (IN) aumentate di {{ $value }} su {{ $labels.interface }}" + + - alert: HighNetworkDropsOut + expr: 'increase(net_drop_out[5m]) > 100' + for: "5m" + labels: + severity: "warning" + service: "network" + annotations: + summary_en: "High network drops (OUT) on interface {{ $labels.interface }}" + summary_it: "Perdite di rete elevate (OUT) su interfaccia {{ $labels.interface }}" + description_en: "Network drops (OUT) increased by {{ $value }} on {{ $labels.interface }}" + description_it: "Perdite di rete (OUT) aumentate di {{ $value }} su {{ $labels.interface }}" + + # Process Monitoring + - alert: ProcessesZombiesAlert + expr: 'processes_zombies > 5' + for: "5m" + labels: + severity: "warning" + service: "host" + annotations: + summary_en: "High number of zombie processes detected" + summary_it: "Numero elevato di processi zombie rilevato" + description_en: "Number of zombie processes is {{ $value }}" + description_it: "Numero di processi zombie è {{ $value }}" + + - alert: ProcessesBlockedAlert + expr: 'processes_blocked > 10' + for: "5m" + 
labels: + severity: "warning" + service: "host" + annotations: + summary_en: "High number of blocked processes detected" + summary_it: "Numero elevato di processi bloccati rilevato" + description_en: "Number of blocked processes is {{ $value }}" + description_it: "Numero di processi bloccati è {{ $value }}" diff --git a/packages/victoria-metrics/files/vmalert.conf b/packages/victoria-metrics/files/vmalert.conf new file mode 100644 index 000000000..c40cc9c37 --- /dev/null +++ b/packages/victoria-metrics/files/vmalert.conf @@ -0,0 +1,3 @@ +config main 'main' + option datasource_url 'http://localhost:8428' + option http_listen_addr '127.0.0.1:8082' diff --git a/packages/victoria-metrics/files/vmalert.initd b/packages/victoria-metrics/files/vmalert.initd new file mode 100644 index 000000000..9cae5583c --- /dev/null +++ b/packages/victoria-metrics/files/vmalert.initd @@ -0,0 +1,74 @@ +#!/bin/sh /etc/rc.common + +# +# Copyright (C) 2026 Nethesis S.r.l. +# SPDX-License-Identifier: GPL-2.0-only +# + +START=95 +STOP=5 +USE_PROCD=1 + +PROG="/usr/bin/vmalert" +RULE_DIR="/etc/vmalert/rules" + +start_service() { + config_load vmalert 2>/dev/null || true + + local datasource_url http_listen_addr + config_get datasource_url main datasource_url "http://localhost:8428" + config_get http_listen_addr main http_listen_addr "127.0.0.1:8081" + + # Check if Mimir integration is configured in ns-plug + local mimir_url mimir_key mimir_secret notifier_url + config_load ns-plug 2>/dev/null && { + config_get mimir_url config my_url "" + config_get mimir_key config my_system_key "" + config_get mimir_secret config my_system_secret "" + } + + # If all Mimir credentials are present, configure alert forwarding to Mimir + if [ -n "$mimir_url" ] && [ -n "$mimir_key" ] && [ -n "$mimir_secret" ]; then + # Extract alertmanager API endpoint from Mimir URL + notifier_url="${mimir_url%/}/collect/api/services/mimir/alertmanager" + # Set HTTP basic auth credentials for the notifier + # vmalert supports 
-notifier.basicAuth.username and -notifier.basicAuth.password + else + # Default: use blackhole mode (alerts evaluated but not forwarded) + notifier_url="" + fi + + procd_open_instance + procd_set_param command $PROG + procd_append_param command -rule="$RULE_DIR/*.yaml" + procd_append_param command -httpListenAddr="$http_listen_addr" + procd_append_param command -datasource.url="$datasource_url" + procd_append_param command -remoteRead.url="$datasource_url" + procd_append_param command -remoteWrite.url="$datasource_url" + procd_append_param command -evaluationInterval=30s + + # Configure notifier based on Mimir configuration + if [ -n "$notifier_url" ]; then + # Forward alerts to Mimir with basic auth + procd_append_param command -notifier.url="$notifier_url" + procd_append_param command -notifier.basicAuth.username="$mimir_key" + procd_append_param command -notifier.basicAuth.password="$mimir_secret" + else + # Use blackhole mode (default, for local testing) + procd_append_param command -notifier.blackhole + fi + + procd_set_param stdout 1 + procd_set_param stderr 1 + procd_set_param respawn 3600 5 5 + procd_close_instance +} + +reload_service() { + stop + start +} + +service_triggers() { + procd_add_reload_trigger vmalert +} From c9c301391f1347f73ca0ff6a6f0838297b41cd91 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Thu, 23 Apr 2026 10:11:44 +0200 Subject: [PATCH 35/39] feat(telegraf): add procd service monitoring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add telegraf-services Python script that queries ubus to collect the running state of all procd-managed services. Outputs JSON for telegraf inputs.exec with data_format = json_v2. - Track known persistent services in /var/run/telegraf-services-known.json so services that disappear from the ubus list (stopped or failed to start) are still emitted with running=0, keeping VictoriaMetrics alert rules effective even when procd removes the instance entirely. 
- Add services.conf telegraf input using inputs.exec + json_v2 parser. Tags each metric with service, instance, and has_respawn so vmalert rules can target only persistent daemons (has_respawn=true). - Add services.yaml vmalert rule: ServiceDown fires when procd_service_running{has_respawn="true"} == 0 for 2 minutes. Uses alertgroup=services label (not service=host) so the metric's own service label (e.g. nginx, rpcd) is preserved in the alert. - Add comprehensive telegraf/README.md documenting architecture, all metrics, service monitoring design, PromQL query examples, and manual test procedures. - Add scripts/test-service-monitor.sh for end-to-end simulation: injects a bad nginx config, verifies metric drops to 0, waits for ServiceDown to fire, then restores nginx and confirms alert resolves. - Update victoria-metrics/README_VMALERT.md with service monitoring section and cross-references. End-to-end verified on 192.168.100.238: nginx broken config → procd removes instance from ubus → telegraf-services emits running=0 via state file → VictoriaMetrics stores metric → ServiceDown{service=nginx} fires after 2m → nginx restored → alert resolves Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- packages/telegraf/Makefile | 3 + packages/telegraf/README.md | 331 ++++++++++++++++++ packages/telegraf/files/telegraf-services | 172 +++++++++ .../files/telegraf.conf.d/services.conf | 20 ++ packages/victoria-metrics/README_VMALERT.md | 19 +- .../files/vmalert-rules/services.yaml | 24 ++ 6 files changed, 565 insertions(+), 4 deletions(-) create mode 100644 packages/telegraf/README.md create mode 100644 packages/telegraf/files/telegraf-services create mode 100644 packages/telegraf/files/telegraf.conf.d/services.conf create mode 100644 packages/victoria-metrics/files/vmalert-rules/services.yaml diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 317ae8319..b9f3c395d 100644 --- a/packages/telegraf/Makefile +++ 
b/packages/telegraf/Makefile @@ -74,6 +74,9 @@ define Package/telegraf/install $(INSTALL_DATA) ./files/telegraf.conf $(1)/etc/telegraf.conf $(INSTALL_DIR) $(1)/etc/telegraf.conf.d $(INSTALL_DATA) ./files/telegraf.conf.d/os.conf $(1)/etc/telegraf.conf.d/os.conf + $(INSTALL_DATA) ./files/telegraf.conf.d/services.conf $(1)/etc/telegraf.conf.d/services.conf + $(INSTALL_DIR) $(1)/usr/libexec + $(INSTALL_BIN) ./files/telegraf-services $(1)/usr/libexec/telegraf-services endef define Package/telegraf/postinst diff --git a/packages/telegraf/README.md b/packages/telegraf/README.md new file mode 100644 index 000000000..817db37e8 --- /dev/null +++ b/packages/telegraf/README.md @@ -0,0 +1,331 @@ +# Telegraf on NethSecurity + +## Overview + +This package provides **Telegraf**, the metrics collection agent. It collects host and service metrics and forwards them to Victoria Metrics for storage, visualization, and alerting. + +## Architecture + +``` +procd / ubus + │ + ▼ +/usr/libexec/telegraf-services ← service status via ubus +/proc filesystem ← CPU, memory, disk, network + │ + ▼ + Telegraf (inputs.exec, inputs.cpu, inputs.mem, …) + │ + ▼ +Victoria Metrics (http://127.0.0.1:8428) + │ + ├─▶ vmalert (alert rules evaluation) + └─▶ Grafana (dashboards) +``` + +## Configuration Files + +| Path | Description | +|------|-------------| +| `/etc/telegraf.conf` | Main Telegraf agent configuration and InfluxDB output | +| `/etc/telegraf.conf.d/os.conf` | CPU, memory, disk, network, process metrics | +| `/etc/telegraf.conf.d/services.conf` | Procd service status via `inputs.exec` | + +## Collected Metrics + +### OS and Hardware (`os.conf`) + +All metrics are tagged `influxdb_db=os-metrics`. 
+ +| Telegraf Measurement | Key Fields | Description | +|----------------------|------------|-------------| +| `cpu` | `usage_idle`, `usage_user`, `usage_system` | Per-CPU usage | +| `mem` | `used`, `total`, `free`, `swap_*` | Memory and swap | +| `disk` | `used`, `total`, `free`, `inodes_*` | Disk space per mount | +| `net` | `bytes_sent`, `bytes_recv`, `err_in`, `err_out`, `drop_*` | Network interfaces | +| `netstat` | `tcp_established`, `tcp_time_wait` | TCP connection states | +| `nstat` | kernel SNMP counters | Network kernel stats | +| `processes` | `running`, `zombies`, `blocked` | Process states | +| `system` | `load1`, `load5`, `load15`, `n_cpus`, `uptime` | System load | +| `bond` | `status`, `failed_count` | Bonding interface stats | +| `ethtool` | driver-specific counters | NIC hardware stats | + +### Service Status (`services.conf`) + +All metrics are tagged `influxdb_db=os-metrics`. + +| Telegraf Measurement | Tags | Fields | Description | +|----------------------|------|--------|-------------| +| `procd_service` | `service`, `instance`, `has_respawn` | `running`, `pid`, `exit_code` | Procd service health | + +See [Service Monitoring](#service-monitoring) below for full details. + +## Service Monitoring + +### How It Works + +Every 60 seconds, `/usr/libexec/telegraf-services` queries `ubus call service list` to get the current state of all procd-managed services. The output is converted to InfluxDB line protocol and ingested by Telegraf. 
+ +``` +procd_service,service=nginx,instance=instance1,has_respawn=true running=1i,pid=8001i,exit_code=0i +procd_service,service=nginx,instance=instance1,has_respawn=true running=0i,pid=0i,exit_code=1i ← service down +``` + +In Victoria Metrics, the metric is stored as: +``` +procd_service_running{service="nginx", instance="instance1", has_respawn="true", db="os-metrics"} = 1 +``` + +### The `has_respawn` Tag + +Procd distinguishes two kinds of services: + +- **Persistent daemons** (`has_respawn=true`): configured with `procd_set_param respawn` in their init script. Procd keeps these running and restarts them if they crash. These are the services that **should always be running** and are the primary targets for alerting. + +- **Oneshot / optional services** (`has_respawn=false`): run once and exit, or are manually started on demand (e.g., `adblock`, `ns-binding`). A `running=false` state for these is expected and normal. + +### Monitored Services (Default) + +The following persistent services are discovered automatically on a default NethSecurity installation: + +| Service | Instance | Description | +|---------|----------|-------------| +| `blockd` | `instance1` | Block device manager | +| `cron` | `instance1` | Task scheduler | +| `dnsmasq` | `ns_dnsmasq` | DNS/DHCP server | +| `dpireport` | `instance1` | DPI reporting | +| `dropbear` | `instance1` | SSH server | +| `mwan3` | `rtmon_ipv4`, `rtmon_ipv6` | Multi-WAN route monitor | +| `netdata` | `instance1` | System monitoring agent | +| `netifyd` | `instance1` | Network interface daemon | +| `network` | `instance1` | Network manager | +| `nginx` | `instance1` | Reverse proxy / web server | +| `ns-api-server` | `instance1` | NethSecurity API server | +| `ns-flows` | `instance1` | Flow tracking | +| `ns-stats` | `instance1` | Statistics collector | +| `odhcpd` | `instance1` | DHCPv6 / RA daemon | +| `qosify` | `instance1` | QoS daemon | +| `rpcd` | `instance1` | RPC daemon | +| `swanctl` | `instance1` | IKEv2/IPsec 
daemon | +| `sysntpd` | `instance1` | NTP daemon | +| `telegraf` | `instance1` | Metrics collection agent | +| `ubus` | `instance1` | IPC bus daemon | +| `uwsgi` | `instance1` | WSGI application server | +| `victoria-logs` | `instance1` | Log storage | +| `victoria-metrics` | `instance1` | Metrics storage | +| `vmalert` | `instance1` | Alert rules engine | + +New services that declare `procd_set_param respawn` in their init script are automatically included without any configuration change. + +### Querying Service Metrics + +Check all services and their running state: +```bash +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running' \ + | jq -r '.data.result[] | "\(.metric.service)/\(.metric.instance) has_respawn=\(.metric.has_respawn) running=\(.value[1])"' \ + | sort +``` + +Check only persistent services that are currently down: +```bash +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{has_respawn="true"}==0' \ + | jq -r '.data.result[].metric | "\(.service)/\(.instance)"' +``` + +Check a specific service: +```bash +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{service="nginx"}' | jq . +``` + +Run the collection script manually to preview its output: +```bash +/usr/libexec/telegraf-services +``` + +### Service Down Alert (`ServiceDown`) + +Defined in `/etc/vmalert/rules/services.yaml`: + +| Field | Value | +|-------|-------| +| Condition | `procd_service_running{has_respawn="true"} == 0` | +| For | 2 minutes | +| Severity | `critical` | +| Service label | `host` | + +The 2-minute window allows procd time to attempt its configured respawn retries before the alert fires. + +Check alert status: +```bash +curl -s http://127.0.0.1:8082/api/v1/alerts | jq '.data[] | select(.name=="ServiceDown")' +``` + +### Manual Testing + +See [Testing Service Monitoring](#testing-service-monitoring) below for full test procedures. 
+ +## Starting and Managing Telegraf + +```bash +# Enable at boot and start +/etc/init.d/telegraf enable +/etc/init.d/telegraf start + +# Restart (after config changes) +/etc/init.d/telegraf restart + +# Check status +/etc/init.d/telegraf status + +# View logs +logread | grep telegraf | tail -20 +``` + +## Verifying Metrics in Victoria Metrics + +List all metric names being collected: +```bash +curl -s 'http://127.0.0.1:8428/api/v1/label/__name__/values' | jq -r '.data[]' | sort +``` + +Query a specific metric: +```bash +# CPU usage % (same expression as the HighCpuUsage alert rule) +curl -s 'http://127.0.0.1:8428/api/v1/query?query=round(100-avg(cpu_usage_idle),0.1)' | jq . + +# Memory usage % +curl -s 'http://127.0.0.1:8428/api/v1/query?query=round((mem_used/mem_total)*100,0.1)' | jq . + +# All service states +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running' | jq . +``` + +## Testing Service Monitoring + +### Quick Manual Test + +Stop a service, verify the metric drops to 0, then restore it: + +```bash +# 1. Check the baseline — nginx should be running (value=1) +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{service="nginx"}' \ + | jq -r '.data.result[0].value[1]' + +# 2. Stop the service +/etc/init.d/nginx stop + +# 3. Wait for the next collection interval (up to 60s), then re-query +sleep 65 +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{service="nginx"}' \ + | jq -r '.data.result[0].value[1]' +# Expected output: 0 + +# 4. Check vmalert — after 2 minutes the alert will be in "pending" then "firing" +curl -s http://127.0.0.1:8082/api/v1/alerts \ + | jq '.data[] | select(.name=="ServiceDown") | {name,state,labels}' + +# 5. Restore the service +/etc/init.d/nginx start + +# 6. 
Verify recovery — metric returns to 1 +sleep 65 +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{service="nginx"}' \ + | jq -r '.data.result[0].value[1]' +# Expected output: 1 +``` + +### Using the Test Script + +The repository includes a simulation script for automated testing: + +```bash +# Stop nginx, observe metric drop and alert, then recover (default service: nginx) +scripts/test-service-monitor.sh + +# Test a different service +scripts/test-service-monitor.sh dropbear + +# Run in observe-only mode (no service restart) +scripts/test-service-monitor.sh nginx --no-recover +``` + +### Simulating a Crash (respawn exhaustion) + +To simulate a service crash and exhaust procd's respawn retries: + +```bash +# Get the PID of a running service +PID=$(ubus call service list '{"name":"nginx"}' | jq -r '.nginx.instances.instance1.pid') + +# Kill it repeatedly to exhaust respawn retries (default: 5 retries in 3600s) +for i in $(seq 1 6); do + kill -9 $PID 2>/dev/null + sleep 2 + PID=$(ubus call service list '{"name":"nginx"}' | jq -r '.nginx.instances.instance1.pid // 0') + echo "Attempt $i: pid=$PID" +done + +# After retries are exhausted, procd marks the service as stopped +ubus call service list '{"name":"nginx"}' | jq '.nginx.instances.instance1.running' +# Expected: false +``` + +> **Note**: After exhausting respawn retries, restart with `/etc/init.d/nginx start`. 
+ +## Troubleshooting + +### Script returns no output + +```bash +# Test ubus access +ubus call service list | head -5 + +# Run script manually with verbose output +python3 /usr/libexec/telegraf-services + +# Check Telegraf is ingesting the exec output +logread | grep 'telegraf' | grep -i 'exec\|error' | tail -20 +``` + +### Metrics not appearing in Victoria Metrics + +```bash +# Confirm Telegraf is sending data +curl -s 'http://127.0.0.1:8428/api/v1/label/__name__/values' | jq -r '.data[]' | grep procd + +# Check Telegraf configuration is valid +telegraf --config /etc/telegraf.conf --config-directory /etc/telegraf.conf.d --test 2>&1 | head -30 + +# Verify output plugin connectivity +curl -s http://127.0.0.1:8428/metrics | grep vm_rows_total | head -5 +``` + +### ServiceDown alert not firing + +```bash +# Confirm the rule is loaded +curl -s http://127.0.0.1:8082/api/v1/rules \ + | jq '.data.groups[] | select(.name=="services") | .rules[] | {name, state, expr}' + +# Check if any services are in pending/firing state +curl -s http://127.0.0.1:8082/api/v1/alerts | jq '.data' + +# Manually evaluate the alert expression against Victoria Metrics +curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{has_respawn="true"}==0' | jq . +``` + +### ethtool errors in logs + +Bridge interfaces don't support ethtool stats. 
Add `br*` to the exclude list in `/etc/telegraf.conf.d/os.conf`: +```ini +[[inputs.ethtool]] + interface_exclude = ["wg*", "ipsec*", "tun*", "br*"] +``` + +## References + +- [Telegraf documentation](https://docs.influxdata.com/telegraf/) +- [Telegraf inputs.exec plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec) +- [OpenWrt procd init scripts](https://openwrt.org/docs/guide-developer/procd-init-scripts) +- [OpenWrt ubus reference](https://openwrt.org/docs/techref/ubus) +- [Victoria Metrics vmalert](https://docs.victoriametrics.com/vmalert/) diff --git a/packages/telegraf/files/telegraf-services b/packages/telegraf/files/telegraf-services new file mode 100644 index 000000000..16dd0c6f6 --- /dev/null +++ b/packages/telegraf/files/telegraf-services @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2026 Nethesis S.r.l. +# SPDX-License-Identifier: GPL-2.0-only +# +# Collect procd service status via ubus. +# +# Modes: +# (default) Print a JSON array to stdout — used by telegraf inputs.exec with +# data_format = "json_v2" (parsers.json_v2 build tag). +# +# --push POST metrics in InfluxDB line protocol directly to the Victoria +# Metrics write API. Use when running standalone (e.g. from cron) +# without relying on telegraf parsing. +# The destination URL is read from VM_WRITE_URL env var, or defaults +# to http://127.0.0.1:8428/write?db=os-metrics +# +# Output metric: procd_service +# Tags: service, instance, has_respawn +# Fields: running (int 0/1), pid (int), exit_code (int) +# +# State file: /var/run/telegraf-services-known.json +# Tracks which persistent services (has_respawn=true) have been seen so far. +# When a known persistent service disappears from the ubus list (e.g. stopped +# by an admin or failed to restart after a crash), running=0 is still emitted +# so VictoriaMetrics retains visibility and vmalert can fire an alert. 
+# The file lives in /var/run (tmpfs) and is rebuilt automatically after a +# reboot, once all services are running again. + +import json +import os +import subprocess +import sys +import time +import urllib.request + +STATE_FILE = '/var/run/telegraf-services-known.json' + + +def get_service_list(): + result = subprocess.run( + ["ubus", "call", "service", "list"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode != 0: + print(f"Error calling ubus: {result.stderr}", file=sys.stderr) + sys.exit(1) + return json.loads(result.stdout) + + +def sanitize_tag(value): + # InfluxDB line protocol: tag values must not contain commas, spaces or equals + return value.replace(",", "_").replace(" ", "_").replace("=", "_") + + +def load_known_services(): + """Load the set of persistent service keys (service/instance) seen so far.""" + try: + with open(STATE_FILE) as f: + return set(json.load(f)) + except (FileNotFoundError, json.JSONDecodeError): + return set() + + +def save_known_services(known): + """Persist the set of known persistent service keys.""" + try: + with open(STATE_FILE, 'w') as f: + json.dump(sorted(known), f) + except OSError as e: + print(f"Warning: could not save state file: {e}", file=sys.stderr) + + +def build_records(data): + """Return a list of dicts, one per service instance. + + Persistent services (has_respawn=true) that disappear from the ubus list + are still emitted with running=0 so VictoriaMetrics never loses track of + them between the crash and the vmalert evaluation window. 
+ """ + known = load_known_services() + records = [] + seen = set() + + for svc_name, svc_body in sorted(data.items()): + for inst_name, inst in svc_body.get("instances", {}).items(): + has_respawn = "respawn" in inst + key = f"{svc_name}/{inst_name}" + seen.add(key) + + if has_respawn: + known.add(key) + + records.append({ + "service": sanitize_tag(svc_name), + "instance": sanitize_tag(inst_name), + "has_respawn": "true" if has_respawn else "false", + "running": 1 if inst.get("running", False) else 0, + "pid": inst.get("pid", 0), + "exit_code": inst.get("exit_code", 0), + }) + + # Emit running=0 for known persistent services that disappeared from ubus + for key in sorted(known - seen): + svc_name, inst_name = key.split("/", 1) + records.append({ + "service": sanitize_tag(svc_name), + "instance": sanitize_tag(inst_name), + "has_respawn": "true", + "running": 0, + "pid": 0, + "exit_code": -1, + }) + + save_known_services(known) + return records + + +def to_line_protocol(records, timestamp): + """Convert records to InfluxDB line protocol strings.""" + lines = [] + for r in records: + lines.append( + f"procd_service," + f"service={r['service']}," + f"instance={r['instance']}," + f"has_respawn={r['has_respawn']} " + f"running={r['running']}i," + f"pid={r['pid']}i," + f"exit_code={r['exit_code']}i " + f"{timestamp}" + ) + return lines + + +def push_to_vm(records): + timestamp = int(time.time()) * 10**9 # nanoseconds + url = os.environ.get( + "VM_WRITE_URL", "http://127.0.0.1:8428/write?db=os-metrics" + ) + body = "\n".join(to_line_protocol(records, timestamp)).encode("utf-8") + req = urllib.request.Request( + url, data=body, method="POST", + headers={"Content-Type": "application/octet-stream"}, + ) + try: + with urllib.request.urlopen(req, timeout=5) as resp: + if resp.status not in (200, 204): + print(f"VM write error: HTTP {resp.status}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"VM write error: {e}", file=sys.stderr) + sys.exit(1) + + +def 
main(): + push_mode = "--push" in sys.argv + + data = get_service_list() + records = build_records(data) + + if push_mode: + push_to_vm(records) + else: + # JSON array — consumed by telegraf inputs.exec with data_format=json_v2 + print(json.dumps(records)) + + +if __name__ == "__main__": + main() diff --git a/packages/telegraf/files/telegraf.conf.d/services.conf b/packages/telegraf/files/telegraf.conf.d/services.conf new file mode 100644 index 000000000..045af7ff6 --- /dev/null +++ b/packages/telegraf/files/telegraf.conf.d/services.conf @@ -0,0 +1,20 @@ +# Procd service status monitoring +# Collects running state for all procd-managed services via ubus. +# Persistent services (has_respawn=true) are the primary targets for alerting. +# +# Uses parsers.json_v2 — available in the default NethSecurity Telegraf build. + +[[inputs.exec]] + name_override = "procd_service" + commands = ["/usr/libexec/telegraf-services"] + interval = "60s" + timeout = "10s" + data_format = "json_v2" + + [[inputs.exec.json_v2]] + [[inputs.exec.json_v2.object]] + path = "@this" + tags = ["service", "instance", "has_respawn"] + + [inputs.exec.tags] + influxdb_db = "os-metrics" diff --git a/packages/victoria-metrics/README_VMALERT.md b/packages/victoria-metrics/README_VMALERT.md index c6257a556..3a13855eb 100644 --- a/packages/victoria-metrics/README_VMALERT.md +++ b/packages/victoria-metrics/README_VMALERT.md @@ -90,18 +90,18 @@ curl 'http://127.0.0.1:8081/api/v1/rules?type=alert' | jq '.data.groups[0].rules The default rules monitor: 1. **CPU Usage** - - HighCpuUsage: CPU > 70% for 5 minutes (warning) + - HighCpuUsage: CPU > 70% for 5 minutes (warning) — suppressed when CriticalCpuUsage fires - CriticalCpuUsage: CPU > 85% for 2 minutes (critical) 2. **Memory Usage** - - HighMemoryUsage: RAM > 80% (warning) + - HighMemoryUsage: RAM > 80% (warning) — suppressed when CriticalMemoryUsage fires - CriticalMemoryUsage: RAM > 90% (critical) - HighSwapUsage: Swap > 50% (warning) 3. 
**Disk Space** - - DiskSpaceWarning: Usage > 80% (warning) + - DiskSpaceWarning: Usage > 80% (warning) — suppressed when DiskSpaceCritical fires - DiskSpaceCritical: Usage > 90% (critical) - - DiskInodesWarning: Inodes > 80% (warning) + - DiskInodesWarning: Inodes > 80% (warning) — suppressed when DiskInodesCritical fires - DiskInodesCritical: Inodes > 90% (critical) 4. **System Load** @@ -115,6 +115,17 @@ The default rules monitor: - ProcessesZombiesAlert: Zombie processes > 5 (warning) - ProcessesBlockedAlert: Blocked processes > 10 (warning) +> **Alert suppression**: Warning alerts use `unless` clauses so they are automatically silenced when their corresponding critical alert is already firing, reducing notification noise. + +### Included Rules (services.yaml) + +Service health monitoring via procd/ubus: + +7. **Service Status** + - ServiceDown: A persistent procd service (with respawn configured) has been down for more than 2 minutes (critical) + +See the [Telegraf README](../telegraf/README.md) for the full list of monitored services and query examples. + ## Integration with ns-plug (Mimir) When Mimir alerting is configured via ns-plug, vmalert **automatically** detects the configuration and forwards alerts to Mimir. No manual configuration of vmalert is required. diff --git a/packages/victoria-metrics/files/vmalert-rules/services.yaml b/packages/victoria-metrics/files/vmalert-rules/services.yaml new file mode 100644 index 000000000..3b58ddcd0 --- /dev/null +++ b/packages/victoria-metrics/files/vmalert-rules/services.yaml @@ -0,0 +1,24 @@ +# Victoria Metrics Alert Rules for Service Monitoring +# +# Monitors procd-managed services via the procd_service_running metric +# collected by /usr/libexec/telegraf-services. +# +# Only services with has_respawn="true" trigger alerts — these are persistent +# daemons that procd is configured to keep running. Oneshot or optional +# services (adblock, ns-binding, etc.) are excluded. 
+ +groups: + - name: "services" + interval: "60s" + rules: + - alert: ServiceDown + expr: 'procd_service_running{has_respawn="true"} == 0' + for: "2m" + labels: + severity: "critical" + alertgroup: "services" + annotations: + summary_en: "Service {{ $labels.service }} is down" + summary_it: "Il servizio {{ $labels.service }} non è attivo" + description_en: "Service {{ $labels.service }} (instance {{ $labels.instance }}) has been down for more than 2 minutes" + description_it: "Il servizio {{ $labels.service }} (istanza {{ $labels.instance }}) non è attivo da più di 2 minuti" From 4ae7a68a6c7b003f210bcf575971a446451e30b1 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Thu, 23 Apr 2026 10:51:05 +0200 Subject: [PATCH 36/39] feat(telegraf): add mwan3 WAN interface monitoring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add telegraf-mwan Python script that reads /var/run/mwan3/iface_state/ to collect WAN interface connectivity state. Each file in that directory is named after an mwan3 interface and contains 'online' or 'offline'. No-ops silently when mwan3 is not running (directory absent). - Add mwan.conf telegraf input using inputs.exec + json_v2 parser, producing mwan_interface metrics tagged with interface and status. - Add mwan.yaml vmalert rule: WANDown fires when mwan_interface_online == 0 for 2 minutes. The interface and status labels come directly from the metric, so each WAN interface fires its own distinct alert. - Update telegraf/README.md with WAN Monitoring section: architecture, metric reference, query examples, alert details, and manual test procedure. - Update Makefile to install telegraf-mwan and mwan.conf. 
End-to-end verified on 192.168.100.238: wan2 iface_state=offline → mwan_interface_online{interface=wan2}=0 → WANDown{interface=wan2, alertgroup=mwan} fired after 2 minutes ✓ Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- packages/mwan3/files/usr/sbin/mwan3track | 3 +- packages/telegraf/Makefile | 2 + packages/telegraf/README.md | 123 +++++++++++++++++- packages/telegraf/files/telegraf-mwan | 100 ++++++++++++++ .../telegraf/files/telegraf.conf.d/mwan.conf | 20 +++ .../files/vmalert-rules/mwan.yaml | 25 ++++ 6 files changed, 268 insertions(+), 5 deletions(-) create mode 100644 packages/telegraf/files/telegraf-mwan create mode 100644 packages/telegraf/files/telegraf.conf.d/mwan.conf create mode 100644 packages/victoria-metrics/files/vmalert-rules/mwan.yaml diff --git a/packages/mwan3/files/usr/sbin/mwan3track b/packages/mwan3/files/usr/sbin/mwan3track index 49c82fb24..478f4f0e3 100755 --- a/packages/mwan3/files/usr/sbin/mwan3track +++ b/packages/mwan3/files/usr/sbin/mwan3track @@ -119,8 +119,7 @@ disconnected() { LOG notice "Interface $INTERFACE ($DEVICE) is offline" env -i ACTION="disconnected" INTERFACE="$INTERFACE" DEVICE="$DEVICE" /sbin/hotplug-call iface else - LOG notice "Skip disconnected event for $INTERFACE ($DEVICE), but sending alert" - env -i ACTION="disconnected" INTERFACE="$INTERFACE" DEVICE="$DEVICE" /usr/libexec/mwan-hooks/send-mwan-alert + LOG notice "Skip disconnected event for $INTERFACE ($DEVICE)" fi } diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index b9f3c395d..9cfe4745a 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -75,8 +75,10 @@ define Package/telegraf/install $(INSTALL_DIR) $(1)/etc/telegraf.conf.d $(INSTALL_DATA) ./files/telegraf.conf.d/os.conf $(1)/etc/telegraf.conf.d/os.conf $(INSTALL_DATA) ./files/telegraf.conf.d/services.conf $(1)/etc/telegraf.conf.d/services.conf + $(INSTALL_DATA) ./files/telegraf.conf.d/mwan.conf $(1)/etc/telegraf.conf.d/mwan.conf 
$(INSTALL_DIR) $(1)/usr/libexec $(INSTALL_BIN) ./files/telegraf-services $(1)/usr/libexec/telegraf-services + $(INSTALL_BIN) ./files/telegraf-mwan $(1)/usr/libexec/telegraf-mwan endef define Package/telegraf/postinst diff --git a/packages/telegraf/README.md b/packages/telegraf/README.md index 817db37e8..2ed445d0b 100644 --- a/packages/telegraf/README.md +++ b/packages/telegraf/README.md @@ -10,8 +10,9 @@ This package provides **Telegraf**, the metrics collection agent. It collects ho procd / ubus │ ▼ -/usr/libexec/telegraf-services ← service status via ubus -/proc filesystem ← CPU, memory, disk, network +/usr/libexec/telegraf-services ← service status via ubus +/var/run/mwan3/iface_state/ ← WAN interface status via mwan3 state files +/proc filesystem ← CPU, memory, disk, network │ ▼ Telegraf (inputs.exec, inputs.cpu, inputs.mem, …) @@ -30,6 +31,7 @@ Victoria Metrics (http://127.0.0.1:8428) | `/etc/telegraf.conf` | Main Telegraf agent configuration and InfluxDB output | | `/etc/telegraf.conf.d/os.conf` | CPU, memory, disk, network, process metrics | | `/etc/telegraf.conf.d/services.conf` | Procd service status via `inputs.exec` | +| `/etc/telegraf.conf.d/mwan.conf` | mwan3 WAN interface status via `inputs.exec` | ## Collected Metrics @@ -60,6 +62,16 @@ All metrics are tagged `influxdb_db=os-metrics`. See [Service Monitoring](#service-monitoring) below for full details. +### WAN Interface Status (`mwan.conf`) + +All metrics are tagged `influxdb_db=os-metrics`. + +| Telegraf Measurement | Tags | Fields | Description | +|----------------------|------|--------|-------------| +| `mwan_interface` | `interface` | `online` | mwan3 WAN link state | + +See [WAN Monitoring](#wan-monitoring) below for full details. 
+ ## Service Monitoring ### How It Works @@ -151,7 +163,7 @@ Defined in `/etc/vmalert/rules/services.yaml`: | Condition | `procd_service_running{has_respawn="true"} == 0` | | For | 2 minutes | | Severity | `critical` | -| Service label | `host` | +| alertgroup | `services` | The 2-minute window allows procd time to attempt its configured respawn retries before the alert fires. @@ -164,6 +176,94 @@ curl -s http://127.0.0.1:8082/api/v1/alerts | jq '.data[] | select(.name=="Servi See [Testing Service Monitoring](#testing-service-monitoring) below for full test procedures. +## WAN Monitoring + +### How It Works + +Every 60 seconds, `/usr/libexec/telegraf-mwan` reads `/var/run/mwan3/iface_state/`. mwan3 maintains one file per configured WAN interface in that directory; the file content is the single word `online` or `offline`, updated in real time by mwan3's tracking probes. + +``` +/var/run/mwan3/iface_state/wan → "online" +/var/run/mwan3/iface_state/wan2 → "offline" +``` + +The script emits one record per interface: + +``` +mwan_interface,interface=wan online=1i +mwan_interface,interface=wan2 online=0i ← WAN down +``` + +In Victoria Metrics the metric is stored as: +``` +mwan_interface_online{interface="wan", db="os-metrics"} = 1 +mwan_interface_online{interface="wan2", db="os-metrics"} = 0 +``` + +If mwan3 is not running, the state directory does not exist and the script outputs an empty array — no metrics, no alerts. 
+
+### Querying WAN Metrics
+
+```bash
+# All WAN interfaces and their current state
+curl -s 'http://127.0.0.1:8428/api/v1/query?query=mwan_interface_online' \
+  | jq -r '.data.result[] | "\(.metric.interface) online=\(.value[1])"'
+
+# Interfaces currently offline
+curl -s 'http://127.0.0.1:8428/api/v1/query?query=mwan_interface_online==0' \
+  | jq -r '.data.result[].metric.interface'
+
+# Run the collection script manually
+/usr/libexec/telegraf-mwan
+```
+
+### WAN Down Alert (`WanDown`)
+
+Defined in `/etc/vmalert/rules/mwan.yaml`:
+
+| Field | Value |
+|-------|-------|
+| Condition | `mwan_interface_online == 0` |
+| For | 2 minutes |
+| Severity | `critical` |
+| service | `network` |
+
+The `interface` label on the alert comes directly from the metric, so each WAN interface fires its own distinct alert.
+
+Check alert status:
+```bash
+curl -s http://127.0.0.1:8082/api/v1/alerts | jq '.data.alerts[] | select(.name=="WanDown")'
+```
+
+### Manual Testing
+
+Simulate a WAN going offline by writing `offline` to its state file (mwan3 will overwrite this when it next evaluates the interface, so the window is short — use `--push` for an immediate metric update):
+
+```bash
+# 1. Check baseline — both WANs should be online
+curl -s 'http://127.0.0.1:8428/api/v1/query?query=mwan_interface_online' \
+  | jq -r '.data.result[] | "\(.metric.interface): \(.value[1])"'
+
+# 2. Simulate wan2 going offline
+echo "offline" > /var/run/mwan3/iface_state/wan2
+
+# 3. Push the metric immediately (or wait up to 60s for telegraf)
+/usr/libexec/telegraf-mwan --push
+
+# 4. Verify metric dropped to 0
+curl -s 'http://127.0.0.1:8428/api/v1/query?query=mwan_interface_online{interface="wan2"}' \
+  | jq -r '.data.result[0].value[1]'
+# Expected: 0
+
+# 5. After 2 minutes: WanDown alert fires
+curl -s http://127.0.0.1:8082/api/v1/alerts \
+  | jq '.data.alerts[] | select(.name=="WanDown")'
+
+# 6. Restore
+echo "online" > /var/run/mwan3/iface_state/wan2
+/usr/libexec/telegraf-mwan --push
+```
+
 ## Starting and Managing Telegraf
 
 ```bash
@@ -314,6 +414,23 @@ curl -s http://127.0.0.1:8082/api/v1/alerts | jq '.data'
 curl -s 'http://127.0.0.1:8428/api/v1/query?query=procd_service_running{has_respawn="true"}==0' | jq .
 ```
 
+### WanDown alert not firing
+
+```bash
+# Confirm the rule is loaded
+curl -s http://127.0.0.1:8082/api/v1/rules \
+  | jq '.data.groups[] | select(.name=="mwan") | .rules[] | {name, state, expr}'
+
+# Check if any WANs are in pending/firing state
+curl -s http://127.0.0.1:8082/api/v1/alerts | jq '.data.alerts[] | select(.name=="WanDown")'
+
+# Manually evaluate the alert expression
+curl -s 'http://127.0.0.1:8428/api/v1/query?query=mwan_interface_online==0' | jq .
+
+# Check mwan3 state files directly
+ls -la /var/run/mwan3/iface_state/ && cat /var/run/mwan3/iface_state/*
+```
+
 ### ethtool errors in logs
 
 Bridge interfaces don't support ethtool stats. Add `br*` to the exclude list in `/etc/telegraf.conf.d/os.conf`:
diff --git a/packages/telegraf/files/telegraf-mwan b/packages/telegraf/files/telegraf-mwan
new file mode 100644
index 000000000..cbba7a51c
--- /dev/null
+++ b/packages/telegraf/files/telegraf-mwan
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2026 Nethesis S.r.l.
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Collect mwan3 WAN interface status from /var/run/mwan3/iface_state/.
+#
+# Each file in that directory is named after an mwan3 interface and contains
+# a single word: "online" or "offline". The directory is managed by mwan3
+# and only exists when the daemon is running.
+#
+# Modes:
+#   (default)  Print a JSON array to stdout — used by telegraf inputs.exec with
+#              data_format = "json_v2" (parsers.json_v2 build tag).
+#
+#   --push     POST metrics in InfluxDB line protocol directly to the Victoria
+#              Metrics write API. Use when running standalone or from cron.
+# The destination URL is read from VM_WRITE_URL env var, or defaults +# to http://127.0.0.1:8428/write?db=os-metrics +# +# Output metric: mwan_interface +# Tags: interface +# Fields: online (int 0/1) + +import json +import os +import sys +import time +import urllib.request + +IFACE_STATE_DIR = '/var/run/mwan3/iface_state' + + +def build_records(): + """Return one record per mwan3 interface found in the state directory.""" + if not os.path.isdir(IFACE_STATE_DIR): + return [] + + records = [] + for name in sorted(os.listdir(IFACE_STATE_DIR)): + path = os.path.join(IFACE_STATE_DIR, name) + if not os.path.isfile(path): + continue + try: + status = open(path).read().strip() + except OSError: + continue + records.append({ + 'interface': name, + 'online': 1 if status == 'online' else 0, + }) + return records + + +def to_line_protocol(records, timestamp): + """Convert records to InfluxDB line protocol strings.""" + lines = [] + for r in records: + lines.append( + f"mwan_interface," + f"interface={r['interface']} " + f"online={r['online']}i " + f"{timestamp}" + ) + return lines + + +def push_to_vm(records): + timestamp = int(time.time()) * 10**9 # nanoseconds + url = os.environ.get( + 'VM_WRITE_URL', 'http://127.0.0.1:8428/write?db=os-metrics' + ) + body = '\n'.join(to_line_protocol(records, timestamp)).encode('utf-8') + req = urllib.request.Request( + url, data=body, method='POST', + headers={'Content-Type': 'application/octet-stream'}, + ) + try: + with urllib.request.urlopen(req, timeout=5) as resp: + if resp.status not in (200, 204): + print(f'VM write error: HTTP {resp.status}', file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f'VM write error: {e}', file=sys.stderr) + sys.exit(1) + + +def main(): + push_mode = '--push' in sys.argv + records = build_records() + + if push_mode: + if records: + push_to_vm(records) + else: + print(json.dumps(records)) + + +if __name__ == '__main__': + main() diff --git a/packages/telegraf/files/telegraf.conf.d/mwan.conf 
b/packages/telegraf/files/telegraf.conf.d/mwan.conf new file mode 100644 index 000000000..8658cfdcf --- /dev/null +++ b/packages/telegraf/files/telegraf.conf.d/mwan.conf @@ -0,0 +1,20 @@ +# mwan3 WAN interface status monitoring +# Reads /var/run/mwan3/iface_state/ — one file per interface, content is +# "online" or "offline". No-ops silently when mwan3 is not running. +# +# Uses parsers.json_v2 — available in the default NethSecurity Telegraf build. + +[[inputs.exec]] + name_override = "mwan_interface" + commands = ["/usr/libexec/telegraf-mwan"] + interval = "60s" + timeout = "10s" + data_format = "json_v2" + + [[inputs.exec.json_v2]] + [[inputs.exec.json_v2.object]] + path = "@this" + tags = ["interface"] + + [inputs.exec.tags] + influxdb_db = "os-metrics" diff --git a/packages/victoria-metrics/files/vmalert-rules/mwan.yaml b/packages/victoria-metrics/files/vmalert-rules/mwan.yaml new file mode 100644 index 000000000..a4da2b96d --- /dev/null +++ b/packages/victoria-metrics/files/vmalert-rules/mwan.yaml @@ -0,0 +1,25 @@ +# Victoria Metrics Alert Rules for mwan3 WAN Monitoring +# +# Monitors WAN interface connectivity via the mwan_interface_online metric +# collected by /usr/libexec/telegraf-mwan. +# +# The metric is sourced from /var/run/mwan3/iface_state/ which mwan3 +# writes as "online" or "offline" based on its tracking probes. +# Only interfaces present in that directory are monitored — interfaces +# not managed by mwan3 are not included. + +groups: + - name: "mwan" + interval: "60s" + rules: + - alert: WanDown + expr: 'mwan_interface_online == 0' + for: "2m" + labels: + severity: "critical" + service: "network" + annotations: + summary_en: "WAN interface {{ $labels.interface }} is offline" + summary_it: "L'interfaccia WAN {{ $labels.interface }} non è raggiungibile" + description_en: "WAN interface {{ $labels.interface }} is down. Internet connectivity lost." + description_it: "L'interfaccia WAN {{ $labels.interface }} non è raggiungibile. 
Connettività Internet persa." From 47882186831a474d2a3dec2c48725c428b7ac308 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Thu, 23 Apr 2026 12:23:45 +0200 Subject: [PATCH 37/39] ns-plug: remove netdata alerting integration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Netdata health checks and alert forwarding are now disabled permanently. Alerts are handled by vmalert (via Telegraf → VictoriaMetrics) instead. Changes: - 30_ns-plug_alerts: emptied to disable all health.d rules, set health=no, remove health_alarm_notify.conf from system on upgrade. Keep python plugins for non-alerting metrics (fping latency, dashboards). - Deleted: health_alarm_notify.conf, netadata_enable_alerts, netadata_disable_alerts - ns-plug-alert: removed netdata subcommand and ~140 lines of NETDATA_ALERT_MAP, cmd_netdata, _netdata_fire, _netdata_resolve helper functions - Makefile: removed install of netadata hooks and health_alarm_notify.conf, removed /etc/netdata dir creation - README.md: updated Alerts section to reflect vmalert-based alerting Persistence: 30_ns-plug_alerts runs at every sysupgrade + fresh install, ensuring netdata alerting stays disabled across updates. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- packages/ns-plug/Makefile | 4 - packages/ns-plug/README.md | 17 +- packages/ns-plug/files/30_ns-plug_alerts | 44 ++--- .../ns-plug/files/health_alarm_notify.conf | 90 --------- .../ns-plug/files/netadata_disable_alerts | 5 - packages/ns-plug/files/netadata_enable_alerts | 5 - packages/ns-plug/files/ns-plug-alert | 177 ------------------ 7 files changed, 17 insertions(+), 325 deletions(-) delete mode 100644 packages/ns-plug/files/health_alarm_notify.conf delete mode 100644 packages/ns-plug/files/netadata_disable_alerts delete mode 100644 packages/ns-plug/files/netadata_enable_alerts diff --git a/packages/ns-plug/Makefile b/packages/ns-plug/Makefile index 11effa254..20ba87f56 100644 --- a/packages/ns-plug/Makefile +++ b/packages/ns-plug/Makefile @@ -68,7 +68,6 @@ define Package/ns-plug/install $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_DIR) $(1)/etc/config $(INSTALL_DIR) $(1)/etc/uci-defaults - $(INSTALL_DIR) $(1)/etc/netdata $(INSTALL_DIR) $(1)/lib/upgrade/keep.d $(INSTALL_DIR) $(1)/usr/libexec/ns-plug $(INSTALL_DIR) $(1)/usr/libexec/mwan-hooks @@ -94,8 +93,6 @@ define Package/ns-plug/install $(INSTALL_BIN) ./files/40_ns-plug_automatic_updates $(1)/usr/libexec/ns-plug $(INSTALL_BIN) ./files/40_ns-plug_mwan_hooks $(1)/etc/uci-defaults $(INSTALL_BIN) ./files/40_ns-plug_mwan_hooks $(1)/usr/libexec/ns-plug - $(INSTALL_BIN) ./files/netadata_enable_alerts $(1)/usr/share/ns-plug/hooks/register/70netadata_enable_alerts - $(INSTALL_BIN) ./files/netadata_disable_alerts $(1)/usr/share/ns-plug/hooks/unregister/70netadata_disable_alerts $(INSTALL_BIN) ./files/enable_automatic_updates $(1)/usr/share/ns-plug/hooks/register/60enable_automatic_updates $(INSTALL_BIN) ./files/disable_automatic_updates $(1)/usr/share/ns-plug/hooks/unregister/60disable_automatic_updates $(INSTALL_CONF) ./files/config $(1)/etc/config/ns-plug @@ -105,7 +102,6 @@ define Package/ns-plug/install $(INSTALL_BIN) ./files/ns-plug-alert 
$(1)/usr/sbin $(INSTALL_BIN) ./files/mwan-hooks $(1)/usr/libexec/ns-plug $(INSTALL_BIN) ./files/ns-plug-rsyslog-fixup.uci-default $(1)/etc/uci-defaults/rsyslog-fixup - $(INSTALL_DATA) files/health_alarm_notify.conf $(1)/usr/share/ns-plug/ endef $(eval $(call BuildPackage,ns-plug)) diff --git a/packages/ns-plug/README.md b/packages/ns-plug/README.md index 7c4f4f4be..e03189ebb 100644 --- a/packages/ns-plug/README.md +++ b/packages/ns-plug/README.md @@ -133,19 +133,10 @@ remote-backup download $(remote-backup list | jq -r .[0].file) - | gpg --batch - ## Alerts -All system alerts, except MultiWAN ones, are handled by netdata, including those from the multiwan monitoring. -Alerts are disabled by default and enabled only if the machine has a valid subscription. -In this case, alerts are automatically sent to the remote server (either my.nethesis.it or my.nethserver.com) using a -custom sender (`/etc/netdata/health_alarm_notify.conf`). -Alerts are also logged to `/var/log/messages` and are visible within the netdata UI. +System alerts are handled by vmalert (running on the system) via Telegraf metrics and VictoriaMetrics time-series storage. +When the system has a valid subscription, alerts are automatically forwarded to the remote server (my.nethesis.it or my.nethserver.com) using `ns-plug-alert`. -Only the following alerts are sent to the remote system: - -- all of them repeat every 30 minutes while active -- disk space occupation -- WAN down events - -To emulate these alerts manually with `ns-plug-alert`, use: +To fire an alert manually: ``` # Disk usage alert @@ -173,8 +164,6 @@ ns-plug-alert resolve --alertname WanDown --severity critical \ --labels service=network interface=wan0 ``` -When an alert is resolved, netdata will also send a clear command to remote server. - ### MultiWAN alerts MultiWAN alerts are managed using `/etc/mwan3.user` script. 
diff --git a/packages/ns-plug/files/30_ns-plug_alerts b/packages/ns-plug/files/30_ns-plug_alerts index 4406054c8..bd6621d21 100644 --- a/packages/ns-plug/files/30_ns-plug_alerts +++ b/packages/ns-plug/files/30_ns-plug_alerts @@ -1,39 +1,26 @@ #!/bin/sh -# Custom disk alerts -disks_f="/etc/netdata/health.d/disks.conf" -cat << EOF > "$disks_f" -template: disk_space_usage - on: disk.space - class: Utilization - type: System -component: Disk - os: linux freebsd - hosts: * - families: !/dev !/dev/* !/run !/run/* !/overlay * - calc: \$used * 100 / (\$avail + \$used) - units: % - every: 1m - warn: \$this > ((\$status >= \$WARNING ) ? (80) : (90)) - crit: \$this > ((\$status == \$CRITICAL) ? (90) : (98)) - delay: up 1m down 15m multiplier 1.5 max 1h - info: disk \$family space utilization - to: sysadmin - repeat: critical 5m warning 5m -EOF +# Disable all netdata health checks and alerts +# (Alerts are now handled by vmalert via Telegraf → VictoriaMetrics) + +# Disable the health module in netdata.conf +sed -i 's/enabled = yes/enabled = no/' /etc/netdata/netdata.conf 2>/dev/null +sed -i '/^\[health\]/,/^\[/ { s/enabled = .*/enabled = no/ }' /etc/netdata/netdata.conf 2>/dev/null -# Disable unwanted alerts +# Empty all health.d configuration files to disable all alerts files="cpu disks entropy ipc load memory net netfilter processes ram softnet tcp_conn tcp_listen tcp_mem tcp_orphans tcp_resets timex udp_errors" for f in $files do file="/etc/netdata/health.d/${f}.conf" - if [ ! 
-f $file ]; then - > $file - fi + > "$file" 2>/dev/null || true done -# Enable some python plugins -sed -i 's/python.d = no/python.d = yes/' /etc/netdata/netdata.conf +# Remove the health_alarm_notify.conf file from running system +# (in case it was copied from a previous version) +rm -f /etc/netdata/health_alarm_notify.conf + +# Enable python plugins for non-alerting metrics (fping latency, etc) +sed -i 's/python.d = no/python.d = yes/' /etc/netdata/netdata.conf 2>/dev/null python_f="/etc/netdata/python.d.conf" if [ ! -f "$python_f" ]; then cat << EOF > "$python_f" @@ -50,6 +37,3 @@ logind: no nginx_log: no EOF fi - -# Update netdata notification script -cp /usr/share/ns-plug/health_alarm_notify.conf /etc/netdata/health_alarm_notify.conf diff --git a/packages/ns-plug/files/health_alarm_notify.conf b/packages/ns-plug/files/health_alarm_notify.conf deleted file mode 100644 index 31b54a0e0..000000000 --- a/packages/ns-plug/files/health_alarm_notify.conf +++ /dev/null @@ -1,90 +0,0 @@ -# Configuration for alarm notifications - -SEND_EMAIL="NO" -SEND_DYNATRACE="NO" -SEND_STACKPULSE="NO" -SEND_OPSGENIE="NO" -SEND_HANGOUTS="NO" -SEND_PUSHOVER="NO" -SEND_PUSHBULLET="NO" -SEND_TWILIO="NO" -SEND_MESSAGEBIRD="NO" -SEND_KAVENEGAR="NO" -SEND_TELEGRAM="NO" -SEND_SLACK="NO" -SEND_MSTEAMS="NO" -SEND_ROCKETCHAT="NO" -SEND_ALERTA="NO" -SEND_FLOCK="NO" -SEND_DISCORD="NO" -SEND_HIPCHAT="NO" -SEND_KAFKA="NO" -SEND_PD="NO" -SEND_FLEEP="NO" -SEND_IRC="NO" -SEND_SYSLOG="NO" -SEND_PROWL="NO" -SEND_AWSSNS="NO" -SEND_SMS="NO" -SEND_MATRIX="NO" - -# Enable only syslog and custom notification -use_fqdn='YES' -SEND_SYSLOG="YES" -SYSLOG_FACILITY='' -DEFAULT_RECIPIENT_SYSLOG="sysadmin" -SEND_CUSTOM="YES" -DEFAULT_RECIPIENT_CUSTOM="sysadmin" - -# Always generate clear events -clear_alarm_always='YES' - -# Send alerts to my.nethesis.it or my.nethserver.com -custom_sender() { - lk=$(uci -q get ns-plug.config.system_id) - secret=$(uci -q get ns-plug.config.secret) - url=$(uci -q get 
ns-plug.config.alerts_url)"alerts/store" - alert_id=${name} - - logger -t alert "Alert: name=${name} severity=${severity} value=${value} chart=${chart} info=${info} src=${src}" - - # Preserve original netdata status before remapping for legacy API - netdata_status="${status}" - if [ "${status}" == "CRITICAL" ]; then - status="FAILURE" - elif [ "${status}" == "CLEAR" ]; then - status="OK" - fi - - # map to old alerts, when possible - if [ "${chart}" == "disk_space._overlay" ] || [ "${chart}" == "disk_space._" ]; then - alert_id="df:root:percent_bytes:free" - elif [ "${chart}" == "disk_space._boot" ]; then - alert_id="df:boot:percent_bytes:free" - else - alert_id="${name}:${chart}" - fi - payload='{"lk": "'$lk'", "alert_id": "'$alert_id'", "status": "'$status'"}' - - # send only if the machine is registered - if [ -z "${lk}" ] || [ -z "${secret}" ]; then - return - fi - - # send to remote server - if [ "${status}" == "FAILURE" ] || [ "${status}" == "OK" ]; then - /usr/bin/curl -m 180 --retry 3 -L -s \ - --header "Authorization: token ${secret}" --header "Content-Type: application/json" --header "Accept: application/json" \ - --data-raw "${payload}" ${url} - fi - - # Also forward to MY alertmanager if my_url is configured - if [ -n "$(uci -q get ns-plug.config.my_url)" ]; then - /usr/sbin/ns-plug-alert netdata \ - --alertname "${name}" \ - --status "${netdata_status}" \ - --chart "${chart}" \ - --family "${family}" \ - --value "${value}" 2>/dev/null & - fi -} diff --git a/packages/ns-plug/files/netadata_disable_alerts b/packages/ns-plug/files/netadata_disable_alerts deleted file mode 100644 index b21473515..000000000 --- a/packages/ns-plug/files/netadata_disable_alerts +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# Disable netdata alerts -sed -i 's/enabled = yes/enabled = no/' /etc/netdata/netdata.conf -/etc/init.d/netdata restart diff --git a/packages/ns-plug/files/netadata_enable_alerts b/packages/ns-plug/files/netadata_enable_alerts deleted file mode 100644 index 
cf066e58a..000000000 --- a/packages/ns-plug/files/netadata_enable_alerts +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -# Enable netdata alerts -sed -i 's/enabled = no/enabled = yes/' /etc/netdata/netdata.conf -/etc/init.d/netdata restart diff --git a/packages/ns-plug/files/ns-plug-alert b/packages/ns-plug/files/ns-plug-alert index a0b3e0709..68351516b 100644 --- a/packages/ns-plug/files/ns-plug-alert +++ b/packages/ns-plug/files/ns-plug-alert @@ -17,8 +17,6 @@ Usage: ns-plug-alert resolve --alertname NAME --severity {critical,warning,info} [--labels k=v ...] ns-plug-alert list [--state STATE] [--severity SEV] - ns-plug-alert netdata --alertname NAME --status STATUS - --chart CHART --family FAMILY [--value VALUE] Examples: # Fire a critical disk alert @@ -32,10 +30,6 @@ Examples: # List all active alerts ns-plug-alert list - - # Called internally from netdata custom_sender (health_alarm_notify.conf) - ns-plug-alert netdata --alertname disk_space_usage --status CRITICAL \\ - --chart disk_space._data --family /data --value 95.2 """ import argparse @@ -48,47 +42,6 @@ from datetime import datetime, timedelta, timezone from euci import EUci -# --------------------------------------------------------------------------- -# Mapping from netdata alarm names / statuses to the NethSecurity alert catalog. -# Keys are netdata alarm names (${name} in health_alarm_notify.conf). -# Each entry maps a netdata status to the corresponding mimir alert definition. -# --------------------------------------------------------------------------- -NETDATA_ALERT_MAP = { - "disk_space_usage": { - "WARNING": { - "alertname": "DiskSpaceLow", - "service": "storage", - "severity": "warning", - "summary_en": "Disk space low on {mountpoint}", - "summary_it": "Spazio disco in esaurimento su {mountpoint}", - "description_en": "Disk usage on {mountpoint} is above 80%. Free space is running low.", - "description_it": "Utilizzo del disco su {mountpoint} superiore all'80%. 
Lo spazio libero sta esaurendosi.", - }, - "CRITICAL": { - "alertname": "DiskSpaceCritical", - "service": "storage", - "severity": "critical", - "summary_en": "Disk space critical on {mountpoint}", - "summary_it": "Spazio disco critico su {mountpoint}", - "description_en": "Disk usage on {mountpoint} is above 90%. Immediate action required.", - "description_it": "Utilizzo del disco su {mountpoint} superiore al 90%. Intervento immediato richiesto.", - }, - } -} - -# Netdata statuses that mean the alert is firing -NETDATA_FIRE_STATUSES = {"WARNING", "CRITICAL"} - -# Netdata statuses that mean the alert is resolved -NETDATA_RESOLVE_STATUSES = {"CLEAR", "REMOVED", "UNDEFINED"} - -# Map netdata severity to mimir severity (used for generic/unmapped alerts) -NETDATA_SEVERITY_MAP = { - "WARNING": "warning", - "CRITICAL": "critical", -} - - # --------------------------------------------------------------------------- # UCI / configuration helpers # --------------------------------------------------------------------------- @@ -247,127 +200,6 @@ def cmd_list(args): print(json.dumps(alerts, indent=2)) -def cmd_netdata(args): - """ - Handle a netdata alert notification. - - Called from health_alarm_notify.conf custom_sender with: - --alertname ${name} (netdata alarm name) - --status ${status} (CRITICAL, WARNING, CLEAR, REMOVED, ...) - --chart ${chart} (e.g. disk_space._overlay, mwan.score) - --family ${family} (e.g. /, /boot, wan0) - --value ${value} (metric value, may be empty) - """ - url, key, secret = load_config() - if not (url and key and secret): - # Mimir not configured — silently skip so existing flow is unaffected. 
- sys.exit(0) - - netdata_status = (args.status or "").upper() - netdata_name = args.alertname or "" - family = args.family or "" - - if netdata_status in NETDATA_RESOLVE_STATUSES: - _netdata_resolve(url, key, secret, netdata_name, family) - elif netdata_status in NETDATA_FIRE_STATUSES: - _netdata_fire(url, key, secret, netdata_name, netdata_status, family, args.value) - # Any other status (e.g. UNDEFINED at startup) is silently ignored. - - -def _build_netdata_labels_annotations(netdata_name, netdata_status, family, value): - """ - Map a netdata alarm + status to mimir alertname, labels and annotations. - Falls back to a generic mapping when the alarm name is not in the catalog. - """ - mapping = NETDATA_ALERT_MAP.get(netdata_name, {}).get(netdata_status, {}) - - if mapping: - alertname = mapping["alertname"] - severity = mapping["severity"] - service = mapping.get("service") - - # Build label substitution context - ctx = {"mountpoint": family, "interface": family, "value": value or ""} - annotations = {} - for key in ("summary_en", "summary_it", "description_en", "description_it"): - if key in mapping: - annotations[key] = mapping[key].format_map(ctx) - if value: - annotations.setdefault("description_en", annotations.get("description_en", "") + f" Current value: {value}.") - else: - # Generic fallback: pass the netdata name directly as alertname - alertname = netdata_name - severity = NETDATA_SEVERITY_MAP.get(netdata_status, "warning") - service = None - annotations = { - "summary": f"Netdata alert {netdata_name} is {netdata_status.lower()}", - } - if value: - annotations["description"] = f"Current value: {value}." 
- - labels = {"alertname": alertname, "severity": severity} - if service: - labels["service"] = service - - return alertname, labels, annotations - - -def _netdata_fire(url, key, secret, netdata_name, netdata_status, family, value): - alertname, labels, annotations = _build_netdata_labels_annotations( - netdata_name, netdata_status, family, value - ) - - payload = [{ - "labels": labels, - "annotations": annotations, - "generatorURL": f"http://nethsecurity/netdata/{netdata_name}", - "startsAt": fmt(now_utc()), - "endsAt": "0001-01-01T00:00:00Z", - }] - - status, body = http_request("POST", alerts_endpoint(url), data=payload, key=key, secret=secret) - if not (200 <= status < 300): - print(f"Failed to send netdata alert (HTTP {status}): {body}", file=sys.stderr) - sys.exit(1) - - -def _netdata_resolve(url, key, secret, netdata_name, family): - # Resolve both possible severities for the mapped alertname so that - # regardless of which severity was fired, the alert is cleared. - mappings = NETDATA_ALERT_MAP.get(netdata_name, {}) - if mappings: - resolved = set() - for _status, m in mappings.items(): - alertname = m["alertname"] - severity = m["severity"] - service = m.get("service") - sig = (alertname, severity) - if sig in resolved: - continue - resolved.add(sig) - _send_resolve(url, key, secret, alertname, severity, service, netdata_name) - else: - # Generic fallback: resolve with both severities to be safe - for severity in ("critical", "warning"): - _send_resolve(url, key, secret, netdata_name, severity, None, netdata_name) - - -def _send_resolve(url, key, secret, alertname, severity, service, netdata_name): - now = now_utc() - labels = {"alertname": alertname, "severity": severity} - if service: - labels["service"] = service - annotations = {"summary": "resolved", "description": f"Alert {alertname} cleared by netdata at {fmt(now)}."} - payload = [{ - "labels": labels, - "annotations": annotations, - "generatorURL": f"http://nethsecurity/netdata/{netdata_name}", - 
"startsAt": fmt(now - timedelta(hours=1)), - "endsAt": fmt(now), - }] - http_request("POST", alerts_endpoint(url), data=payload, key=key, secret=secret) - - # --------------------------------------------------------------------------- # CLI # --------------------------------------------------------------------------- @@ -404,21 +236,12 @@ def main(): p_list.add_argument("--state", help="Filter by state (active, suppressed, unprocessed)") p_list.add_argument("--severity", help="Filter by severity label") - # netdata (internal, called from health_alarm_notify.conf) - p_netdata = sub.add_parser("netdata", help="Handle a netdata alarm notification (internal use)") - p_netdata.add_argument("--alertname", required=True, help="Netdata alarm name (${name})") - p_netdata.add_argument("--status", required=True, help="Netdata alarm status (${status})") - p_netdata.add_argument("--chart", required=True, help="Netdata chart name (${chart})") - p_netdata.add_argument("--family", default="", help="Netdata chart family (${family})") - p_netdata.add_argument("--value", default="", help="Metric value that triggered the alarm (${value})") - args = parser.parse_args() dispatch = { "fire": cmd_fire, "resolve": cmd_resolve, "list": cmd_list, - "netdata": cmd_netdata, } dispatch[args.command](args) From 7e6ac6d45728147babaaecfad223bb4dc39f5855 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Thu, 23 Apr 2026 16:33:23 +0200 Subject: [PATCH 38/39] feat: replace ns.netdata API with ns.telegraf ping monitor Migrate ping monitoring from netdata's fping plugin to telegraf's native ping input plugin. This provides better performance, no external dependencies, and improved system compatibility. 
Changes: - Add ns.telegraf API handler for ping monitor configuration - Add ns.telegraf.json ACL definition for telegraf-manager role - Add telegraf.conf.d/ping.conf with native ping plugin configuration - Remove ns.netdata API handler and ACL (netdata integration) - Update ns-api Makefile to install new API handler - Update telegraf Makefile to install ping.conf and add inputs.ping tag The new API provides the same interface: - get-configuration: retrieve current ping hosts - set-hosts: configure hosts to ping The ping plugin uses native method (method="native") which sends ICMP packets directly without external ping command, requiring CAP_NET_RAW capability or root privileges. Metrics are tagged with influxdb_db="ping-metrics" for proper InfluxDB database routing. BREAKING CHANGE: ns.netdata API is removed. Clients must migrate to ns.telegraf API for ping monitor configuration. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- packages/ns-api/Makefile | 6 +- packages/ns-api/files/ns.netdata | 66 ----------------- packages/ns-api/files/ns.netdata.json | 13 ---- packages/ns-api/files/ns.telegraf | 73 +++++++++++++++++++ packages/ns-api/files/ns.telegraf.json | 13 ++++ packages/telegraf/Makefile | 2 + .../telegraf/files/telegraf.conf.d/ping.conf | 26 +++++++ 7 files changed, 118 insertions(+), 81 deletions(-) delete mode 100755 packages/ns-api/files/ns.netdata delete mode 100644 packages/ns-api/files/ns.netdata.json create mode 100755 packages/ns-api/files/ns.telegraf create mode 100644 packages/ns-api/files/ns.telegraf.json create mode 100644 packages/telegraf/files/telegraf.conf.d/ping.conf diff --git a/packages/ns-api/Makefile b/packages/ns-api/Makefile index 53e07fed1..2ed199081 100644 --- a/packages/ns-api/Makefile +++ b/packages/ns-api/Makefile @@ -118,8 +118,10 @@ define Package/ns-api/install $(INSTALL_DATA) ./files/ns.mwan.json $(1)/usr/share/rpcd/acl.d/ $(INSTALL_BIN) ./files/ns.dpi $(1)/usr/libexec/rpcd/ $(INSTALL_DATA) 
./files/ns.dpi.json $(1)/usr/share/rpcd/acl.d/ - $(INSTALL_BIN) ./files/ns.netdata $(1)/usr/libexec/rpcd/ - $(INSTALL_DATA) ./files/ns.netdata.json $(1)/usr/share/rpcd/acl.d/ + $(INSTALL_BIN) ./files/ns.telegraf $(1)/usr/libexec/rpcd/ + $(INSTALL_DATA) ./files/ns.telegraf.json $(1)/usr/share/rpcd/acl.d/ + $(LN) ns.telegraf $(1)/usr/libexec/rpcd/ns.netdata + $(LN) ns.telegraf.json $(1)/usr/share/rpcd/acl.d/ns.netdata.json $(INSTALL_BIN) ./files/ns.storage $(1)/usr/libexec/rpcd/ $(INSTALL_DATA) ./files/ns.storage.json $(1)/usr/share/rpcd/acl.d/ $(INSTALL_BIN) ./files/ns.account $(1)/usr/libexec/rpcd/ diff --git a/packages/ns-api/files/ns.netdata b/packages/ns-api/files/ns.netdata deleted file mode 100755 index bb2309181..000000000 --- a/packages/ns-api/files/ns.netdata +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/python3 - -# -# Copyright (C) 2023 Nethesi3 S.r.l. -# SPDX-License-Identifier: GPL-2.0-only -# - -# Read and set fping configuration for netdata - -import os -import sys -import json -import subprocess -import configparser - -fping_conf_file = "/etc/netdata/fping.conf" -netdata_conf_file = "/etc/netdata/netdata.conf" - -def get_config(): - hosts = [] - # create a simpligied fping.conf if not exists - # the file must contain only one line: hosts="" - if not os.path.exists(fping_conf_file): - with open(fping_conf_file, 'w') as fp: - fp.write('hosts=""\n') - # parse the simplified config file - try: - with open(fping_conf_file, 'r') as fp: - line = fp.readline() - line = line[7:-2] - hosts = line.split(" ") - except: - pass - return {"hosts": hosts} - -def set_config(config): - # Enable and disable fping plugin on netdata - nparser = configparser.ConfigParser() - nparser.read(netdata_conf_file) - if len(config['hosts']) > 0: - nparser['plugins']['fping'] = 'yes' - else: - nparser['plugins']['fping'] = 'no' - with open(netdata_conf_file, 'w') as fpc: - nparser.write(fpc) - - try: - with open(fping_conf_file, 'w') as fp: - hosts = " ".join(config['hosts']) - 
fp.write(f'hosts="{hosts}"\n') - subprocess.run(["/etc/init.d/netdata", "restart"], check=True) - return {"success": True} - except: - return {"success": False} - -cmd = sys.argv[1] - -if cmd == 'list': - print(json.dumps({"get-configuration": {}, "set-hosts": {"hosts": ["1.1.1.1", "google.com"]}})) -else: - action = sys.argv[2] - if action == "get-configuration": - print(json.dumps(get_config())) - elif action == "set-hosts": - args = json.loads(sys.stdin.read()) - print(json.dumps(set_config(args))) diff --git a/packages/ns-api/files/ns.netdata.json b/packages/ns-api/files/ns.netdata.json deleted file mode 100644 index 5764ef6d4..000000000 --- a/packages/ns-api/files/ns.netdata.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "netdata-manager": { - "description": "Read and set netdata configuration", - "write": {}, - "read": { - "ubus": { - "ns.netdata": [ - "*" - ] - } - } - } -} diff --git a/packages/ns-api/files/ns.telegraf b/packages/ns-api/files/ns.telegraf new file mode 100755 index 000000000..eb32274bb --- /dev/null +++ b/packages/ns-api/files/ns.telegraf @@ -0,0 +1,73 @@ +#!/usr/bin/python3 + +# +# Copyright (C) 2026 Nethesis S.r.l. 
+# SPDX-License-Identifier: GPL-2.0-only +# + +# Read and set ping configuration for telegraf + +import os +import sys +import json +import subprocess +import re + +ping_conf_file = "/etc/telegraf.conf.d/ping.conf" + +def get_config(): + hosts = [] + if os.path.exists(ping_conf_file): + try: + with open(ping_conf_file, 'r') as fp: + content = fp.read() + # Find the urls line in TOML format: urls = ["host1", "host2"] + match = re.search(r'urls\s*=\s*(\[[^\]]*\])', content) + if match: + urls_str = match.group(1) + # Parse JSON array + hosts = json.loads(urls_str) + except Exception: + pass + return {"hosts": hosts} + +def set_config(config): + try: + # Ensure directory exists + os.makedirs(os.path.dirname(ping_conf_file), exist_ok=True) + + # Create the telegraf ping configuration + with open(ping_conf_file, 'w') as fp: + fp.write('# Ping input plugin configuration\n') + fp.write('[[inputs.ping]]\n') + if len(config['hosts']) > 0: + # Format hosts as TOML array + hosts_str = json.dumps(config['hosts']) + fp.write(f' urls = {hosts_str}\n') + fp.write(' method = "native"\n') + fp.write(' count = 1\n') + fp.write(' ping_interval = 1.0\n') + fp.write(' deadline = 10\n') + fp.write(' [inputs.ping.tags]\n') + fp.write(' influxdb_db = "ping-metrics"\n') + else: + # Write empty config to disable + fp.write(' urls = []\n') + + # Restart telegraf service + subprocess.run(["/etc/init.d/telegraf", "restart"], check=True) + return {"success": True} + except Exception as e: + return {"success": False, "error": str(e)} + +cmd = sys.argv[1] + +if cmd == 'list': + print(json.dumps({"get-configuration": {}, "set-hosts": {"hosts": ["1.1.1.1", "google.com"]}})) +else: + action = sys.argv[2] + if action == "get-configuration": + print(json.dumps(get_config())) + elif action == "set-hosts": + args = json.loads(sys.stdin.read()) + print(json.dumps(set_config(args))) diff --git a/packages/ns-api/files/ns.telegraf.json b/packages/ns-api/files/ns.telegraf.json new file mode 100644 index 
000000000..972119685 --- /dev/null +++ b/packages/ns-api/files/ns.telegraf.json @@ -0,0 +1,13 @@ +{ + "telegraf-manager": { + "description": "Read and set telegraf ping monitor configuration", + "write": {}, + "read": { + "ubus": { + "ns.telegraf": [ + "*" + ] + } + } + } +} diff --git a/packages/telegraf/Makefile b/packages/telegraf/Makefile index 9cfe4745a..49d9da1ef 100644 --- a/packages/telegraf/Makefile +++ b/packages/telegraf/Makefile @@ -40,6 +40,7 @@ GO_PKG_TAGS:= \ inputs.netstat \ inputs.nftables \ inputs.nstat \ + inputs.ping \ inputs.processes \ inputs.sensors \ inputs.system \ @@ -76,6 +77,7 @@ define Package/telegraf/install $(INSTALL_DATA) ./files/telegraf.conf.d/os.conf $(1)/etc/telegraf.conf.d/os.conf $(INSTALL_DATA) ./files/telegraf.conf.d/services.conf $(1)/etc/telegraf.conf.d/services.conf $(INSTALL_DATA) ./files/telegraf.conf.d/mwan.conf $(1)/etc/telegraf.conf.d/mwan.conf + $(INSTALL_DATA) ./files/telegraf.conf.d/ping.conf $(1)/etc/telegraf.conf.d/ping.conf $(INSTALL_DIR) $(1)/usr/libexec $(INSTALL_BIN) ./files/telegraf-services $(1)/usr/libexec/telegraf-services $(INSTALL_BIN) ./files/telegraf-mwan $(1)/usr/libexec/telegraf-mwan diff --git a/packages/telegraf/files/telegraf.conf.d/ping.conf b/packages/telegraf/files/telegraf.conf.d/ping.conf new file mode 100644 index 000000000..40c7c8087 --- /dev/null +++ b/packages/telegraf/files/telegraf.conf.d/ping.conf @@ -0,0 +1,26 @@ +# Ping input plugin - monitors ICMP ping to configured hosts +# Uses native method for better performance and no external dependencies + +[[inputs.ping]] + # Hosts to send ping packets to + urls = [] + + # Method: "native" for improved compatibility and performance + # Uses privileged raw ICMP sockets (requires CAP_NET_RAW or root) + method = "native" + + # Number of ping packets to send per interval + count = 1 + + # Time to wait between sending ping packets (seconds) + ping_interval = 1.0 + + # Total ping deadline (seconds) + deadline = 10 + + # Data size for ping 
packets (bytes) + size = 56 + + # Tags for metric routing + [inputs.ping.tags] + influxdb_db = "ping-metrics" From 15cffa523294c8b5ef6ee02c0f4d668e1bb3d647 Mon Sep 17 00:00:00 2001 From: Giacomo Sanchietti Date: Thu, 23 Apr 2026 17:12:14 +0200 Subject: [PATCH 39/39] feat: update charts to use Victoria Metrics Migrate the following APIs: - ns.report: latency-and-quality-report - ns.dashboard: interface-traffic Replace data from Netdata with data from Victoria Metrics: netdata is now deprecated and will be removed in the future. Assisted-by: Copilot:Sonnet4.6 --- packages/ns-api/README.md | 11 ++-- packages/ns-api/files/ns.dashboard | 29 ++++++---- packages/ns-api/files/ns.report | 87 ++++++++++++++++++++---------- packages/ns-api/files/ns.telegraf | 2 +- 4 files changed, 85 insertions(+), 44 deletions(-) diff --git a/packages/ns-api/README.md b/packages/ns-api/README.md index 887298b76..cdc5f31ed 100644 --- a/packages/ns-api/README.md +++ b/packages/ns-api/README.md @@ -2436,7 +2436,10 @@ Response example: ### traffic-interface -Return an array of point describing the network traffic in the last hour: +Return an array of points describing the network traffic in the last hour. +Data is sourced from Victoria Metrics using `net_bytes_recv` and `net_bytes_sent` Telegraf counters, +converted to kb/s (kilobits per second). Labels are Unix timestamps in descending order (newest first), +with one point every 20 seconds (~180 points total). ``` api-cli ns.dashboard interface-traffic --data '{"interface": "eth0"}' ``` @@ -7931,7 +7934,7 @@ Output example: ### latency-and-quality-report -Report latency metrics (minimum, maximum and average) and connectivy quality data (packet delivery rate) for every host configured in Netdata fping configuration file, located at `/etc/netdata/fping.conf`. 
+Report latency metrics (minimum, maximum and average) and connectivity quality data (packet delivery rate) for every host configured in the Telegraf ping plugin configuration file, located at `/etc/telegraf.conf.d/ping.conf`. Usage example: ``` api-cli ns.report latency-and-quality-report @@ -7981,7 +7984,7 @@ Output example: ], [ 1731485262, - 99.8152174 + 100 ], [ 1731484894, @@ -8031,7 +8034,7 @@ Output example: ], [ 1731485262, - 99.8152174 + 100 ], [ 1731484894, diff --git a/packages/ns-api/files/ns.dashboard b/packages/ns-api/files/ns.dashboard index 449fec46e..ca37bca56 100644 --- a/packages/ns-api/files/ns.dashboard +++ b/packages/ns-api/files/ns.dashboard @@ -12,6 +12,8 @@ import os import sys import json import subprocess +import time +import urllib.parse import urllib.request from euci import EUci from nethsec import utils, ovpn @@ -274,17 +276,24 @@ def system_info(): def interface_traffic(interface): ret = {"labels": [], "data": []} - # retrieve from netdata the traffic for the last hour - url = f'http://127.0.0.1:19999/api/v1/data?chart=net.{interface}&after=-3600&points=180&options=abs' - try: - with urllib.request.urlopen(url, timeout=10) as fu: - data = json.loads(fu.read()) - except: - return ret + vm_url = "http://127.0.0.1:8428/api/v1/query_range" + now = int(time.time()) + one_hour_ago = now - 3600 + + def vm_query(expr): + params = urllib.parse.urlencode({"query": expr, "start": one_hour_ago, "end": now, "step": 20}) + with urllib.request.urlopen(f"{vm_url}?{params}", timeout=5) as resp: + data = json.loads(resp.read()) + result = data.get("data", {}).get("result", []) + return result[0].get("values", []) if result else [] - for record in data["data"]: - ret["labels"].append(record[0]) - ret["data"].append([record[1], record[2]]) + try: + recv = vm_query(f'rate(net_bytes_recv{{interface="{interface}"}}[20s]) * 8 / 1000') + sent = vm_query(f'rate(net_bytes_sent{{interface="{interface}"}}[20s]) * 8 / 1000') + ret["labels"] = [int(ts) for ts, 
_ in reversed(recv)] + ret["data"] = [[float(r), float(s)] for (_, r), (_, s) in zip(reversed(recv), reversed(sent))] + except Exception: + pass return ret diff --git a/packages/ns-api/files/ns.report b/packages/ns-api/files/ns.report index 1eca1eca1..e188c25a3 100755 --- a/packages/ns-api/files/ns.report +++ b/packages/ns-api/files/ns.report @@ -15,6 +15,7 @@ import subprocess from datetime import datetime from collections import defaultdict from nethsec import utils +import urllib.parse import urllib.request from euci import EUci @@ -324,41 +325,69 @@ def ovpnrw_bytes_by_hour_and_user(instance, day, user): return {"hours": hours_bytes} -def get_fping_hosts(): - # read fping hosts from /etc/netdata/fping.conf - try: - with open("/etc/netdata/fping.conf", 'r') as fp: - line = fp.readline() - line = line[7:-2] - hosts = line.split(" ") - return hosts - except: - return [] - - -def get_netdata_chart_data(chart_name): - ret = {"labels": [], "data": []} - # retrieve chart data from netdata - url = f'http://127.0.0.1:19999/api/v1/data?chart={chart_name}&after=-3600&points=180&options=abs' +def get_ping_hosts(): + # read ping hosts from telegraf configuration + ping_conf_file = "/etc/telegraf.conf.d/ping.conf" + hosts = [] + if os.path.exists(ping_conf_file): + try: + with open(ping_conf_file, 'r') as fp: + content = fp.read() + # Find the urls line in TOML format: urls = ["host1", "host2"] + match = re.search(r'urls\s*=\s*(\[[^\]]*\])', content) + if match: + urls_str = match.group(1) + # Parse JSON array + hosts = json.loads(urls_str) + except Exception: + pass + return hosts + + +def get_victoria_metrics_ping_data(host): + """ + Query Victoria Metrics for ping metrics. 
+ Returns: {"latency": {"labels": [...], "data": [...]}, "quality": {"labels": [...], "data": [...]}} + """ + ret_latency = {"labels": ["time", "minimum", "maximum", "average"], "data": []} + ret_quality = {"labels": ["time", "returned"], "data": []} + + vm_url = "http://127.0.0.1:8428/api/v1/query_range" + now = int(time.time()) + one_hour_ago = now - 3600 + timeout = 5 + + def vm_query(metric_expr): + params = urllib.parse.urlencode({'query': metric_expr, 'start': one_hour_ago, 'end': now, 'step': 20}) + with urllib.request.urlopen(f"{vm_url}?{params}", timeout=timeout) as resp: + data = json.loads(resp.read()) + result = data.get('data', {}).get('result', []) + return result[0].get('values', []) if result else [] + try: - with urllib.request.urlopen(url, timeout=10) as fu: - data = json.loads(fu.read()) - except: - return ret - return data + min_values = vm_query(f'ping_minimum_response_ms{{url="{host}"}}') + max_values = vm_query(f'ping_maximum_response_ms{{url="{host}"}}') + avg_values = vm_query(f'ping_average_response_ms{{url="{host}"}}') + + ret_latency["data"] = [ + [int(ts), float(mn), float(mx), float(av)] + for (ts, mn), (_, mx), (_, av) in zip(min_values, max_values, avg_values) + ] + + loss_values = vm_query(f'100 - ping_percent_packet_loss{{url="{host}"}} or 100 - ping_percent_reply_loss{{url="{host}"}}') + ret_quality["data"] = [[int(ts), float(val)] for ts, val in loss_values] + + except Exception as e: + print(f"Error querying Victoria Metrics for {host}: {str(e)}", file=sys.stderr) + + return {"latency": ret_latency, "quality": ret_quality} def latency_and_quality_report(): - hosts = get_fping_hosts() + hosts = get_ping_hosts() ret = {} for host in hosts: - host_replaced = host.replace('.', '_') - latency_chart_data = get_netdata_chart_data(f'fping.{host_replaced}_latency') - quality_chart_data = get_netdata_chart_data(f'fping.{host_replaced}_quality') - ret[host] = { - "latency": latency_chart_data, - "quality": quality_chart_data - } + 
ret[host] = get_victoria_metrics_ping_data(host) return ret diff --git a/packages/ns-api/files/ns.telegraf b/packages/ns-api/files/ns.telegraf index eb32274bb..9f2a42c46 100755 --- a/packages/ns-api/files/ns.telegraf +++ b/packages/ns-api/files/ns.telegraf @@ -45,7 +45,7 @@ def set_config(config): hosts_str = json.dumps(config['hosts']) fp.write(f' urls = {hosts_str}\n') fp.write(' method = "native"\n') - fp.write(' count = 1\n') + fp.write(' count = 5\n') fp.write(' ping_interval = 1.0\n') fp.write(' deadline = 10\n') fp.write(' [inputs.ping.tags]\n')