Reduces Redis traffic for alert checks

Simone Mainardi 2017-05-03 13:23:28 +02:00
parent 2c53dda60b
commit 9c0f7499e7
4 changed files with 53 additions and 49 deletions
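In short: the housekeeping check now reads two plain flag keys (ntop.getCache) instead of enumerating a per-interface Redis set (ntop.getMembersCache), and a flag is deleted only when it was actually set, so the SQLite cleanup queries run only on demand. A minimal sketch of the two access patterns, paraphrasing the diff below (only the names shown in the diff are real; the surrounding loop over the interfaces is omitted):

    -- Before: enumerate a per-interface Redis set, then delete it.
    local k = get_housekeeping_set_name(ifId)
    local members = ntop.getMembersCache(k)      -- Redis SMEMBERS
    -- ...iterate members and run the cleanup queries...
    ntop.delCache(k)                             -- Redis DEL

    -- After: check two plain flag keys; a key is deleted only when it was set.
    local keys = get_make_room_keys(ifId)
    if ntop.getCache(keys["entities"]) == "1" then
       ntop.delCache(keys["entities"])
       -- ...make room for per-entity alerts...
    elseif ntop.getCache(keys["flows"]) == "1" then
       ntop.delCache(keys["flows"])
       -- ...make room for flow alerts...
    end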


@@ -145,8 +145,9 @@ function get_global_alerts_hash_key(alert_source)
    end
 end
 
-function get_housekeeping_set_name(ifId)
-   return "ntopng.alerts.ifid_"..ifId..".make_room"
+function get_make_room_keys(ifId)
+   return {flows="ntopng.prefs.alerts.ifid_"..ifId..".make_room_flow_alerts",
+           entities="ntopng.prefs.alerts.ifid_"..ifId..".make_room_closed_alerts"}
 end
 
 function ndpival_bytes(json, protoname)
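The producer side of these flags lives in the other files touched by this commit and is not shown here. Assuming it is done from Lua when an alert is stored (a hypothetical sketch, not part of this diff), it would amount to one plain SET per alert family, presumably replacing an add to the old housekeeping set:

    -- Hypothetical producer-side sketch (not in this diff): after storing a
    -- flow alert, flag the interface so the next housekeeping pass makes room.
    local keys = get_make_room_keys(ifId)
    ntop.setCache(keys["flows"], "1")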
@@ -1109,41 +1110,39 @@ function housekeepingAlertsMakeRoom()
       local ifId = getInterfaceId(n)
-      local k = get_housekeeping_set_name(ifId)
-      local members = ntop.getMembersCache(k)
-
-      for _, m in pairs(members) do
-         if m == "closed_alerts" then
-            local res = interface.queryAlertsRaw(false,
-                                                 "SELECT alert_entity, alert_entity_val, count(*) count",
-                                                 "GROUP BY alert_entity, alert_entity_val HAVING COUNT >= "..max_num_alerts_per_entity)
-
-            for _, e in pairs(res) do
-               local to_delete = e.count - (max_num_alerts_per_entity * 0.8) -- deletes 20% more alerts than the maximum number
-               to_delete = round(to_delete, 0)
-               --tprint({e=e, total=e.count, to_delete=to_delete, to_delete_not_discounted=(e.count - max_num_alerts_per_entity)})
-               local cleanup = interface.queryAlertsRaw(false,
-                                                        "DELETE",
-                                                        "WHERE alert_entity="..e.alert_entity.." AND alert_entity_val=\""..e.alert_entity_val.."\""..
-                                                        "ORDER BY alert_tstamp ASC LIMIT "..to_delete)
-               -- TODO: possibly raise a too many alerts for entity e
-            end
-
-         elseif m == "flows_alerts" then
-            local res = interface.queryFlowAlertsRaw("SELECT count(*) count", "WHERE 1=1")
-            local count = tonumber(res[1].count)
-            if count ~= nil and count >= max_num_flow_alerts then
-               local to_delete = count - (max_num_flow_alerts * 0.8)
-               to_delete = round(to_delete, 0)
-               local cleanup = interface.queryFlowAlertsRaw("DELETE",
-                                                            "ORDER BY alert_tstamp ASC LIMIT "..to_delete)
-               --tprint({total=count, to_delete=to_delete, cleanup=cleanup})
-               --tprint(cleanup)
-               -- TODO: possibly raise a too many flow alerts
-            end
-         end
-
-         ntop.delCache(k)
-      end
+      local k = get_make_room_keys(ifId)
+
+      if ntop.getCache(k["entities"]) == "1" then
+         ntop.delCache(k["entities"])
+         local res = interface.queryAlertsRaw(false,
+                                              "SELECT alert_entity, alert_entity_val, count(*) count",
+                                              "GROUP BY alert_entity, alert_entity_val HAVING COUNT >= "..max_num_alerts_per_entity)
+
+         for _, e in pairs(res) do
+            local to_delete = e.count - (max_num_alerts_per_entity * 0.8) -- deletes 20% more alerts than the maximum number
+            to_delete = round(to_delete, 0)
+            --tprint({e=e, total=e.count, to_delete=to_delete, to_delete_not_discounted=(e.count - max_num_alerts_per_entity)})
+            local cleanup = interface.queryAlertsRaw(false,
+                                                     "DELETE",
+                                                     "WHERE alert_entity="..e.alert_entity.." AND alert_entity_val=\""..e.alert_entity_val.."\""..
+                                                     "ORDER BY alert_tstamp ASC LIMIT "..to_delete)
+            -- TODO: possibly raise a too many alerts for entity e
+         end
+
+      elseif ntop.getCache(k["flows"]) == "1" then
+         ntop.delCache(k["flows"])
+         local res = interface.queryFlowAlertsRaw("SELECT count(*) count", "WHERE 1=1")
+         local count = tonumber(res[1].count)
+         if count ~= nil and count >= max_num_flow_alerts then
+            local to_delete = count - (max_num_flow_alerts * 0.8)
+            to_delete = round(to_delete, 0)
+            local cleanup = interface.queryFlowAlertsRaw("DELETE",
+                                                         "ORDER BY alert_tstamp ASC LIMIT "..to_delete)
+            --tprint({total=count, to_delete=to_delete, cleanup=cleanup})
+            --tprint(cleanup)
+            -- TODO: possibly raise a too many flow alerts
+         end
+      end
    end
 end
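For reference, the make-room arithmetic is unchanged by this commit: the oldest rows are deleted down to 80% of the configured maximum rather than just below it, so housekeeping does not have to fire again immediately. A worked example with assumed values (max_num_flow_alerts is a preference; 1000 is a made-up figure):

    -- Assumed values, for illustration only.
    local max_num_flow_alerts = 1000
    local count = 1050                                      -- current flow alerts
    local to_delete = count - (max_num_flow_alerts * 0.8)   -- 1050 - 800 = 250
    -- The 250 oldest alerts are deleted, leaving 800 (80% of the maximum).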