--
-- (C) 2013-22 - ntop.org
--

local dirs = ntop.getDirs()
package.path = dirs.installdir .. "/scripts/lua/modules/pools/?.lua;" .. package.path

require "lua_utils"
require "db_utils"
require "rrd_paths"
local dkjson = require("dkjson")
local top_talkers_utils = require "top_talkers_utils"
local os_utils = require "os_utils"
local graph_common = require "graph_common"
local have_nedge = ntop.isnEdge()
local ts_utils = require("ts_utils")

-- Interval, in seconds, between interface behavior updates
local iface_behavior_update_freq = 300 --Seconds

-- ########################################################

local graph_utils = {}

-- ########################################################

if(ntop.isPro()) then
   -- On pro builds nv_graph_utils is included as part of this module
   package.path = dirs.installdir .. "/pro/scripts/lua/modules/?.lua;" .. package.path
   graph_utils = require "nv_graph_utils"
end

-- ########################################################

-- Palette used to color chart series
-- (second half from https://github.com/mbostock/d3/wiki/Ordinal-Scales)
graph_utils.graph_colors = {
   '#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
   '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf',
   -- https://github.com/mbostock/d3/wiki/Ordinal-Scales
   '#ff7f0e', '#ffbb78', '#1f77b4', '#aec7e8', '#2ca02c',
   '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
   '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
   '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5'
}

-- ########################################################

-- @brief Pick the palette entry for the index-th series, wrapping around
--        the color table (1-based result via the `% #tbl + 1` idiom)
function graph_utils.get_html_color(index)
   return graph_utils.graph_colors[(index % #graph_utils.graph_colors) + 1]
end

-- ########################################################

-- @brief Ensure that the provided series have the same number of points.
--        This is a requirement for the charts.
-- @param series a list of series to fix. The format of each serie is the one
--        returned by ts_utils.query
-- @note the series are modified in place
function graph_utils.normalizeSeriesPoints(series)
   local max_count = 0
   local min_step = math.huge
   local ts_common = require("ts_common")

   -- Determine the longest series and the smallest step among all series
   for _, serie in pairs(series) do
      max_count = math.max(max_count, #serie.series[1].data)
      min_step = math.min(min_step, serie.step)
   end

   if max_count > 0 then
      for _, serie in pairs(series) do
         local count = #serie.series[1].data

         if count ~= max_count then
            serie.count = max_count

            for _, serie_data in pairs(serie.series) do
               -- The way this function performs the upsampling is partial:
               -- only points are upsampled, times are not adjusted.
               -- In addition, max_count is fixed, which causes series with
               -- different lengths to be upsampled differently (a short
               -- series ends up with a much smaller effective step).
               -- TODO: adjust timeseries times.
               -- TODO: handle series with different start and end times.
               serie_data.data = ts_common.upsampleSerie(serie_data.data, max_count)
               -- The new step needs adjusting too: it shrinks by the
               -- fraction of old vs new point counts.
local new_step = round(serie.step * count / max_count, 0)
               serie.step = new_step
               serie_data.step = new_step
               serie_data.count = max_count
            end -- for serie_data
         end -- if count ~= max_count
      end -- for serie
   end -- if max_count > 0
end -- normalizeSeriesPoints

-- ########################################################

-- @brief Compute the total per-protocol traffic volume of an interface
--        over [start_time, end_time].
-- @param ifName the interface name (resolved to an id via getInterfaceId)
-- @param start_time, end_time epoch bounds of the query
-- @param ts_options optional options forwarded to ts_utils.query
-- @return a table protocol_name -> total bytes (only protocols with traffic)
function graph_utils.getProtoVolume(ifName, start_time, end_time, ts_options)
   -- FIX: ifId and ret were accidental globals; declared local to avoid
   -- leaking state across scripts sharing the same Lua state
   local ifId = getInterfaceId(ifName)
   local series = ts_utils.listSeries("iface:ndpi", {ifid = ifId}, start_time)
   local ret = { }

   for _, tags in ipairs(series or {}) do
      -- NOTE: this could be optimized via a dedicated driver call
      local data = ts_utils.query("iface:ndpi", tags, start_time, end_time, ts_options)

      if(data ~= nil) and (data.statistics.total > 0) then
         ret[tags.protocol] = data.statistics.total
      end
   end

   return(ret)
end

-- ########################################################

-- @brief Print a two-part (sent vs received) breakdown bar.
-- @param sent, rcvd raw values of the two directions
-- @param sentLabel, rcvdLabel labels shown on the two bar halves
-- @param thresholdLow, thresholdHigh optional percentages: outside this band
--        the corresponding label is visually emphasized
function graph_utils.breakdownBar(sent, sentLabel, rcvd, rcvdLabel, thresholdLow, thresholdHigh)
   if((sent+rcvd) > 0) then
      -- FIX: sent2rcvd was an accidental global; local is sufficient here
      local sent2rcvd = round((sent * 100) / (sent+rcvd), 0)
      -- io.write("****>> "..sent.."/"..rcvd.."/"..sent2rcvd.."\n")

      if((thresholdLow == nil) or (thresholdLow < 0)) then thresholdLow = 0 end
      if((thresholdHigh == nil) or (thresholdHigh > 100)) then thresholdHigh = 100 end

      if(sent2rcvd < thresholdLow) then sentLabel = ' '..sentLabel
      elseif(sent2rcvd > thresholdHigh) then rcvdLabel = ' '..rcvdLabel end

      -- NOTE(review): the HTML payload of this print() was stripped during
      -- extraction; the opening quote is continued on the following line.
      print('
-- NOTE(review): the region below was corrupted during extraction — the HTML
-- payloads of the print()/[[ ]] string literals were stripped at the line
-- boundaries, leaving several string literals unterminated. It is kept
-- byte-identical on purpose: restore this region from the original
-- graph_utils.lua before attempting any edit. It contains (in order):
-- breakdownBar tail, percentageBar, makeProgressBar, stackedProgressBars,
-- getMinZoomResolution, drawGraphs, printProtocolQuota, poolDropdown,
-- printPoolChangeDropdown and the head of printCategoryDropdownButton.
'..sentLabel) print('
' .. rcvdLabel .. '
') else print(' ') end end -- ######################################################## function graph_utils.percentageBar(total, value, valueLabel) -- io.write("****>> "..total.."/"..value.."\n") if((total ~= nil) and (total > 0)) then pctg = round((value * 100) / total, 0) print('
'..valueLabel) print('
') else print(' ') end end -- ######################################################## function graph_utils.makeProgressBar(percentage) -- nan check if percentage ~= percentage then return "" end local perc_int = round(percentage) return '
'.. round(percentage, 1) ..' %' end -- ######################################################## --! @brief Prints stacked progress bars with a legend --! @total the raw total value (associated to full bar width) --! @param bars a table with elements in the following format: --! - title: the item legend title --! - value: the item raw value --! - class: the bootstrap color class, usually: "default", "info", "danger", "warning", "success" --! @param other_label optional name for the "other" part of the bar. If nil, it will not be shown. --! @param formatter an optional item value formatter --! @param css_class an optional css class to apply to the progress div --! @skip_zero_values don't display values containing only zero --! @return html for the bar function graph_utils.stackedProgressBars(total, bars, other_label, formatter, css_class, skip_zero_values) local res = {} local cumulative = 0 local cumulative_perc = 0 local skip_zero_values = skip_zero_values or false formatter = formatter or (function(x) return x end) -- The bars res[#res + 1] = [[
]] for _, bar in ipairs(bars) do cumulative = cumulative + bar.value end if cumulative > total then total = cumulative end for _, bar in ipairs(bars) do local percentage = round(bar.value * 100 / total, 2) if cumulative_perc + percentage > 100 then percentage = 100 - cumulative_perc end cumulative_perc = cumulative_perc + percentage if bar.class == nil then bar.class = "primary" end if bar.style == nil then bar.style = "" end if bar.link ~= nil then res[#res + 1] = [[]] else res[#res + 1] = [[
]] end if bar.link ~= nil then res[#res + 1] = [[]] end end res[#res + 1] = [[
]] -- The legend res[#res + 1] = [[
]] local legend_items = bars if other_label ~= nil then legend_items = bars legend_items[#legend_items + 1] = { title = other_label, class = "empty", style = "", value = math.max(total - cumulative, 0), } end num = 0 for _, bar in ipairs(legend_items) do if skip_zero_values and bar.value == 0 then goto continue end res[#res + 1] = [[]] if(num > 0) then res[#res + 1] = [[
]] end if bar.link ~= nil then res[#res + 1] = [[]] end res[#res + 1] = [[ ]] if bar.link ~= nil then res[#res + 1] = [[]] end res[#res + 1] = [[ ]] .. bar.title .. " (".. formatter(bar.value) ..")
" num = num + 1 ::continue:: end res[#res + 1] = [[  -  ]] .. i18n("total") .. ": ".. formatter(total) .."" return table.concat(res) end -- ######################################################## local function getMinZoomResolution(schema) local schema_obj = ts_utils.getSchema(schema) if schema_obj then if schema_obj.options.step >= 300 then return '30m' elseif schema_obj.options.step >= 60 then return '5m' end end return '1m' end -- ################################################# function graph_utils.drawGraphs(ifid, schema, tags, zoomLevel, baseurl, selectedEpoch, options, show_graph) local page_utils =require("page_utils") -- Do not require at the top as it could conflict with script_manager.getMenuEntries local debug_rrd = false local is_system_interface = page_utils.is_system_view() options = options or {} if((selectedEpoch == nil) or (selectedEpoch == "")) then -- Refresh the page every minute unless: -- ** a specific epoch has been selected or -- ** the user is browsing historical top talkers and protocols print[[ ]] end local min_zoom = getMinZoomResolution(schema) local min_zoom_k = 1 if(zoomLevel == nil) then zoomLevel = min_zoom end if graph_utils.drawProGraph then _ifstats = interface.getStats() graph_utils.drawProGraph(ifid, schema, tags, zoomLevel, baseurl, options, show_graph) return end nextZoomLevel = zoomLevel; epoch = tonumber(selectedEpoch); for k,v in ipairs(graph_common.zoom_vals) do if graph_common.zoom_vals[k][1] == min_zoom then min_zoom_k = k end if(graph_common.zoom_vals[k][1] == zoomLevel) then if(k > 1) then nextZoomLevel = graph_common.zoom_vals[math.max(k-1, min_zoom_k)][1] end if(epoch ~= nil) then start_time = epoch - math.floor(graph_common.zoom_vals[k][3] / 2) end_time = epoch + math.floor(graph_common.zoom_vals[k][3] / 2) else end_time = os.time() start_time = end_time - graph_common.zoom_vals[k][3] end end end if options.tskey then -- this can contain a MAC address for local broadcast domain hosts -- table.clone needed to 
modify some parameters while keeping the original unchanged tags = table.clone(tags) tags.host = options.tskey end local data = ts_utils.query(schema, tags, start_time, end_time) if(data) then print [[
]] local page_params = { ts_schema = schema, zoom = zoomLevel or '', epoch = selectedEpoch or '', tskey = options.tskey, } if(options.timeseries) then print [[ ]] end -- options.timeseries print('Timeframe: 
\n') for k,v in ipairs(graph_common.zoom_vals) do -- display 1 minute button only for networks and interface stats -- but exclude applications. Application statistics are gathered -- every 5 minutes if graph_common.zoom_vals[k][1] == '1m' and min_zoom ~= '1m' then goto continue elseif graph_common.zoom_vals[k][1] == '5m' and min_zoom ~= '1m' and min_zoom ~= '5m' then goto continue end local params = table.merge(page_params, {zoom=graph_common.zoom_vals[k][1]}) -- Additional parameters if tags.protocol ~= nil then params["protocol"] = tags.protocol end if tags.category ~= nil then params["category"] = tags.category end local url = getPageUrl(baseurl, params) print('') if(graph_common.zoom_vals[k][1] == zoomLevel) then print([[]]) else print([[]]) end ::continue:: end print [[
]] local format_as_bps = true local format_as_bytes = false local formatter_fctn local label = data.series[1].label -- Attempt at reading the formatter from the options using the schema local formatter if options and options.timeseries then for _, cur_ts in pairs(options.timeseries or {}) do if cur_ts.schema == schema and cur_ts.value_formatter then formatter = cur_ts.value_formatter[1] or cur_ts.value_formatter break end end end if label == "load_percentage" then formatter_fctn = "NtopUtils.ffloat" format_as_bps = false elseif label == "resident_bytes" then formatter_fctn = "NtopUtils.bytesToSize" format_as_bytes = true elseif string.contains(label, "pct") then formatter_fctn = "NtopUtils.fpercent" format_as_bps = false format_as_bytes = false elseif schema == "process:num_alerts" then formatter_fctn = "NtopUtils.falerts" format_as_bps = false format_as_bytes = false elseif label:contains("millis") or label:contains("_ms") then formatter_fctn = "NtopUtils.fmillis" format_as_bytes = false format_as_bps = false elseif string.contains(label, "packets") or string.contains(label, "flows") or label:starts("num_") or label:contains("alerts") then formatter_fctn = "NtopUtils.fint" format_as_bytes = false format_as_bps = false elseif formatter then -- The formatter specified in the options formatter_fctn = formatter format_as_bytes = false format_as_bps = false else formatter_fctn = (is_system_interface and "NtopUtils.fnone" or "NtopUtils.fbits") end print [[ ]] print(' \n') local stats = data.statistics if(stats ~= nil) then local minval_time = stats.min_val_idx and (data.start + data.step * stats.min_val_idx) or 0 local maxval_time = stats.max_val_idx and (data.start + data.step * stats.max_val_idx) or 0 local lastval_time = data.start + data.step * (data.count-1) local lastval = 0 for _, serie in pairs(data.series) do lastval = lastval + (serie.data[data.count] or 0) end if format_as_bytes then if(minval_time > 0) then print(' \n') end if(maxval_time > 0) then print(' 
\n') end print(' \n') print(' \n') print(' \n') elseif(not format_as_bps) then if(minval_time > 0) then print(' \n') end if(maxval_time > 0) then print(' \n') end print(' \n') print(' \n') print(' \n') elseif is_system_interface then if(minval_time > 0) then print(' \n') end if(maxval_time > 0) then print(' \n') end print(' \n') print(' \n') print(' \n') print(' \n') else if(minval_time > 0) then print(' \n') end if(maxval_time > 0) then print(' \n') end print(' \n') print(' \n') print(' \n') print(' \n') end end print(' \n') -- hide Minute Interface Top Talker if we are in system interface if top_talkers_utils.areTopEnabled(ifid) and not is_system_interface then print(' \n') end print [[
 TimeValue
Min' .. os.date("%x %X", minval_time) .. '' .. bytesToSize((stats.min_val*8) or "") .. '
Max' .. os.date("%x %X", maxval_time) .. '' .. bytesToSize((stats.max_val*8) or "") .. '
Last' .. os.date("%x %X", lastval_time) .. '' .. bytesToSize(lastval*8) .. '
Average' .. bytesToSize(stats.average*8) .. '
95th Percentile' .. bytesToSize(stats["95th_percentile"]*8) .. '
Min' .. os.date("%x %X", minval_time) .. '' .. formatValue(stats.min_val or "") .. '
Max' .. os.date("%x %X", maxval_time) .. '' .. formatValue(stats.max_val or "") .. '
Last' .. os.date("%x %X", lastval_time) .. '' .. formatValue(round(lastval), 1) .. '
Average' .. formatValue(round(stats.average, 2)) .. '
95th Percentile' .. formatValue(round(stats["95th_percentile"], 2)) .. '
Min' .. os.date("%x %X", minval_time) .. '' .. (formatValue(round(stats["min_val"], 2)) or "") .. '
Max' .. os.date("%x %X", maxval_time) .. '' .. (formatValue(round(stats["max_val"], 2)) or "") .. '
Last' .. os.date("%x %X", lastval_time) .. '' .. formatValue(round(lastval, 2)) .. '
Average' ..formatValue(round(stats["average"], 2)).. '
95th Percentile' ..(formatValue(round(stats["95th_percentile"], 2)) or '') .. '
Total Traffic' .. (stats.total or '') .. '
Min' .. os.date("%x %X", minval_time) .. '' .. bitsToSize((stats.min_val*8) or "") .. '
Max' .. os.date("%x %X", maxval_time) .. '' .. bitsToSize((stats.max_val*8) or "") .. '
Last' .. os.date("%x %X", lastval_time) .. '' .. bitsToSize(lastval*8) .. '
Average' .. bitsToSize(stats.average*8) .. '
95th Percentile' .. bitsToSize(stats["95th_percentile"]*8) .. '
Total Traffic' .. bytesToSize(stats.total) .. '
Time
Minute
Interface
Top Talkers
]] print[[
]] print[[
]] local ui_utils = require("ui_utils") print(ui_utils.render_notes(options.notes)) print[[ ]] else print("
No data found
") end -- if(data) end -- ################################################# -- -- proto table should contain the following information: -- string traffic_quota -- string time_quota -- string protoName -- -- ndpi_stats or category_stats can be nil if they are not relevant for the proto -- -- quotas_to_show can contain: -- bool traffic -- bool time -- function graph_utils.printProtocolQuota(proto, ndpi_stats, category_stats, quotas_to_show, show_td, hide_limit) local total_bytes = 0 local total_duration = 0 local output = {} if ndpi_stats ~= nil then -- This is a single protocol local proto_stats = ndpi_stats[proto.protoName] if proto_stats ~= nil then total_bytes = proto_stats["bytes.sent"] + proto_stats["bytes.rcvd"] total_duration = proto_stats["duration"] end else -- This is a category local cat_stats = category_stats[proto.protoName] if cat_stats ~= nil then total_bytes = cat_stats["bytes"] total_duration = cat_stats["duration"] end end if quotas_to_show.traffic then local bytes_exceeded = ((proto.traffic_quota ~= "0") and (total_bytes >= tonumber(proto.traffic_quota))) local lb_bytes = bytesToSize(total_bytes) local lb_bytes_quota = ternary(proto.traffic_quota ~= "0", bytesToSize(tonumber(proto.traffic_quota)), i18n("unlimited")) local traffic_taken = ternary(proto.traffic_quota ~= "0", math.min(total_bytes, tonumber(proto.traffic_quota)), 0) local traffic_remaining = math.max(tonumber(proto.traffic_quota) - traffic_taken, 0) local traffic_quota_ratio = round(traffic_taken * 100 / (traffic_taken + traffic_remaining), 0) or 0 if not traffic_quota_ratio then traffic_quota_ratio = 0 end if show_td then output[#output + 1] = [["..lb_bytes..ternary(hide_limit, "", " / "..lb_bytes_quota).."" end output[#output + 1] = [[
'.. ternary(traffic_quota_ratio == traffic_quota_ratio --[[nan check]], traffic_quota_ratio, 0)..[[%
]] if show_td then output[#output + 1] = ("") end end if quotas_to_show.time then local time_exceeded = ((proto.time_quota ~= "0") and (total_duration >= tonumber(proto.time_quota))) local lb_duration = secondsToTime(total_duration) local lb_duration_quota = ternary(proto.time_quota ~= "0", secondsToTime(tonumber(proto.time_quota)), i18n("unlimited")) local duration_taken = ternary(proto.time_quota ~= "0", math.min(total_duration, tonumber(proto.time_quota)), 0) local duration_remaining = math.max(proto.time_quota - duration_taken, 0) local duration_quota_ratio = round(duration_taken * 100 / (duration_taken+duration_remaining), 0) or 0 if show_td then output[#output + 1] = [["..lb_duration..ternary(hide_limit, "", " / "..lb_duration_quota).."" end output[#output + 1] = ([[
'.. ternary(duration_quota_ratio == duration_quota_ratio --[[nan check]], duration_quota_ratio, 0)..[[%
]]) if show_td then output[#output + 1] = ("") end end return table.concat(output, '') end -- ################################################# function graph_utils.poolDropdown(ifId, pool_id, exclude) local host_pools = require "host_pools" local host_pools_instance = host_pools:create() pool_id = tostring(pool_id) local output = {} exclude = exclude or {} for _,pool in ipairs(host_pools_instance:get_all_pools()) do pool.pool_id = tostring(pool.pool_id) if (not exclude[pool.pool_id]) or (pool.pool_id == pool_id) then output[#output + 1] = '' end end return table.concat(output, '') end -- ################################################# function graph_utils.printPoolChangeDropdown(ifId, pool_id, have_nedge) local output = {} output[#output + 1] = [[ ]] .. i18n(ternary(have_nedge, "nedge.user", "host_config.host_pool")) .. [[ ]] print(table.concat(output, '')) end -- ################################################# function graph_utils.printCategoryDropdownButton(by_id, cat_id_or_name, base_url, page_params, count_callback, skip_unknown) local function count_all(cat_id, cat_name) local cat_protos = interface.getnDPIProtocols(tonumber(cat_id)) return table.len(cat_protos) end cat_id_or_name = cat_id_or_name or "" count_callback = count_callback or count_all -- 'Category' button print('\'
\', ') page_params["category"] = cat_id_or_name end

-- #################################################

-- @brief Timeseries common to every layer-2 device (MAC)
function graph_utils.getDeviceCommonTimeseries()
   return {
      {schema="mac:arp_rqst_sent_rcvd_rpls", label=i18n("graphs.arp_rqst_sent_rcvd_rpls")},
   }
end

-- #################################################

-- Timeseries made available for an interface in the charts page.
-- Entry fields include: schema, label, optional value_formatter,
-- metrics_labels, check (schemas that must exist), skip flags and layout.
local default_timeseries = {
   {schema="iface:flows", label=i18n("graphs.active_flows")},
   {schema="iface:new_flows", label=i18n("graphs.new_flows"), value_formatter = {"NtopUtils.fflows", "NtopUtils.formatFlows"}},
   {schema="iface:alerted_flows", label=i18n("graphs.total_alerted_flows")},
   {schema="iface:hosts", label=i18n("graphs.active_hosts")},
   {schema="iface:engaged_alerts", label=i18n("show_alerts.engaged_alerts"), metrics_labels = { i18n("show_alerts.engaged_alerts") }, skip=hasAllowedNetworksSet()},
   {schema="iface:dropped_alerts", label=i18n("show_alerts.dropped_alerts"), metrics_labels = { i18n("show_alerts.dropped_alerts") }, skip=hasAllowedNetworksSet()},
   {schema="custom:flows_vs_local_hosts", label=i18n("graphs.flows_vs_local_hosts"), check={"iface:flows", "iface:local_hosts"}, step=60},
   {schema="custom:flows_vs_traffic", label=i18n("graphs.flows_vs_traffic"), check={"iface:flows", "iface:traffic"}, step=60},
   {schema="custom:memory_vs_flows_hosts", label=i18n("graphs.memory_vs_hosts_flows"), check={"process:resident_memory", "iface:flows", "iface:hosts"}},
   {schema="iface:devices", label=i18n("graphs.active_devices")},
   {schema="iface:http_hosts", label=i18n("graphs.active_http_servers"), nedge_exclude=1},
   {schema="iface:traffic", label=i18n("traffic")},
   {schema="iface:score", label=i18n("score"), metrics_labels = { i18n("graphs.cli_score"), i18n("graphs.srv_score")}},
   {schema="custom:score_vs_flows_hosts", label=i18n("graphs.score_vs_hosts_flows"), check={"iface:score", "iface:flows", "iface:hosts"}, metrics_labels = { i18n("graphs.cli_score"), i18n("graphs.srv_score")}},
   {schema="iface:traffic_rxtx", label=i18n("graphs.traffic_rxtx"), split_directions =
true, layout={ ["bytes_sent"] = "area", ["bytes_rcvd"] = "line" }, value_formatter = {"NtopUtils.fbits_from_bytes", "NtopUtils.bytesToSize"} },
   {schema="iface:packets_vs_drops", label=i18n("graphs.packets_vs_drops")},
   {schema="iface:nfq_pct", label=i18n("graphs.num_nfq_pct"), nedge_only=1},
   {schema="iface:hosts_anomalies", label=i18n("graphs.hosts_anomalies"), layout={ ["num_local_hosts_anomalies"] = "area", ["num_remote_hosts_anomalies"] = "area" }, metrics_labels = { i18n("graphs.loc_host_anomalies"), i18n("graphs.rem_host_anomalies")} },
   {schema="iface:disc_prob_bytes", label=i18n("graphs.discarded_probing_bytes"), nedge_exclude=1},
   {schema="iface:disc_prob_pkts", label=i18n("graphs.discarded_probing_packets"), nedge_exclude=1},
   {schema="iface:dumped_flows", label=i18n("graphs.dumped_flows"), metrics_labels = {i18n("graphs.dumped_flows"), i18n("graphs.dropped_flows")} },
   {schema="iface:zmq_recv_flows", label=i18n("graphs.zmq_received_flows"), nedge_exclude=1},
   {schema="custom:zmq_msg_rcvd_vs_drops", label=i18n("graphs.zmq_msg_rcvd_vs_drops"), check={"iface:zmq_rcvd_msgs", "iface:zmq_msg_drops"}, metrics_labels = {i18n("if_stats_overview.zmq_message_rcvd"), i18n("if_stats_overview.zmq_message_drops")}, value_formatter = {"NtopUtils.fmsgs", "NtopUtils.formatMessages"}},
   {schema="iface:zmq_flow_coll_drops", label=i18n("graphs.zmq_flow_coll_drops"), nedge_exclude=1, value_formatter = {"NtopUtils.fflows", "NtopUtils.formatFlows"}},
   {schema="iface:zmq_flow_coll_udp_drops", label=i18n("graphs.zmq_flow_coll_udp_drops"), nedge_exclude=1, value_formatter = {"NtopUtils.fpackets", "NtopUtils.formatPackets"}},
   {separator=1, nedge_exclude=1, label=i18n("tcp_stats")},
   {schema="iface:tcp_lost", label=i18n("graphs.tcp_packets_lost"), nedge_exclude=1},
   {schema="iface:tcp_out_of_order", label=i18n("graphs.tcp_packets_ooo"), nedge_exclude=1},
   --{schema="tcp_retr_ooo_lost", label=i18n("graphs.tcp_retr_ooo_lost"), nedge_exclude=1},
   {schema="iface:tcp_retransmissions", label=i18n("graphs.tcp_packets_retr"), nedge_exclude=1},
   {schema="iface:tcp_keep_alive", label=i18n("graphs.tcp_packets_keep_alive"), nedge_exclude=1},
   {separator=1, label=i18n("tcp_flags")},
   {schema="iface:tcp_syn", label=i18n("graphs.tcp_syn_packets"), nedge_exclude=1, pro_skip=1},
   {schema="iface:tcp_synack", label=i18n("graphs.tcp_synack_packets"), nedge_exclude=1, pro_skip=1},
   {schema="custom:iface_tcp_syn_vs_tcp_synack", label=i18n("graphs.tcp_syn_vs_tcp_synack"), nedge_exclude=1, metrics_labels = {"SYN", "SYN+ACK"}},
   {schema="iface:tcp_finack", label=i18n("graphs.tcp_finack_packets"), nedge_exclude=1},
   {schema="iface:tcp_rst", label=i18n("graphs.tcp_rst_packets"), nedge_exclude=1},
}

if ntop.isPro() then
   -- Pro builds add the behavioral/anomaly charts ahead of the defaults
   local pro_timeseries = {
      {schema="iface:score_anomalies", label=i18n("graphs.iface_score_anomalies")},
      {schema="iface:score_behavior", label=i18n("graphs.iface_score_behavior"), split_directions = true --[[ split RX and TX directions ]], first_timeseries_only = true, metrics_labels = {i18n("graphs.score"), i18n("graphs.lower_bound"), i18n("graphs.upper_bound")}},
      {schema="iface:traffic_anomalies", label=i18n("graphs.iface_traffic_anomalies")},
      {schema="iface:traffic_rx_behavior_v2", label=i18n("graphs.iface_traffic_rx_behavior"), split_directions = true --[[ split RX and TX directions ]], first_timeseries_only = true, time_elapsed = iface_behavior_update_freq, value_formatter = {"NtopUtils.fbits_from_bytes", "NtopUtils.bytesToSize"}, metrics_labels = {i18n("graphs.traffic_rcvd"), i18n("graphs.lower_bound"), i18n("graphs.upper_bound")}},
      {schema="iface:traffic_tx_behavior_v2", label=i18n("graphs.iface_traffic_tx_behavior"), split_directions = true --[[ split RX and TX directions ]], first_timeseries_only = true, time_elapsed = iface_behavior_update_freq, value_formatter = {"NtopUtils.fbits_from_bytes", "NtopUtils.bytesToSize"}, metrics_labels = {i18n("graphs.traffic_sent"), i18n("graphs.lower_bound"), i18n("graphs.upper_bound")}},
   }

   default_timeseries = table.merge(pro_timeseries, default_timeseries)
end

-- #################################################

-- @brief Return the list of timeseries shown for an interface
function graph_utils.get_default_timeseries()
   return(default_timeseries)
end

-- #################################################

-- @brief Return the chart layout configured for the given schema.
-- @param schema the timeseries schema name, e.g. "iface:traffic_rxtx"
-- @return the entry's layout table when declared, {"area"} otherwise
function graph_utils.get_timeseries_layout(schema)
   local layout = {"area"} -- default when no layout is declared

   for _, entry in pairs(default_timeseries) do
      if entry.schema == schema then
         if entry.layout then
            layout = entry.layout
         end
         break
      end
   end

   return(layout)
end

-- #################################################

return graph_utils