Merge pull request #322 from ChrispyBacon-dev/unstable
Some checks failed
Docker Image Build and Push / build_self_hosted (push) Has been cancelled
Docker Image Build and Push / build_github_hosted_fallback (push) Has been cancelled

v3.0.6
This commit is contained in:
Chris 2026-02-15 11:02:27 +01:00 committed by GitHub
commit 574fb4c5bf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 145 additions and 150 deletions

3
.gitignore vendored
View file

@ -88,4 +88,5 @@ data/state.json
*.bak
*.swp
*.log
logs/
logs/
DockFlare-Agent-prd/

View file

@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
---
## [v3.0.6] - 2026-02-15
### Fixed
- **Docker Event Listener Efficiency:** Significantly reduced log spam and improved resource utilization by implementing filtered Docker event listeners. DockFlare now only processes container events (start/stop) for containers explicitly opted-in via `dockflare.enable` or the legacy `cloudflare.tunnel.enable` labels, preventing unnecessary inspection of unmanaged containers. Reported in issue #296.
### Changed
- **Access Policy Label Rename:** Renamed the Access Policy label "None (Public - No App)" to "No Policy Assigned" in the Dashboard. This change accurately reflects that while no specific policy is assigned to the rule, the service might still be protected by a broader Zone Policy, removing the misleading "Public" designation.
---
## [v3.0.5] - 2025-10-14
### Added

View file

@ -11,7 +11,10 @@
</p>
<p align="center">
<a href="https://github.com/ChrispyBacon-dev/DockFlare/releases"><img src="https://img.shields.io/badge/Release-v3.0.5-blue.svg?style=for-the-badge" alt="Release"></a>
<a href="https://github.com/ChrispyBacon-dev/DockFlare/stargazers">
<img src="https://img.shields.io/github/stars/ChrispyBacon-dev/DockFlare?style=for-the-badge" alt="Stars">
</a>
<a href="https://github.com/ChrispyBacon-dev/DockFlare/releases"><img src="https://img.shields.io/badge/Release-v3.0.6-blue.svg?style=for-the-badge" alt="Release"></a>
<a href="https://hub.docker.com/r/alplat/dockflare"><img src="https://img.shields.io/docker/pulls/alplat/dockflare?style=for-the-badge" alt="Docker Pulls"></a>
<a href="https://www.python.org/"><img src="https://img.shields.io/badge/Made%20with-Python-1f425f.svg?style=for-the-badge" alt="Python"></a>
<a href="https://github.com/ChrispyBacon-dev/DockFlare/blob/main/LICENSE.MD"><img src="https://img.shields.io/badge/License-GPL--3.0-blue.svg?style=for-the-badge" alt="License"></a>

View file

@ -34,7 +34,7 @@ def _get_int_env(name, default, minimum=None):
return default
# --- DockFlare Version ---
APP_VERSION = "v3.0.5"
APP_VERSION = "v3.0.6"
# --- web: https://dockflare.app ---
# --- github: https://github.com/ChrispyBacon-dev/DockFlare ---

View file

@ -20,6 +20,7 @@ import time
import requests
import copy
import re
import threading
from docker.errors import NotFound, APIError
from flask import current_app
@ -525,102 +526,99 @@ def schedule_container_stop(container_id_val):
save_state()
publish_state_event('snapshot_refresh')
def docker_event_listener(stop_event_param, label_prefix):
    """Listen for Docker container events matching one label prefix and react to them.

    Runs as a long-lived daemon-thread target. Subscribes to the Docker event
    stream with a server-side filter so only container events for containers
    carrying the ``{label_prefix}enable`` label are delivered, then dispatches:
    ``start`` events to process_container_start() and
    ``stop``/``die``/``destroy``/``kill`` events to schedule_container_stop().

    Reconnects with linear backoff (capped at 30s) on stream errors and gives
    up after 5 consecutive failures.

    Args:
        stop_event_param: threading.Event used to signal shutdown; also used
            as the backoff sleeper so shutdown interrupts the wait.
        label_prefix: label namespace to filter on, e.g. "dockflare." —
            "enable" is appended to form the filter label.
    """
    # NOTE: this span was reconstructed from an interleaved old/new diff view;
    # it follows the post-change (filtered-listener, per-prefix) variant.
    if not docker_client:
        logging.error(f"Docker client unavailable, event listener for {label_prefix} cannot start.")
        return
    logging.info(f"Starting Docker event listener for label prefix: {label_prefix}")
    error_count = 0
    max_errors = 5  # consecutive stream failures tolerated before giving up
    if stop_event_param is None:
        logging.error(f"docker_event_listener for {label_prefix} called with None stop_event_param. Listener will not run correctly.")
        return
    # Server-side filter: only container events for opted-in containers,
    # which avoids inspecting every unmanaged container on the host.
    event_filters = {
        "type": "container",
        "label": f"{label_prefix}enable"
    }
    while not stop_event_param.is_set() and error_count < max_errors:
        try:
            logging.info(f"Connecting to Docker event stream for {label_prefix}...")
            events = docker_client.events(decode=True, since=int(time.time()), filters=event_filters)
            logging.info(f"Successfully connected to Docker event stream for {label_prefix}.")
            error_count = 0  # reset backoff after a successful (re)connect
            for event in events:
                if stop_event_param.is_set():
                    logging.info(f"Stop event received in listener for {label_prefix}, exiting loop.")
                    break
                action = event.get("Action")
                actor = event.get("Actor", {})
                cont_id = actor.get("ID")
                logging.debug(f"Docker Event ({label_prefix}): Action={action}, ID={cont_id[:12] if cont_id else 'N/A'}")
                if cont_id:
                    if action == "start":
                        try:
                            container_instance = docker_client.containers.get(cont_id)
                            process_container_start(container_instance)
                        except NotFound:
                            # Container vanished between event delivery and lookup.
                            logging.warning(f"Container {cont_id[:12]} not found despite 'start' event for {label_prefix}.")
                        except APIError as e_get_cont:
                            logging.error(f"Docker API error getting container {cont_id[:12]} for {label_prefix}: {e_get_cont}")
                        except Exception as e_proc_start:
                            logging.error(f"Error processing start event for {cont_id[:12]} from {label_prefix}: {e_proc_start}", exc_info=True)
                    elif action in ["stop", "die", "destroy", "kill"]:
                        try:
                            schedule_container_stop(cont_id)
                        except Exception as e_proc_stop:
                            logging.error(f"Error processing stop/die/destroy/kill event for {cont_id[:12]} from {label_prefix}: {e_proc_stop}", exc_info=True)
        except requests.exceptions.ConnectionError as e_conn_stream:
            error_count += 1
            logging.error(f"Docker listener ({label_prefix}) connection error: {e_conn_stream}. Reconnecting ({error_count}/{max_errors})...")
            if not stop_event_param.is_set():
                # Linear backoff, capped at 30s; wait() returns early on shutdown.
                stop_event_param.wait(min(30, 2 * error_count))
        except APIError as e_api_stream:
            error_count += 1
            logging.error(f"Docker listener ({label_prefix}) API error: {e_api_stream}. Reconnecting ({error_count}/{max_errors})...")
            if not stop_event_param.is_set():
                stop_event_param.wait(min(30, 2 * error_count))
        except Exception as e_unexp_stream:
            error_count += 1
            logging.error(f"Unexpected error in Docker event listener ({label_prefix}): {e_unexp_stream}. Reconnecting ({error_count}/{max_errors})...", exc_info=True)
            if not stop_event_param.is_set():
                stop_event_param.wait(min(30, 2 * error_count))
        if stop_event_param.is_set():
            break
    if error_count >= max_errors:
        logging.error(f"Docker event listener for {label_prefix} stopping after multiple consecutive errors.")
    logging.info(f"Docker event listener for {label_prefix} stopped.")
def start_event_listeners(stop_event):
    """Build one (unstarted) Docker event-listener thread per configured label prefix.

    Collects the primary, legacy, and custom label prefixes from config,
    drops empty values and duplicates, and wraps each remaining prefix in a
    daemon thread targeting docker_event_listener. The threads are returned
    to the caller, which is responsible for starting them.

    Args:
        stop_event: threading.Event passed through to each listener so they
            can be shut down together.

    Returns:
        list[threading.Thread]: the created (not yet started) listener threads.
    """
    prefixes = {
        prefix
        for prefix in (
            config.PRIMARY_LABEL_PREFIX,
            config.LEGACY_LABEL_PREFIX,
            config.CUSTOM_LABEL_PREFIX,
        )
        if prefix
    }
    listener_threads = []
    for prefix in prefixes:
        worker = threading.Thread(
            target=docker_event_listener,
            args=(stop_event, prefix),
            name=f"DockerEventListener-{prefix.strip('.')}",
            daemon=True,
        )
        listener_threads.append(worker)
        logging.info(f"Created event listener thread for prefix: {prefix}")
    return listener_threads
def _detect_zone_for_hostname(hostname):
if not hostname:
return None, None

View file

@ -465,91 +465,74 @@ def ensure_authenticated_default_policy(flask_app=None):
logging.error(f"Error verifying/updating default authenticated policy in Cloudflare: {e}")
def save_state():
    """Serialize the managed state and atomically persist it to disk as JSON.

    Under state_lock, snapshots managed_rules, access_groups, agents and
    identity_providers, converts each rule to a plain-dict form (datetimes
    become UTC ISO-8601 strings with a trailing 'Z'), then writes the whole
    state to config.STATE_FILE_PATH via a temp file + os.replace so readers
    never observe a partially written file. Per-rule serialization errors are
    logged and skipped; I/O errors are logged without raising.
    """
    # NOTE: this span was reconstructed from an interleaved old/new diff view;
    # it follows the post-change variant (reduced logging, same persistence flow).
    global managed_rules, access_groups
    with state_lock:
        current_thread_name = threading.current_thread().name
        logging.info(f"SAVE_STATE: Start. THREAD: {current_thread_name}. Items to save: {len(managed_rules)} rules, {len(access_groups)} access groups.")
        # Snapshot shared structures so serialization works on stable copies.
        serializable_rules = {}
        rules_to_iterate = list(managed_rules.items())
        groups_to_iterate = dict(access_groups)
        agents_to_iterate = dict(agents)
        idps_to_iterate = dict(identity_providers)
        for rule_key, rule in rules_to_iterate:
            try:
                data_to_serialize = {
                    "hostname": rule.get("hostname"),
                    "path": rule.get("path"),
                    "service": rule.get("service"),
                    "container_id": rule.get("container_id"),
                    "status": rule.get("status"),
                    "delete_at": None,
                    "zone_id": rule.get("zone_id"),
                    "no_tls_verify": rule.get("no_tls_verify", False),
                    "origin_server_name": rule.get("origin_server_name"),
                    "http_host_header": rule.get("http_host_header"),
                    "http2_origin": rule.get("http2_origin", False),
                    "disable_chunked_encoding": rule.get("disable_chunked_encoding", False),
                    "access_app_id": rule.get("access_app_id"),
                    "access_policy_type": rule.get("access_policy_type"),
                    "access_app_config_hash": rule.get("access_app_config_hash"),
                    "access_policy_ui_override": rule.get("access_policy_ui_override", False),
                    "rule_ui_override": rule.get("rule_ui_override", False),
                    "source": rule.get("source", "docker"),
                    "access_group_id": rule.get("access_group_id"),
                    "tunnel_id": rule.get("tunnel_id"),
                    "tunnel_name": rule.get("tunnel_name"),
                    "zone_name": rule.get("zone_name")
                }
                delete_at_val = rule.get("delete_at")
                if isinstance(delete_at_val, datetime):
                    # Store as UTC ISO-8601 with 'Z' suffix for portability.
                    data_to_serialize["delete_at"] = delete_at_val.astimezone(timezone.utc).isoformat().replace('+00:00', 'Z')
                serializable_rules[rule_key] = data_to_serialize
            except Exception as e_serialize_item:
                # Skip the broken rule rather than losing the whole state file.
                logging.error(f"SAVE_STATE_LOOP_ERROR: THREAD: {current_thread_name}. Error preparing rule for serialization '{rule_key}': {e_serialize_item}. Rule data: {rule}", exc_info=True)
                continue
        final_state_to_save = {
            "managed_rules": serializable_rules,
            "access_groups": groups_to_iterate,
            "agents": agents_to_iterate,
            "identity_providers": idps_to_iterate
        }
        try:
            state_dir = os.path.dirname(config.STATE_FILE_PATH)
            if not os.path.exists(state_dir):
                try:
                    os.makedirs(state_dir, exist_ok=True)
                except OSError as e_mkdir:
                    logging.error(f"SAVE_STATE: THREAD: {current_thread_name}. Mkdir error {e_mkdir}. Save failed.")
                    return
            # Write-then-rename keeps the state file atomic on POSIX systems.
            temp_file_path = config.STATE_FILE_PATH + ".tmp"
            with open(temp_file_path, 'w') as f:
                json.dump(final_state_to_save, f, indent=2)
            os.replace(temp_file_path, config.STATE_FILE_PATH)
            logging.info(f"SAVE_STATE: THREAD: {current_thread_name}. Successfully saved state to {config.STATE_FILE_PATH}")
        except Exception as e_save_io:
            logging.error(f"SAVE_STATE: THREAD: {current_thread_name}. File I/O or other error: {e_save_io}", exc_info=True)
        logging.info(f"SAVE_STATE: End. THREAD: {current_thread_name}.")
def add_agent(agent_id, agent_data):
"""

View file

@ -33,7 +33,7 @@ from app.core.tunnel_manager import (
update_cloudflared_container_status,
start_cloudflared_container
)
from app.core.docker_handler import docker_event_listener, process_container_start
from app.core.docker_handler import start_event_listeners, process_container_start
from app.core.reconciler import cleanup_expired_rules, reconcile_state_threaded
stop_event = threading.Event()
@ -72,9 +72,9 @@ def run_all_background_tasks():
logging.warning("Managed tunnel not fully initialized (ID/token missing). Background tasks needing tunnel ID may fail.")
if tunnel_ready_for_tasks:
logging.info("Starting core background task threads (Docker Listener, Cleanup Task)...")
event_thread = threading.Thread(target=docker_event_listener, args=(stop_event,), name="DockerEventListener", daemon=True)
threads_to_start.append(event_thread)
logging.info("Starting core background task threads (Docker Listeners, Cleanup Task)...")
event_threads = start_event_listeners(stop_event)
threads_to_start.extend(event_threads)
cleanup_thread = threading.Thread(target=cleanup_expired_rules, args=(stop_event,), name="CleanupTask", daemon=True)
threads_to_start.append(cleanup_thread)

View file

@ -227,7 +227,7 @@
{{ details.access_policy_type.replace('_', ' ') }}
</span>
{% elif details.get('status') == 'active' %}
<span class="opacity-60 italic">None (Public)</span>
<span class="opacity-60 italic">No Policy Assigned</span>
{% else %}
<span class="text-xs opacity-50">N/A</span>
{% endif %}
@ -462,7 +462,7 @@
<span class="label-text font-medium">2. Configure Manually</span>
</label>
<select name="manual_access_policy_type" id="manual_access_policy_type" class="select select-bordered w-full policy-type-select">
<option value="none" selected>None (Public - No App)</option>
<option value="none" selected>No Policy Assigned</option>
<option value="bypass">Bypass (Public App)</option>
</select>
</div>
@ -626,7 +626,7 @@
<span class="label-text font-medium">2. Configure Manually</span>
</label>
<select name="edit_access_policy_type" id="edit_manual_access_policy_type" class="select select-bordered w-full policy-type-select">
<option value="none">None (Public - No App)</option>
<option value="none">No Policy Assigned</option>
<option value="bypass">Bypass (Public App)</option>
</select>
</div>