refactoring - Testing Stage

This commit is contained in:
ChrispyBacon-dev 2025-05-25 21:52:33 +02:00
parent 3fc87991ad
commit 7315e4ac6f
31 changed files with 3941 additions and 388 deletions

View file

@ -70,7 +70,7 @@ jobs:
- name: Build and Push Docker Image (Multi-Arch)
uses: docker/build-push-action@v5
with:
context: .
context: ./dockflare
# Build for multiple architectures
platforms: linux/amd64 #,linux/arm64 # Enable multi-architecture builds
# Push only on direct pushes to stable or unstable branches

88
dockflare/.gitignore vendored Normal file
View file

@ -0,0 +1,88 @@
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*.manifest
*.spec
.python-version
.mypy_cache/
.dmypy.json
dmypy.json
.pyright_cache/
.pytest_cache/
.coverage
coverage.xml
htmlcov/
.tox/
.env/
.venv/
env/
venv/
ENV/
VENV/
venv.bak/
env.bak/
.spyderproject
.spyproject
.ropeproject
*.prof
*.pstat
*.cprof
node_modules/
.npm/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
.vscode/*
!.vscode/settings.json
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
!.vscode/*.code-workspace
.idea/
*.sublime-project
*.sublime-workspace
.atom/
.project
.pydevproject
.settings/
nbproject/
.DS_Store
.AppleDouble
.LSOverride
._*
.Spotlight-V100
.Trashes
Thumbs.db
ehthumbs.db
Desktop.ini
$RECYCLE.BIN/
*~
.directory
.env
data/state.json
*.tmp
*.bak
*.swp
*.log
logs/

View file

@ -15,21 +15,26 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Use an official Python runtime as a parent image
# Using slim variant for smaller size
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
# (License header remains the same)
FROM node:20-alpine as frontend-builder
LABEL stage=frontend-builder
WORKDIR /usr/src/app
COPY package.json ./
COPY package-lock.json* ./
WORKDIR /usr/src/frontend_build # Changed WORKDIR to avoid conflict with runtime WORKDIR /app
COPY package.json ./package.json
COPY package-lock.json* ./
RUN npm install
COPY tailwind.config.js ./
COPY postcss.config.js ./
COPY ./templates/input.css ./templates/input.css
COPY ./templates ./templates
COPY tailwind.config.js ./tailwind.config.js
COPY postcss.config.js ./postcss.config.js
COPY ./app/templates/input.css ./app/templates/input.css
COPY ./app/templates ./app/templates
RUN npm run build:css
FROM python:3.13-slim as runtime
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
WORKDIR /app
WORKDIR /app # This is correct, our application will run from /app inside the container
ENV CLOUDFLARED_VERSION="2024.1.5"
RUN apt-get update && apt-get install -y --no-install-recommends \
wget \
@ -48,13 +53,11 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
rm cloudflared-$CLOUDFLARED_ARCH.deb && \
cloudflared --version && \
mkdir -p /root/.cloudflared
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
RUN mkdir -p /app/static/css
RUN mkdir -p /app/static/images
COPY --from=frontend-builder /usr/src/app/static/css/output.css /app/static/css/output.css
COPY app.py .
COPY templates /app/templates/
COPY images /app/static/images/
COPY --from=frontend-builder /usr/src/frontend_build/app/static/css/output.css /app/static/css/output.css
COPY ./app /app
COPY ./images /app/static/images/
EXPOSE 5000
CMD ["python", "app.py"]
CMD ["python", "main.py"]

98
dockflare/app/__init__.py Normal file
View file

@ -0,0 +1,98 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/__init__.py
import logging
import queue
import sys
import os
from flask import Flask
import docker
from docker.errors import APIError
from . import config
# Process-wide state of the managed Cloudflare tunnel; "status_message" and
# "error" hold human-readable progress / failure text.
tunnel_state = { "name": config.TUNNEL_NAME, "id": None, "token": None, "status_message": "Initializing...", "error": None }
# Lifecycle info for the cloudflared agent container plus the outcome of the
# most recent action taken against it.
cloudflared_agent_state = { "container_status": "unknown", "last_action_status": None }
# Bounded in-memory queue of formatted log lines (size from config); the
# queue-backed log handler drops the oldest entry when it is full.
log_queue = queue.Queue(maxsize=config.MAX_LOG_QUEUE_SIZE)
# Shared formatter for both console and queue handlers.
log_formatter = logging.Formatter('%(asctime)s [%(levelname)s] [%(threadName)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
class QueueLogHandler(logging.Handler):
    """Logging handler that mirrors formatted records into a bounded queue.

    When the queue is full, the oldest entry is evicted to make room for the
    newest one; if space still cannot be made, the record is dropped and a
    diagnostic is printed to stderr.
    """

    def __init__(self, log_queue_instance):
        super().__init__()
        self.log_queue_instance = log_queue_instance

    def emit(self, record):
        formatted = self.format(record)
        target = self.log_queue_instance
        try:
            target.put_nowait(formatted)
            return
        except queue.Full:
            pass
        # Queue is full: evict the oldest entry, then retry exactly once.
        try:
            target.get_nowait()
            target.put_nowait(formatted)
        except queue.Empty:
            # A consumer drained the queue between the two operations; the
            # record is silently dropped in this rare race.
            pass
        except queue.Full:
            print("Log queue still full after attempting to make space, dropping message.", file=sys.stderr)
# Configure the root logger once at import time: everything at INFO and above
# goes both to stdout and to the bounded in-memory log queue.
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
queue_handler = QueueLogHandler(log_queue)
queue_handler.setFormatter(log_formatter)
queue_handler.setLevel(logging.INFO)
root_logger.addHandler(queue_handler)
# Connect to the Docker daemon eagerly at import time.  A short timeout keeps
# startup responsive; on any failure docker_client stays None, so callers must
# handle a missing Docker connection.
docker_client = None
try:
    docker_client = docker.from_env(timeout=10)
    docker_client.ping()  # verify the daemon actually responds
    logging.info("Successfully connected to Docker daemon.")
except APIError as e:
    logging.error(f"FATAL: Docker API error during initial connection: {e}")
    docker_client = None # Ensure it's None on APIError too
except Exception as e:
    logging.error(f"FATAL: Failed to connect to Docker daemon: {e}")
    docker_client = None
def create_app():
    """Build and configure the Flask application instance.

    Seeds ``reconciliation_info`` (the mutable progress tracker used during
    state reconciliation) and registers the web blueprint.

    Returns:
        Flask: the fully configured application.
    """
    app_instance = Flask(__name__)
    # NOTE(review): a fresh random secret key per process start invalidates
    # all existing sessions on every restart — confirm this is intended.
    app_instance.secret_key = os.urandom(24)
    app_instance.config['PREFERRED_URL_SCHEME'] = 'http'
    app_instance.reconciliation_info = {
        "in_progress": False,
        "progress": 0,
        "total_items": 0,
        "processed_items": 0,
        "start_time": 0,
        "status": "Not started"
    }
    with app_instance.app_context():
        # Imported here rather than at module top, presumably to avoid a
        # circular import with modules that import this package — TODO confirm.
        from .web import routes as web_routes
        app_instance.register_blueprint(web_routes.bp)
        logging.info("Web blueprint registered.")
    return app_instance

# Module-level application singleton, created at import time.
app = create_app()

92
dockflare/app/config.py Normal file
View file

@ -0,0 +1,92 @@
# app/config.py
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/config.py
import os
import sys
import logging
from dotenv import load_dotenv

# Pull variables from a local .env file (if present) before reading them.
load_dotenv()

# Retry policy for Cloudflare configuration updates (exponential backoff).
MAX_CF_UPDATE_RETRIES = 3
CF_UPDATE_RETRY_DELAY = 2       # seconds before the first retry
CF_UPDATE_BACKOFF_FACTOR = 2    # delay multiplier per subsequent attempt

# Core Cloudflare credentials / identifiers.
CF_API_TOKEN = os.getenv('CF_API_TOKEN')
CF_ACCOUNT_ID = os.getenv('CF_ACCOUNT_ID')
CF_ZONE_ID = os.getenv('CF_ZONE_ID')
CF_API_BASE_URL = "https://api.cloudflare.com/client/v4"

# Default headers for every Cloudflare API call; the Authorization header is
# only attached when a token is actually configured.
if CF_API_TOKEN:
    CF_HEADERS = {
        "Authorization": f"Bearer {CF_API_TOKEN}",
        "Content-Type": "application/json",
    }
else:
    CF_HEADERS = {
        "Content-Type": "application/json",
    }

# When true, DockFlare does not run its own cloudflared container but uses an
# externally managed tunnel identified by EXTERNAL_TUNNEL_ID.
USE_EXTERNAL_CLOUDFLARED = os.getenv('USE_EXTERNAL_CLOUDFLARED', 'false').lower() in ['true', '1', 't', 'yes']
EXTERNAL_TUNNEL_ID = os.getenv('EXTERNAL_TUNNEL_ID')
SCAN_ALL_NETWORKS = os.getenv('SCAN_ALL_NETWORKS', 'false').lower() in ['true', '1', 't', 'yes']
# Comma-separated list of zone names to scan; normalized to a stripped list.
TUNNEL_DNS_SCAN_ZONE_NAMES_STR = os.getenv('TUNNEL_DNS_SCAN_ZONE_NAMES', '')
TUNNEL_DNS_SCAN_ZONE_NAMES = [name.strip() for name in TUNNEL_DNS_SCAN_ZONE_NAMES_STR.split(',') if name.strip()]
TUNNEL_NAME = os.getenv("TUNNEL_NAME", "dockflared-tunnel")
if not USE_EXTERNAL_CLOUDFLARED:
    CLOUDFLARED_NETWORK_NAME = os.getenv('CLOUDFLARED_NETWORK_NAME', 'cloudflare-net')
    CLOUDFLARED_CONTAINER_NAME = os.getenv('CLOUDFLARED_CONTAINER_NAME', f"cloudflared-agent-{TUNNEL_NAME}")
else:
    # Not applicable when the cloudflared agent is managed externally.
    CLOUDFLARED_NETWORK_NAME = None
    CLOUDFLARED_CONTAINER_NAME = None
CLOUDFLARED_IMAGE = "cloudflare/cloudflared:latest"
LABEL_PREFIX = os.getenv('LABEL_PREFIX', 'cloudflare.tunnel')
GRACE_PERIOD_SECONDS = int(os.getenv('GRACE_PERIOD_SECONDS', 28800))  # 28800 s = 8 h
CLEANUP_INTERVAL_SECONDS = int(os.getenv('CLEANUP_INTERVAL_SECONDS', 300))
AGENT_STATUS_UPDATE_INTERVAL_SECONDS = int(os.getenv('AGENT_STATUS_UPDATE_INTERVAL_SECONDS', 10))
STATE_FILE_PATH = os.getenv('STATE_FILE_PATH', '/app/data/state.json')
MAX_LOG_QUEUE_SIZE = 200
MAX_CONCURRENT_DNS_OPS = int(os.getenv('MAX_CONCURRENT_DNS_OPS', 3))
RECONCILIATION_BATCH_SIZE = int(os.getenv('RECONCILIATION_BATCH_SIZE', 3))
ACCOUNT_EMAIL_CACHE_TTL = 3600  # seconds

# Validate required configuration and abort startup when incomplete.
REQUIRED_VARS_BASE = ["CF_API_TOKEN", "CF_ACCOUNT_ID"]
missing_vars = []
if not USE_EXTERNAL_CLOUDFLARED:
    # TUNNEL_NAME has a default above, so this only fires when it was
    # explicitly set to an empty value.
    if not TUNNEL_NAME:
        REQUIRED_VARS_BASE.append("TUNNEL_NAME")
else:
    if not EXTERNAL_TUNNEL_ID:
        REQUIRED_VARS_BASE.append("EXTERNAL_TUNNEL_ID")
for var_name in REQUIRED_VARS_BASE:
    if not globals().get(var_name):
        missing_vars.append(var_name)
if missing_vars:
    logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s')
    logging.error(f"FATAL: Missing required environment variables ({', '.join(missing_vars)})")
    sys.exit(1)
if not CF_ZONE_ID:
    logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s')
    logging.warning("CF_ZONE_ID not set. DNS management requires 'cloudflare.tunnel.zonename' label on containers or manual zone specification.")

View file

View file

@ -0,0 +1,380 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/core/access_manager.py
import logging
import json
import hashlib
import requests
import time
from app import config
from app.core import cloudflare_api
# Process-local cache of the Cloudflare account email, refreshed at most once
# per _ACCOUNT_EMAIL_CACHE_TTL seconds by get_cloudflare_account_email().
_ACCOUNT_EMAIL_CACHE_TTL = 3600
_cached_account_email = None
_cached_account_email_timestamp = 0
def _build_access_app_payload(hostname, name, session_duration, app_launcher_visible, self_hosted_domains, access_policies, allowed_idps=None, auto_redirect_to_identity=False):
payload = {
"name": name,
"domain": hostname,
"type": "self_hosted",
"session_duration": session_duration,
"app_launcher_visible": app_launcher_visible,
"self_hosted_domains": self_hosted_domains,
"allowed_idps": allowed_idps if allowed_idps else [],
"auto_redirect_to_identity": auto_redirect_to_identity,
}
if access_policies is not None:
payload["policies"] = access_policies
if allowed_idps is None:
if "allowed_idps" in payload:
del payload["allowed_idps"]
return payload
def check_for_tld_access_policy(zone_name):
    """Return True when a wildcard Access application exists for the zone.

    Looks for an Access application whose domain is ``*.<zone_name>``.
    Returns False for an empty zone name, a missing application, or any
    lookup error.
    """
    if not zone_name:
        logging.warning("check_for_tld_access_policy called with no zone_name.")
        return False
    tld_hostname = f"*.{zone_name}"
    logging.info(f"Checking for existing Access Policy for wildcard TLD: {tld_hostname}")
    try:
        app_record = find_cloudflare_access_application_by_hostname(tld_hostname)
    except Exception as e:
        logging.error(f"Error while checking for TLD access policy for '{tld_hostname}': {e}", exc_info=True)
        return False
    if app_record and app_record.get("id"):
        logging.info(f"Found existing Access Application ID '{app_record.get('id')}' for TLD '{tld_hostname}'.")
        return True
    logging.info(f"No specific Access Application found for TLD '{tld_hostname}'.")
    return False
def get_cloudflare_account_email():
    """Return the email address of the authenticated Cloudflare account.

    The value is cached in module globals for _ACCOUNT_EMAIL_CACHE_TTL
    seconds to avoid hitting the /user endpoint on every call.  Returns
    None when the API call fails or the response carries no email.
    """
    global _cached_account_email, _cached_account_email_timestamp
    current_time = time.time()
    # Serve from cache while it is still fresh.
    if _cached_account_email and (current_time - _cached_account_email_timestamp < _ACCOUNT_EMAIL_CACHE_TTL):
        logging.debug(f"Returning cached Cloudflare account email: {_cached_account_email}")
        return _cached_account_email
    logging.info("Fetching Cloudflare account email from API.")
    try:
        response_data = cloudflare_api.cf_api_request("GET", "/user")
        if response_data and response_data.get("success"):
            email = response_data.get("result", {}).get("email")
            if email:
                logging.info(f"Successfully fetched Cloudflare account email: {email}")
                # Refresh the cache only on a successful lookup.
                _cached_account_email = email
                _cached_account_email_timestamp = current_time
                return email
            else:
                logging.warning("Cloudflare account email not found in API response.")
                return None
        else:
            logging.warning(f"Failed to fetch Cloudflare account email, API call unsuccessful. Response: {response_data}")
            return None
    except requests.exceptions.RequestException as e:
        logging.error(f"API error fetching Cloudflare account email: {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error fetching Cloudflare account email: {e}", exc_info=True)
        return None
def find_cloudflare_access_application_by_hostname(hostname):
    """Locate an Access application whose domain matches ``hostname``.

    Strategy: first query the API filtered by domain; if no exact match is
    returned, fall back to listing applications (first 100) and scanning
    both ``domain`` and ``self_hosted_domains``.

    Returns:
        dict | None: the application record, or None when not found or on
        any API/unexpected error.
    """
    logging.info(f"Finding Cloudflare Access Application for hostname '{hostname}' on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/access/apps"
    try:
        # Fast path: server-side filter by domain.
        response_data_direct = cloudflare_api.cf_api_request("GET", endpoint, params={"domain": hostname})
        apps_direct = response_data_direct.get("result", [])
        if apps_direct and isinstance(apps_direct, list):
            for app in apps_direct:
                if app.get("domain") == hostname: # Exact domain match
                    logging.info(f"Found Access Application ID '{app.get('id')}' for hostname '{hostname}' via direct domain query.")
                    return app
        # Slow path: full scan of the application list.
        logging.info(f"No exact match for '{hostname}' via domain query. Falling back to listing all Access Applications.")
        all_apps_response = cloudflare_api.cf_api_request("GET", endpoint, params={"per_page": 100})
        # Add pagination here if you expect > 100 access apps
        all_apps = all_apps_response.get("result", [])
        if all_apps and isinstance(all_apps, list):
            for app in all_apps:
                if app.get("domain") == hostname:
                    logging.info(f"Found Access Application ID '{app.get('id')}' for hostname '{hostname}' via full list scan (domain match).")
                    return app
                # Also check self_hosted_domains for a match
                if hostname in app.get("self_hosted_domains", []):
                    logging.info(f"Found Access Application ID '{app.get('id')}' for hostname '{hostname}' (in self_hosted_domains) via full list scan.")
                    return app
        logging.info(f"Access Application for hostname '{hostname}' not found after extensive search.")
        return None
    except requests.exceptions.RequestException as e:
        logging.error(f"API error finding Cloudflare Access Application for '{hostname}': {e}")
        return None # Or re-raise if the caller should handle API errors directly
    except Exception as e:
        logging.error(f"Unexpected error finding Cloudflare Access Application for '{hostname}': {e}", exc_info=True)
        return None
def create_cloudflare_access_application(hostname, name, session_duration, app_launcher_visible, self_hosted_domains, access_policies, allowed_idps=None, auto_redirect_to_identity=False):
    """Create a self-hosted Access application for ``hostname``.

    Returns the created application dict on success, or None when the API
    call fails or the response carries no application ID.
    """
    logging.info(f"Creating Cloudflare Access Application for hostname '{hostname}' on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/access/apps"
    payload = _build_access_app_payload(hostname, name, session_duration, app_launcher_visible, self_hosted_domains, access_policies, allowed_idps, auto_redirect_to_identity)
    try:
        result = cloudflare_api.cf_api_request("POST", endpoint, json_data=payload).get("result")
    except requests.exceptions.RequestException as e:
        logging.error(f"API error creating Access Application for '{hostname}': {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error creating Access Application for '{hostname}': {e}", exc_info=True)
        return None
    if result and result.get("id"):
        logging.info(f"Successfully created Access Application '{result.get('id')}' for '{hostname}'")
        return result
    logging.error(f"Access Application creation for '{hostname}' API call successful but no ID in response: {result}")
    return None
def get_cloudflare_access_application(app_uuid):
    """Fetch a single Access application by its UUID.

    Returns:
        dict | None: the application record; None when the app does not
        exist (a 404 is logged as a warning rather than an error), the
        response carries no result, or any API/unexpected error occurs.
    """
    logging.info(f"Getting Cloudflare Access Application details for ID '{app_uuid}' on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/access/apps/{app_uuid}"
    try:
        response_data = cloudflare_api.cf_api_request("GET", endpoint)
        app_data = response_data.get("result")
        if app_data:
            logging.info(f"Successfully retrieved Access Application details for ID '{app_uuid}'")
            return app_data
        elif response_data.get("success"):
            # success=true but empty result: nothing to return.
            logging.warning(f"Successfully called API for Access App ID '{app_uuid}', but no result data found. Response: {response_data}")
            return None
        else: # Explicit failure
            logging.error(f"API call failed or returned success=false for Access App ID '{app_uuid}'. Response: {response_data}")
            return None
    except requests.exceptions.RequestException as e:
        # A missing application is expected in normal flows; log it softly.
        if hasattr(e, 'response') and e.response is not None and e.response.status_code == 404:
            logging.warning(f"Cloudflare Access Application with ID '{app_uuid}' not found (404).")
        else:
            logging.error(f"API error getting Access Application '{app_uuid}': {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error getting Access Application '{app_uuid}': {e}", exc_info=True)
        return None
def update_cloudflare_access_application(app_uuid, hostname, name, session_duration, app_launcher_visible, self_hosted_domains, access_policies, allowed_idps=None, auto_redirect_to_identity=False):
    """Replace an existing Access application's configuration via PUT.

    Returns the updated application dict on success, or None when the API
    call fails or the response carries no application ID.
    """
    logging.info(f"Updating Cloudflare Access Application ID '{app_uuid}' for hostname '{hostname}' on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/access/apps/{app_uuid}"
    payload = _build_access_app_payload(hostname, name, session_duration, app_launcher_visible, self_hosted_domains, access_policies, allowed_idps, auto_redirect_to_identity)
    try:
        result = cloudflare_api.cf_api_request("PUT", endpoint, json_data=payload).get("result")
    except requests.exceptions.RequestException as e:
        logging.error(f"API error updating Access Application '{app_uuid}': {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error updating Access Application '{app_uuid}': {e}", exc_info=True)
        return None
    if result and result.get("id"):
        logging.info(f"Successfully updated Access Application '{result.get('id')}' for '{hostname}'")
        return result
    logging.error(f"Access Application update for '{app_uuid}' API call successful but no ID in response: {result}")
    return None
def delete_cloudflare_access_application(app_uuid):
    """Delete an Access application by UUID.

    Returns:
        bool: True when deletion succeeded, when the API returned no
        response body (treated as success), or when the application was
        already gone (404); False on any other failure.

    Note: the original implementation had an unreachable branch (a second
    ``success and not result`` check already covered by the first ``if``)
    and a tautological ``"success" not in str(None)`` test; both collapsed
    here without changing observable behavior.
    """
    logging.info(f"Deleting Cloudflare Access Application ID '{app_uuid}' on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/access/apps/{app_uuid}"
    try:
        response_data = cloudflare_api.cf_api_request("DELETE", endpoint)
        if response_data and response_data.get("success"):
            # Some responses echo the deleted ID, others return an empty result.
            result = response_data.get("result")
            deleted_id = result.get("id") if isinstance(result, dict) else None
            logging.info(f"Successfully submitted deletion for Access Application ID '{deleted_id if deleted_id else app_uuid}'")
            return True
        if response_data is None:
            # No content and no error raised: treat as a successful delete.
            logging.info(f"Access Application ID '{app_uuid}' deletion API call likely succeeded (no content/error).")
            return True
        logging.warning(f"Access Application deletion for '{app_uuid}' API call did not confirm success clearly. Response: {response_data}")
        return False
    except requests.exceptions.RequestException as e:
        if hasattr(e, 'response') and e.response is not None and e.response.status_code == 404:
            # Already deleted on Cloudflare's side: the desired end state holds.
            logging.warning(f"Cloudflare Access Application with ID '{app_uuid}' not found during delete attempt (404). Treating as success.")
            return True
        logging.error(f"API error deleting Access Application '{app_uuid}': {e}")
        return False
    except Exception as e:
        logging.error(f"Unexpected error deleting Access Application '{app_uuid}': {e}", exc_info=True)
        return False
def generate_access_app_config_hash(policy_type, session_duration, app_launcher_visible, allowed_idps_str, auto_redirect_to_identity, custom_access_rules_str=None):
    """Return a stable SHA-256 fingerprint of an Access app's label config.

    Fields are normalized (booleans coerced, optionals stringified unless
    None) and serialized as JSON with sorted keys so that identical
    configurations always produce the same hex digest across runs.
    """
    normalized = {
        "policy_type": policy_type,
        "session_duration": str(session_duration),
        "app_launcher_visible": bool(app_launcher_visible),
        "allowed_idps_str": None if allowed_idps_str is None else str(allowed_idps_str),
        "auto_redirect_to_identity": bool(auto_redirect_to_identity),
        "custom_access_rules_str": None if custom_access_rules_str is None else str(custom_access_rules_str),
    }
    canonical = json.dumps(normalized, sort_keys=True)
    return hashlib.sha256(canonical.encode('utf-8')).hexdigest()
def handle_access_policy_from_labels(hostname_config_item, current_rule_in_state, state_manager_save_func):
    """Reconcile one hostname's Cloudflare Access app with its Docker labels.

    Args:
        hostname_config_item: dict of label-derived settings for one hostname
            ("hostname", "access_policy_type", "access_app_name", etc.).
        current_rule_in_state: mutable per-hostname state dict; updated in
            place ("access_app_id", "access_policy_type",
            "access_app_config_hash").
        state_manager_save_func: callable intended to persist state.
            NOTE(review): it is never actually invoked below — only a debug
            line is logged; confirm whether the caller persists based on the
            return value instead.

    Returns:
        bool: True when current_rule_in_state was modified.
    """
    hostname = hostname_config_item["hostname"]
    # Desired configuration as declared via container labels (with defaults).
    desired_access_policy_type_from_label = hostname_config_item.get("access_policy_type")
    desired_access_app_name_from_label = hostname_config_item.get("access_app_name") or f"DockFlare-{hostname}"
    desired_session_duration_from_label = hostname_config_item.get("access_session_duration", "24h")
    desired_app_launcher_visible_from_label = hostname_config_item.get("access_app_launcher_visible", False)
    desired_allowed_idps_str_from_label = hostname_config_item.get("access_allowed_idps_str")
    desired_auto_redirect_from_label = hostname_config_item.get("access_auto_redirect", False)
    desired_custom_rules_str_from_label = hostname_config_item.get("access_custom_rules_str")
    local_state_changed_by_access_policy = False
    # Currently persisted view of this hostname's Access app.
    current_access_app_id = current_rule_in_state.get("access_app_id")
    current_access_policy_type_in_state = current_rule_in_state.get("access_policy_type")
    current_access_app_config_hash_in_state = current_rule_in_state.get("access_app_config_hash")
    if desired_access_policy_type_from_label:
        # Fingerprint the desired config so drift can be detected cheaply.
        desired_access_app_config_hash_from_label = generate_access_app_config_hash(
            desired_access_policy_type_from_label,
            desired_session_duration_from_label,
            desired_app_launcher_visible_from_label,
            desired_allowed_idps_str_from_label,
            desired_auto_redirect_from_label,
            desired_custom_rules_str_from_label
        )
        if desired_access_policy_type_from_label == "default_tld":
            # "default_tld": no hostname-specific app should exist; delete
            # any app previously managed for this hostname.
            if current_access_app_id:
                logging.info(f"Label policy for {hostname} is 'default_tld'. Deleting existing Access App {current_access_app_id}.")
                if delete_cloudflare_access_application(current_access_app_id):
                    current_rule_in_state["access_app_id"] = None
                    current_rule_in_state["access_policy_type"] = "default_tld"
                    current_rule_in_state["access_app_config_hash"] = None
                    local_state_changed_by_access_policy = True
                else:
                    logging.error(f"Failed to delete Access App {current_access_app_id} for {hostname} as per label 'default_tld'.")
            elif current_access_policy_type_in_state != "default_tld":
                # Nothing to delete; just record the policy type change.
                current_rule_in_state["access_app_id"] = None
                current_rule_in_state["access_policy_type"] = "default_tld"
                current_rule_in_state["access_app_config_hash"] = None
                local_state_changed_by_access_policy = True
                logging.info(f"Label policy for {hostname} set to 'default_tld'. No specific app managed.")
        elif desired_access_policy_type_from_label in ["bypass", "authenticate"]:
            # Build the policy list: explicit custom rules win; otherwise a
            # sensible default per policy type is used.
            cf_access_policies = []
            if desired_custom_rules_str_from_label:
                try:
                    parsed_rules = json.loads(desired_custom_rules_str_from_label)
                    if isinstance(parsed_rules, list):
                        cf_access_policies = parsed_rules
                    else:
                        logging.error(f"Parsed 'custom_rules' label for {hostname} is not a list. Reverting to default for {desired_access_policy_type_from_label}.")
                except json.JSONDecodeError as json_err:
                    logging.error(f"Error parsing 'custom_rules' label JSON for {hostname}: {json_err}. Reverting to default for {desired_access_policy_type_from_label}.")
            if not cf_access_policies:
                if desired_access_policy_type_from_label == "bypass":
                    cf_access_policies = [{"name": "Label Default Bypass", "decision": "bypass", "include": [{"everyone": {}}]}]
                elif desired_access_policy_type_from_label == "authenticate":
                    policy_include_rules = []
                    if desired_allowed_idps_str_from_label:
                        idp_ids = [idp.strip() for idp in desired_allowed_idps_str_from_label.split(',') if idp.strip()]
                        if idp_ids:
                            policy_include_rules.append({"identity_provider": {"id": idp_ids}})
                    if not policy_include_rules:
                        # No IdPs specified: allow any authenticated identity.
                        policy_include_rules.append({"everyone": {}})
                    cf_access_policies = [{"name": "Label Default Authenticated Access", "decision": "allow", "include": policy_include_rules}]
            allowed_idps_list_for_app = [idp.strip() for idp in desired_allowed_idps_str_from_label.split(',') if idp.strip()] if desired_allowed_idps_str_from_label else None
            # An API call is needed when the app doesn't exist yet, or its
            # recorded type / config hash no longer matches the labels.
            needs_api_action = False
            if current_access_app_id:
                if current_access_policy_type_in_state != desired_access_policy_type_from_label or \
                   current_access_app_config_hash_in_state != desired_access_app_config_hash_from_label:
                    needs_api_action = True
                    logging.info(f"Access App {current_access_app_id} for {hostname} needs update. Current type: '{current_access_policy_type_in_state}', hash: '{current_access_app_config_hash_in_state}'. Desired type: '{desired_access_policy_type_from_label}', hash: '{desired_access_app_config_hash_from_label}'.")
            else:
                needs_api_action = True
                logging.info(f"No Access App for {hostname}. Needs creation with type: '{desired_access_policy_type_from_label}'.")
            if needs_api_action:
                if current_access_app_id:
                    logging.info(f"Updating Access App {current_access_app_id} for {hostname} based on labels (type: {desired_access_policy_type_from_label}).")
                    updated_app = update_cloudflare_access_application(
                        current_access_app_id, hostname, desired_access_app_name_from_label,
                        desired_session_duration_from_label, desired_app_launcher_visible_from_label,
                        [hostname], cf_access_policies, allowed_idps_list_for_app, desired_auto_redirect_from_label
                    )
                    if updated_app:
                        current_rule_in_state["access_policy_type"] = desired_access_policy_type_from_label
                        current_rule_in_state["access_app_config_hash"] = desired_access_app_config_hash_from_label
                        local_state_changed_by_access_policy = True
                    else:
                        logging.error(f"Failed to update Access App {current_access_app_id} for {hostname} based on labels.")
                else:
                    logging.info(f"Creating new Access App for {hostname} based on labels (type: '{desired_access_policy_type_from_label}').")
                    created_app = create_cloudflare_access_application(
                        hostname, desired_access_app_name_from_label,
                        desired_session_duration_from_label, desired_app_launcher_visible_from_label,
                        [hostname], cf_access_policies, allowed_idps_list_for_app, desired_auto_redirect_from_label
                    )
                    if created_app and created_app.get("id"):
                        current_rule_in_state["access_app_id"] = created_app.get("id")
                        current_rule_in_state["access_policy_type"] = desired_access_policy_type_from_label
                        current_rule_in_state["access_app_config_hash"] = desired_access_app_config_hash_from_label
                        local_state_changed_by_access_policy = True
                    else:
                        logging.error(f"Failed to create Access App for {hostname} based on labels.")
        else: # Unknown access policy type
            logging.warning(f"Unknown access.policy type '{desired_access_policy_type_from_label}' from label for {hostname}. No Access App action taken based on this specific label type.")
    else:
        # No access policy label at all: tear down any app we manage and
        # clear the persisted access fields.
        if current_access_app_id:
            logging.info(f"No access policy label for {hostname}, but found managed Access App {current_access_app_id}. Deleting it as per label configuration (or lack thereof).")
            if delete_cloudflare_access_application(current_access_app_id):
                current_rule_in_state["access_app_id"] = None
                current_rule_in_state["access_policy_type"] = None
                current_rule_in_state["access_app_config_hash"] = None
                local_state_changed_by_access_policy = True
            else:
                logging.error(f"Failed to delete Access App {current_access_app_id} for {hostname} during label-based cleanup (no policy label).")
        elif current_rule_in_state.get("access_policy_type") is not None :
            current_rule_in_state["access_app_id"] = None
            current_rule_in_state["access_policy_type"] = None
            current_rule_in_state["access_app_config_hash"] = None
            local_state_changed_by_access_policy = True
            logging.debug(f"Ensuring access policy type is None for {hostname} as no access labels are present and no app was managed.")
    # NOTE(review): state_manager_save_func is only referenced in this guard,
    # never called — looks like the actual save call is missing; confirm.
    if local_state_changed_by_access_policy and state_manager_save_func:
        logging.debug(f"Access policy changes for {hostname} triggered a state save.")
    return local_state_changed_by_access_policy

View file

@ -0,0 +1,628 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/core/cloudflare_api.py
import logging
import requests
import json
import time
import threading
from app import config
# Caches guarded by _cache_lock: zone name -> (zone_id, fetched_at) and
# zone id -> cached details.
zone_id_cache = {}
zone_details_by_id_cache = {}
# NOTE(review): these two email-cache globals appear unused in this module
# (access_manager keeps its own copies) — confirm before removing.
_cached_account_email = None
_cached_account_email_timestamp = 0
_cache_lock = threading.Lock()
# Bounds the number of concurrent DNS operations against the API.
dns_semaphore = threading.Semaphore(config.MAX_CONCURRENT_DNS_OPS)
def cf_api_request(method, endpoint, json_data=None, params=None):
    """Perform one Cloudflare v4 API request and return the parsed envelope.

    Args:
        method: HTTP verb ("GET", "POST", ...).
        endpoint: path appended to config.CF_API_BASE_URL (e.g. "/zones").
        json_data: optional JSON-serializable request body.
        params: optional query-string parameters.

    Returns:
        dict: the decoded response envelope when the API reports success.
        A 204 or empty body is normalized to {"success": True, "result": None}.

    Raises:
        requests.exceptions.RequestException: on transport errors, non-2xx
            statuses, invalid/unexpected JSON, or when the envelope reports
            success=false; the exception may carry a ``cf_error_code``
            attribute with Cloudflare's error code.
    """
    url = f"{config.CF_API_BASE_URL}{endpoint}"
    # Sentinel: set only when a Cloudflare-reported failure was already
    # logged below; the outer handler uses it to avoid double-logging.
    error_msg = None
    try:
        logging.info(f"CF API Request: {method} {url} Params: {params}")
        if json_data:
            # Best-effort serialization purely for debug logging.
            try:
                log_data = json.dumps(json_data)
            except TypeError:
                log_data = str(json_data)
            logging.debug(f"CF API Request Data: {log_data[:500]}")
        response = requests.request(
            method,
            url,
            headers=config.CF_HEADERS,
            json=json_data,
            params=params,
            timeout=30
        )
        response.raise_for_status()  # raises for 4xx/5xx
        logging.info(f"CF API Response Status: {response.status_code}")
        if response.status_code == 204 or not response.content:
            # No body (e.g. DELETE): normalize to a success envelope.
            return {"success": True, "result": None}
        try:
            response_data = response.json()
            logging.debug(f"CF API Response Body (first 500 chars): {str(response_data)[:500]}")
            if isinstance(response_data, dict) and 'success' in response_data:
                if response_data['success']:
                    return response_data
                else:
                    # HTTP 2xx but envelope reports failure: surface it as a
                    # RequestException tagged with Cloudflare's error code.
                    cf_errors = response_data.get('errors', [])
                    error_code = None
                    if cf_errors and isinstance(cf_errors, list) and len(cf_errors) > 0 and isinstance(cf_errors[0], dict):
                        error_msg = f"API Error: {cf_errors[0].get('message', 'Unknown error')}"
                        error_code = cf_errors[0].get('code')
                    else:
                        error_msg = f"API reported failure but no error details provided. Response: {response_data}"
                    logging.error(f"CF API Request Failed ({method} {url}): {error_msg} - Full Errors: {cf_errors}")
                    api_exception = requests.exceptions.RequestException(error_msg, response=response)
                    api_exception.cf_error_code = error_code
                    raise api_exception
            else:
                logging.warning(f"CF API response for {method} {url} was valid JSON but missing 'success' field. Status: {response.status_code}. Body: {str(response_data)[:200]}")
                raise requests.exceptions.RequestException(f"Unexpected JSON response format from API. Status: {response.status_code}", response=response)
        except json.JSONDecodeError:
            logging.error(f"CF API response for {method} {url} was not valid JSON. Status: {response.status_code}. Body: {response.text[:200]}")
            raise requests.exceptions.RequestException(f"Invalid JSON response from API. Status: {response.status_code}", response=response)
    except requests.exceptions.RequestException as e:
        # Only log here if the failure wasn't already logged above
        # (error_msg is set exactly when the envelope-failure path ran).
        if error_msg is None:
            log_error_msg = f"CF API Request Failed: {method} {url}. Original Exception: {e}"
            error_msg_for_exception = f"Request Exception: {e}"
            if e.response is not None:
                # Try to extract Cloudflare's structured error details.
                try:
                    error_data = e.response.json()
                    cf_errors = error_data.get('errors', [])
                    if cf_errors and isinstance(cf_errors, list) and len(cf_errors) > 0 and isinstance(cf_errors[0], dict):
                        error_msg_for_exception = f"API Error: {cf_errors[0].get('message', 'Unknown error')}"
                        if not hasattr(e, 'cf_error_code'):
                            e.cf_error_code = cf_errors[0].get('code')
                        log_error_msg += f" - API Details: {cf_errors[0].get('message', 'Unknown error')}"
                    else:
                        error_msg_for_exception = f"HTTP {e.response.status_code} - {e.response.text[:100]}"
                        log_error_msg += f" - HTTP {e.response.status_code} - Response Text (first 100): {e.response.text[:100]}"
                    logging.error(f"CF API Error Response Body: {error_data}")
                except (ValueError, AttributeError, json.JSONDecodeError):
                    error_msg_for_exception = f"HTTP {e.response.status_code} - {e.response.text[:100]}"
                    log_error_msg += f" - HTTP {e.response.status_code} - Response Text (first 100): {e.response.text[:100]}"
            logging.error(log_error_msg)
        raise
def get_zone_id_from_name(zone_name):
    """Resolve a Cloudflare zone name to its zone ID, with TTL caching.

    Args:
        zone_name: DNS zone name, e.g. "example.com".

    Returns:
        The zone ID string, or None when the zone cannot be resolved
        (empty input, no/ambiguous match, or API failure).
    """
    global zone_id_cache
    if not zone_name:
        logging.warning("get_zone_id_from_name called with empty zone_name.")
        return None
    ttl = config.ACCOUNT_EMAIL_CACHE_TTL
    now = time.time()
    # Fast path: serve from the cache while the entry is still fresh.
    with _cache_lock:
        entry = zone_id_cache.get(zone_name)
        if entry:
            cached_id, cached_at = entry
            if now - cached_at < ttl:
                logging.debug(f"Zone ID for '{zone_name}' found in cache: {cached_id}")
                return cached_id
            logging.debug(f"Cached Zone ID for '{zone_name}' expired, refreshing.")
    logging.info(f"Zone ID for '{zone_name}' not in cache or expired. Querying Cloudflare API...")
    try:
        response_data = cf_api_request(
            "GET",
            "/zones",
            params={"name": zone_name, "status": "active", "account.id": config.CF_ACCOUNT_ID},
        )
        matches = response_data.get("result", [])
        if matches and isinstance(matches, list) and len(matches) == 1:
            found = matches[0]
            found_id = found.get("id")
            # Require an exact name echo to guard against fuzzy API matches.
            if found_id and found.get("name") == zone_name:
                logging.info(f"Found Zone ID for '{zone_name}': {found_id}")
                with _cache_lock:
                    zone_id_cache[zone_name] = (found_id, now)
                return found_id
            logging.error(f"API returned unexpected result or name mismatch for zone '{zone_name}': {found}")
            return None
        if matches and len(matches) > 1:
            logging.error(f"API returned multiple ({len(matches)}) active zones matching name '{zone_name}' for account {config.CF_ACCOUNT_ID}. Cannot determine correct zone.")
            return None
        logging.warning(f"No active zone found matching name '{zone_name}' for account {config.CF_ACCOUNT_ID} via API.")
        return None
    except requests.exceptions.RequestException as e:
        logging.error(f"API error looking up zone '{zone_name}': {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error looking up zone '{zone_name}': {e}", exc_info=True)
        return None
def get_zone_details_by_id(zone_id_to_check):
    """Fetch (and cache) the full zone object for a zone ID.

    Args:
        zone_id_to_check: Cloudflare zone ID.

    Returns:
        The zone detail dict from the API, or None on bad input / failure.
    """
    global zone_details_by_id_cache
    if not zone_id_to_check:
        logging.warning("get_zone_details_by_id called with empty zone_id.")
        return None
    # Entries never expire here; zone metadata is effectively static.
    with _cache_lock:
        cached = zone_details_by_id_cache.get(zone_id_to_check)
        if cached is not None:
            logging.debug(f"Zone details for ID '{zone_id_to_check}' found in cache.")
            return cached
    logging.info(f"Zone details for ID '{zone_id_to_check}' not in cache. Querying Cloudflare API...")
    try:
        response_data = cf_api_request("GET", f"/zones/{zone_id_to_check}")
        if not (response_data and response_data.get("success")):
            logging.error(f"API call failed or returned success=false for zone ID '{zone_id_to_check}': {response_data}")
            return None
        zone_data = response_data.get("result")
        if isinstance(zone_data, dict) and zone_data.get("name"):
            logging.info(f"Found zone details for ID '{zone_id_to_check}': Name '{zone_data['name']}'")
            with _cache_lock:
                zone_details_by_id_cache[zone_id_to_check] = zone_data
            return zone_data
        logging.error(f"API returned success for zone ID '{zone_id_to_check}' but result is missing or malformed: {zone_data}")
        return None
    except requests.exceptions.RequestException as e:
        logging.error(f"API error looking up zone ID '{zone_id_to_check}': {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error looking up zone ID '{zone_id_to_check}': {e}", exc_info=True)
        return None
def find_tunnel_via_api(name):
    """Look up a non-deleted tunnel by exact name on the configured account.

    Args:
        name: Tunnel name to search for.

    Returns:
        (tunnel_id, token) when found, otherwise (None, None).

    Raises:
        requests.exceptions.RequestException: propagated on API failure.
    """
    logging.info(f"Finding tunnel '{name}' via API on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/cfd_tunnel"
    try:
        response_data = cf_api_request("GET", endpoint, params={"name": name, "is_deleted": "false"})
        candidates = response_data.get("result", [])
        if not (candidates and isinstance(candidates, list)):
            logging.info(f"Tunnel '{name}' not found via API (no results array or empty).")
            return None, None
        for candidate in candidates:
            # The API filter may be fuzzy; insist on an exact name match.
            if candidate.get("name") != name:
                continue
            found_id = candidate.get("id")
            if found_id:
                logging.info(f"Found existing tunnel '{name}' ID: {found_id}. Getting token...")
                return found_id, get_tunnel_token_via_api(found_id)
            logging.warning(f"Found tunnel entry for '{name}' but it has no ID: {candidate}")
            return None, None
        logging.info(f"Tunnel '{name}' not found among listed tunnels.")
        return None, None
    except requests.exceptions.RequestException as e:
        logging.error(f"API error finding tunnel '{name}': {e}")
        raise
    except Exception as e:
        logging.error(f"Unexpected error finding tunnel '{name}': {e}", exc_info=True)
        raise
def get_tunnel_token_via_api(tunnel_id):
    """Retrieve the connector token for a tunnel.

    Fix: the Cloudflare token endpoint returns the standard JSON envelope
    ({"success": true, "result": "<token>"}), so treating the raw body text
    as the token yields an unusable value. Parse the envelope when present
    and fall back to the raw body for backward compatibility.

    Args:
        tunnel_id: Tunnel ID whose token should be fetched.

    Returns:
        The tunnel token string.

    Raises:
        requests.exceptions.RequestException: on HTTP failure.
        ValueError: when the response does not contain a plausible token.
    """
    logging.info(f"Getting token for tunnel ID '{tunnel_id}' on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/cfd_tunnel/{tunnel_id}/token"
    url = f"{config.CF_API_BASE_URL}{endpoint}"
    try:
        logging.info(f"API Request: GET {url} (for token, raw request)")
        response = requests.request("GET", url, headers={"Authorization": f"Bearer {config.CF_API_TOKEN}"}, timeout=30)
        response.raise_for_status()
        token = response.text.strip()
        # Prefer the JSON envelope's "result" field when the body is JSON;
        # otherwise keep the raw body as the token (legacy behavior).
        try:
            parsed = response.json()
            if isinstance(parsed, dict) and isinstance(parsed.get("result"), str):
                token = parsed["result"].strip()
        except ValueError:
            pass  # Body was not JSON; treat it as the bare token.
        if not token or len(token) < 50:
            logging.error(f"Retrieved token for tunnel {tunnel_id} appears invalid (too short or empty).")
            raise ValueError("Invalid token format received from API")
        logging.info(f"Successfully retrieved token for tunnel {tunnel_id}")
        return token
    except requests.exceptions.RequestException as e:
        error_msg = f"API Error getting token for tunnel {tunnel_id}: {e}"
        if e.response is not None:
            error_msg += f" Status: {e.response.status_code} Body (first 100): {e.response.text[:100]}"
        logging.error(error_msg)
        raise
    except Exception as e:
        logging.error(f"Unexpected error getting tunnel token for {tunnel_id}: {e}", exc_info=True)
        raise
def create_tunnel_via_api(name):
    """Create a new remotely-configured tunnel and return its ID and token.

    Args:
        name: Name for the new tunnel.

    Returns:
        A (tunnel_id, token) tuple.

    Raises:
        requests.exceptions.RequestException: on API failure.
        ValueError: when the API response lacks an ID or token.
    """
    logging.info(f"Creating tunnel '{name}' via API on account {config.CF_ACCOUNT_ID}")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/cfd_tunnel"
    try:
        # config_src=cloudflare => ingress is managed remotely via the API.
        response_data = cf_api_request("POST", endpoint, json_data={"name": name, "config_src": "cloudflare"})
        created = response_data.get("result", {})
        new_id = created.get("id")
        new_token = created.get("token")
        if not new_id or not new_token:
            logging.error(f"API response for tunnel creation missing ID or Token: {created}")
            raise ValueError("Missing ID or Token in API response for tunnel creation")
        logging.info(f"Successfully created tunnel '{name}' with ID {new_id}.")
        return new_id, new_token
    except requests.exceptions.RequestException as e:
        logging.error(f"API error creating tunnel '{name}': {e}")
        raise
    except Exception as e:
        logging.error(f"Unexpected error creating tunnel '{name}': {e}", exc_info=True)
        raise
def create_cloudflare_dns_record(zone_id, hostname, tunnel_id):
    """Ensure a proxied CNAME for `hostname` points at `tunnel_id`.

    Flow: look for an existing record first; if it already targets the right
    tunnel, reuse it; if it targets the wrong tunnel, repoint it with a PUT;
    otherwise POST a new record. A create-time conflict (Cloudflare error
    81057 or an "already exists" message) triggers a re-lookup.

    Concurrency is bounded by `dns_semaphore` (30s acquire timeout).

    Args:
        zone_id: Cloudflare zone ID to create the record in.
        hostname: FQDN for the CNAME.
        tunnel_id: Tunnel UUID; content becomes "<id>.cfargotunnel.com".

    Returns:
        The DNS record ID on success, None on failure, or one of the
        sentinel strings "semaphore_timeout" / "existing_record_unconfirmed".
    """
    acquired = False
    try:
        acquired = dns_semaphore.acquire(timeout=30)
        if not acquired:
            logging.error(f"Timed out waiting for DNS semaphore - too many concurrent operations. Skipping DNS creation for {hostname}")
            return "semaphore_timeout"
        if not zone_id or not hostname or not tunnel_id:
            logging.error("create_cloudflare_dns_record: Missing required arguments zone_id, hostname, or tunnel_id.")
            return None
        existing_record_id, correct_tunnel = find_dns_record_id(zone_id, hostname, tunnel_id)
        if existing_record_id:
            if correct_tunnel:
                logging.info(f"DNS record for {hostname} in zone {zone_id} already exists with ID {existing_record_id} and correct tunnel. Using existing record.")
                return existing_record_id
            else:
                # Record exists but targets another tunnel: repoint it in place.
                logging.warning(f"DNS record for {hostname} in zone {zone_id} exists (ID: {existing_record_id}) but points to wrong tunnel. Updating...")
                update_payload = {
                    "type": "CNAME", "name": hostname,
                    "content": f"{tunnel_id}.cfargotunnel.com",
                    "ttl": 1, "proxied": True
                }
                update_endpoint = f"/zones/{zone_id}/dns_records/{existing_record_id}"
                try:
                    update_response = cf_api_request("PUT", update_endpoint, json_data=update_payload)
                    updated_record = update_response.get("result", {})
                    updated_id = updated_record.get("id")
                    if updated_id:
                        logging.info(f"Successfully updated DNS record for {hostname} to point to correct tunnel. ID: {updated_id}")
                        return updated_id
                    else:
                        logging.error(f"DNS record update API call for {hostname} reported success but response missing ID")
                        return existing_record_id
                except Exception as update_err:
                    # Best-effort: on update failure, hand back the old record
                    # ID rather than failing the whole operation.
                    logging.error(f"Error updating existing DNS record for {hostname}: {update_err}")
                    return existing_record_id # Return old ID
        # No usable existing record: create a fresh proxied CNAME (ttl=1 = auto).
        record_name = hostname
        record_content = f"{tunnel_id}.cfargotunnel.com"
        endpoint = f"/zones/{zone_id}/dns_records"
        payload = {
            "type": "CNAME", "name": record_name, "content": record_content,
            "ttl": 1, "proxied": True
        }
        try:
            logging.info(f"Attempting to create DNS CNAME in zone {zone_id}: Name={record_name}, Content={record_content}, Proxied=True")
            response_data = cf_api_request("POST", endpoint, json_data=payload)
            result = response_data.get("result", {})
            new_record_id = result.get("id")
            if new_record_id:
                logging.info(f"Successfully created DNS record for {hostname} in zone {zone_id}. New ID: {new_record_id}")
                return new_record_id
            else:
                logging.error(f"DNS record creation API call for {hostname} reported success but response missing ID: {result}")
                return None
        except requests.exceptions.RequestException as e:
            # Conflict handling: Cloudflare error 81057 ("record already
            # exists", e.g. a race with another writer) => re-find the record.
            cf_error_code = getattr(e, 'cf_error_code', None)
            if (cf_error_code == 81057 or
                (e.response is not None and
                 ("record already exists" in e.response.text.lower() or
                  "a, aaaa, or cname record with that host already exists" in e.response.text.lower()))):
                logging.warning(f"DNS record for {hostname} already exists in zone {zone_id} (API error code indicates conflict). Verifying...")
                time.sleep(1) # Give API a moment
                existing_id, _ = find_dns_record_id(zone_id, hostname, tunnel_id)
                if existing_id:
                    logging.info(f"Found existing record ID for {hostname} after conflict: {existing_id}")
                    return existing_id
                return "existing_record_unconfirmed"
            else:
                logging.error(f"API error creating DNS record for {hostname}: {e}")
                return None
        except Exception as e:
            logging.error(f"Unexpected error creating DNS record for {hostname}: {e}", exc_info=True)
            return None
    finally:
        if acquired:
            dns_semaphore.release()
            logging.debug(f"Released DNS semaphore after processing {hostname}")
def find_dns_record_id(zone_id, hostname, tunnel_id):
    """Locate the CNAME record for `hostname` and whether it targets `tunnel_id`.

    Two-pass search: first an exact match on name AND the expected content
    ("<tunnel_id>.cfargotunnel.com"); if that misses, a name-only CNAME
    search whose content is compared case-insensitively.

    Concurrency is bounded by `dns_semaphore` (15s acquire timeout).

    Args:
        zone_id: Zone to search in.
        hostname: FQDN of the record.
        tunnel_id: Tunnel UUID the record is expected to target.

    Returns:
        (record_id, correct_tunnel): record_id is None when nothing was
        found (or on error/timeout); correct_tunnel is True only when the
        record's content matches the expected tunnel target.
    """
    acquired = False
    try:
        acquired = dns_semaphore.acquire(timeout=15)
        if not acquired:
            logging.error(f"Timed out waiting for DNS semaphore in find_dns_record_id for {hostname}")
            return None, False
        if not zone_id or not hostname or not tunnel_id:
            logging.error("find_dns_record_id: Missing required arguments.")
            return None, False
        expected_content = f"{tunnel_id}.cfargotunnel.com"
        endpoint = f"/zones/{zone_id}/dns_records"
        # First, try a very specific query
        params_specific = {"type": "CNAME", "name": hostname, "content": expected_content, "match": "all"}
        try:
            logging.info(f"Searching DNS (specific): Zone={zone_id}, Type=CNAME, Name={hostname}, Content={expected_content}")
            response_data = cf_api_request("GET", endpoint, params=params_specific)
            results = response_data.get("result", [])
            if results and isinstance(results, list) and len(results) == 1: # Expecting one exact match
                record = results[0]
                if record.get("id"):
                    logging.info(f"Found exact DNS record for {hostname} in zone {zone_id} with ID: {record.get('id')}")
                    return record.get("id"), True
            # Second pass: search by name only and inspect each record's content.
            logging.info(f"Exact DNS record for {hostname} (content: {expected_content}) not found. Searching by name only.")
            params_by_name = {"type": "CNAME", "name": hostname}
            response_data_by_name = cf_api_request("GET", endpoint, params=params_by_name)
            results_by_name = response_data_by_name.get("result", [])
            if results_by_name and isinstance(results_by_name, list):
                for record in results_by_name:
                    if record.get("id"):
                        record_content = record.get("content", "")
                        if record_content.lower() == expected_content.lower():
                            logging.info(f"Found DNS record for {hostname} by name search (correct content) with ID: {record.get('id')}")
                            return record.get("id"), True
                        else:
                            # A CNAME exists for this name but targets a
                            # different tunnel: report it so the caller can
                            # repoint it rather than create a duplicate.
                            logging.warning(f"Found DNS CNAME for {hostname} (ID: {record.get('id')}) but it points to '{record_content}' instead of '{expected_content}'.")
                            return record.get("id"), False # Found a record, but wrong tunnel
                # Only reached when no record in the list had an "id" that
                # matched above; fall back to the first record's ID if any.
                logging.info(f"Found CNAME(s) for {hostname}, but none match expected content '{expected_content}'.")
                if results_by_name[0].get("id"):
                    return results_by_name[0].get("id"), False
            logging.info(f"No CNAME DNS record found for {hostname} in zone {zone_id} after both searches.")
            return None, False
        except requests.exceptions.RequestException as e:
            logging.error(f"API error finding DNS record for {hostname}: {e}")
            return None, False
        except Exception as e:
            logging.error(f"Unexpected error finding DNS record for {hostname}: {e}", exc_info=True)
            return None, False
    finally:
        if acquired:
            dns_semaphore.release()
            logging.debug(f"Released DNS semaphore after find_dns_record_id for {hostname}")
def delete_cloudflare_dns_record(zone_id, hostname, tunnel_id):
    """Delete the CNAME record for `hostname` associated with `tunnel_id`.

    "Record not found" — whether detected before or during deletion (404) —
    is treated as success, since the desired end state is already reached.
    Concurrency is bounded by `dns_semaphore` (30s acquire timeout).

    Returns:
        True on success (or nothing to delete), False otherwise.
    """
    acquired = False
    try:
        acquired = dns_semaphore.acquire(timeout=30)
        if not acquired:
            logging.error(f"Timed out waiting for DNS semaphore in delete_cloudflare_dns_record for {hostname}")
            return False
        if not zone_id or not hostname or not tunnel_id:
            logging.error("delete_cloudflare_dns_record: Missing required arguments.")
            return False
        record_id, _ = find_dns_record_id(zone_id, hostname, tunnel_id)
        if not record_id:
            logging.warning(f"DNS record for {hostname} in zone {zone_id} (for tunnel {tunnel_id}) not found to delete. Assuming success or already deleted.")
            return True
        logging.info(f"Attempting to delete DNS record for {hostname} in zone {zone_id} (ID: {record_id})")
        try:
            cf_api_request("DELETE", f"/zones/{zone_id}/dns_records/{record_id}")
            logging.info(f"Successfully submitted deletion for DNS record {hostname} (ID: {record_id}) in zone {zone_id}.")
            return True
        except requests.exceptions.RequestException as e:
            # A 404 here means someone else already removed it — fine.
            if e.response is not None and e.response.status_code == 404:
                logging.warning(f"DNS record {record_id} for {hostname} in zone {zone_id} not found during delete attempt (404). Treating as success.")
                return True
            logging.error(f"API error deleting DNS record {record_id} for {hostname} in zone {zone_id}: {e}")
            return False
        except Exception as e:
            logging.error(f"Unexpected error deleting DNS record {record_id} for {hostname} in zone {zone_id}: {e}", exc_info=True)
            return False
    finally:
        if acquired:
            dns_semaphore.release()
def get_cloudflare_account_email():
    """Return the email of the API token's user, with TTL caching.

    Returns:
        The email string, or None when it cannot be determined.
    """
    global _cached_account_email, _cached_account_email_timestamp
    now = time.time()
    with _cache_lock:
        is_fresh = _cached_account_email and (now - _cached_account_email_timestamp < config.ACCOUNT_EMAIL_CACHE_TTL)
        if is_fresh:
            logging.debug(f"Returning cached Cloudflare account email: {_cached_account_email}")
            return _cached_account_email
    logging.info("Fetching Cloudflare account email from API.")
    try:
        response_data = cf_api_request("GET", "/user")
        if not (response_data and response_data.get("success")):
            logging.warning(f"Failed to fetch Cloudflare account email, API call unsuccessful. Response: {response_data}")
            return None
        email = response_data.get("result", {}).get("email")
        if not email:
            logging.warning("Cloudflare account email not found in API response.")
            return None
        logging.info(f"Successfully fetched Cloudflare account email: {email}")
        with _cache_lock: # Protect cache write
            _cached_account_email = email
            _cached_account_email_timestamp = now
        return email
    except requests.exceptions.RequestException as e:
        logging.error(f"API error fetching Cloudflare account email: {e}")
        return None
    except Exception as e:
        logging.error(f"Unexpected error fetching Cloudflare account email: {e}", exc_info=True)
        return None
def get_current_cf_config(tunnel_id_to_query):
    """Fetch the current remote ingress configuration for a tunnel.

    Args:
        tunnel_id_to_query: Tunnel ID whose configuration should be read.

    Returns:
        The "config" dict; {} when the remote config is null or has an
        unexpected type; None when the lookup fails or the ID is missing.

    Raises:
        requests.exceptions.RequestException: propagated on transport failure.
    """
    if not tunnel_id_to_query:
        logging.warning("get_current_cf_config: tunnel_id_to_query not provided.")
        return None
    logging.debug(f"Fetching current CF tunnel configuration for tunnel ID {tunnel_id_to_query}.")
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/cfd_tunnel/{tunnel_id_to_query}/configurations"
    try:
        response_data = cf_api_request("GET", endpoint)
        if not (response_data and response_data.get("success")):
            logging.error(f"Get config API call failed or returned success=false for tunnel {tunnel_id_to_query}: {response_data}")
            return None
        result_data = response_data.get("result")
        config_data = result_data.get("config") if isinstance(result_data, dict) else None
        if isinstance(config_data, dict):
            logging.debug(f"Fetched config for tunnel {tunnel_id_to_query}: {config_data}")
            return config_data
        if config_data is None:
            logging.info(f"Fetched 'config' for tunnel {tunnel_id_to_query} is null. Returning empty dict.")
            return {}
        logging.warning(f"Unexpected type for 'config' field in API response for tunnel {tunnel_id_to_query}: {type(config_data)}. Result: {result_data}")
        return {}
    except requests.exceptions.RequestException as e:
        logging.error(f"API error fetching config for tunnel {tunnel_id_to_query}: {e}")
        raise
    except Exception as e:
        logging.error(f"Unexpected error fetching config for tunnel {tunnel_id_to_query}: {e}", exc_info=True)
        raise
def get_all_account_cloudflare_tunnels():
    """List non-deleted tunnels on the account, filtered to relevant statuses.

    Pages through the cfd_tunnel endpoint (hard cap of 10 pages), keeps only
    tunnels whose status is healthy/degraded/down/inactive/pending, and
    returns them sorted by name (case-insensitive).

    Returns:
        A sorted list of tunnel dicts; [] on configuration or API errors.
    """
    if not config.CF_ACCOUNT_ID:
        logging.warning("CF_ACCOUNT_ID is not configured. Cannot list all Cloudflare tunnels.")
        return []
    if not config.CF_API_TOKEN:
        logging.error("Cloudflare API token not configured. Cannot list all account tunnels.")
        return []
    endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/cfd_tunnel"
    per_page = 100
    logging.info(f"Attempting to list all Cloudflare tunnels for account ID {config.CF_ACCOUNT_ID}")
    collected = []
    for page in range(1, 11):  # safety cap: at most 10 pages (1000 tunnels)
        try:
            response_data = cf_api_request(
                "GET", endpoint,
                params={"is_deleted": "false", "per_page": per_page, "page": page},
            )
            batch = response_data.get("result", [])
            if not isinstance(batch, list):
                logging.error(f"Unexpected data format for account tunnels list page {page}: {type(batch)}. Response: {response_data}")
                break
            collected.extend(batch)
            if len(batch) < per_page:
                break  # short page => last page
        except requests.exceptions.RequestException as e:
            logging.error(f"API error listing Cloudflare tunnels (page {page}): {e}")
            return []
        except Exception as e:
            logging.error(f"Unexpected error listing Cloudflare tunnels (page {page}): {e}", exc_info=True)
            return []
    else:
        # Loop exhausted without a break: page 10 was still full.
        logging.warning("Exceeded 10 pages fetching tunnels. Assuming all fetched or API issue.")
    logging.info(f"Successfully retrieved {len(collected)} Cloudflare tunnels from the account (any status).")
    desired_statuses = {"healthy", "degraded", "down", "inactive", "pending"}
    relevant = [t for t in collected if t.get("status", "").lower() in desired_statuses]
    logging.info(f"Returning {len(relevant)} tunnels after client-side status check for relevant statuses.")
    return sorted(relevant, key=lambda t: t.get("name", "").lower())
def get_dns_records_for_tunnel(zone_id, tunnel_id):
    """Find all CNAME records in a zone that point at the given tunnel.

    Args:
        zone_id: Zone to search in.
        tunnel_id: Tunnel whose "<id>.cfargotunnel.com" target is matched.

    Returns:
        A list of {"name", "id", "zone_id", "zone_name"} dicts; [] on error.
    """
    if not zone_id or not tunnel_id:
        logging.warning("get_dns_records_for_tunnel: Missing zone_id or tunnel_id.")
        return []
    zone_details = get_zone_details_by_id(zone_id)
    zone_name_for_display = zone_details.get("name") if zone_details else zone_id
    expected_cname_content = f"{tunnel_id}.cfargotunnel.com"
    endpoint = f"/zones/{zone_id}/dns_records"
    per_page = 100
    logging.info(f"Fetching DNS records for tunnel {tunnel_id} in zone '{zone_name_for_display}' ({zone_id}) with content '{expected_cname_content}'")
    matched = []
    page = 1
    while True:
        try:
            response_data = cf_api_request(
                "GET", endpoint,
                params={"type": "CNAME", "content": expected_cname_content, "per_page": per_page, "page": page},
            )
            batch = response_data.get("result", [])
            if not isinstance(batch, list):
                logging.error(f"Unexpected data format for DNS records list in zone {zone_name_for_display}, page {page}: {type(batch)}")
                break
            matched.extend(
                {"name": rec.get("name"), "id": rec.get("id"), "zone_id": zone_id, "zone_name": zone_name_for_display}
                for rec in batch if rec.get("name")
            )
            if len(batch) < per_page:
                break  # short page => last page
            page += 1
            if page > 10: # Safety break
                logging.warning(f"Exceeded 10 pages fetching DNS records for tunnel {tunnel_id} in zone {zone_name_for_display}.")
                break
        except requests.exceptions.RequestException as e:
            logging.error(f"API error fetching DNS records for tunnel {tunnel_id} in zone {zone_name_for_display} (page {page}): {e}")
            return []
        except Exception as e:
            logging.error(f"Unexpected error fetching DNS records for tunnel {tunnel_id} in zone {zone_name_for_display} (page {page}): {e}", exc_info=True)
            return []
    return matched

View file

@ -0,0 +1,390 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/core/docker_handler.py
import logging
import time
import requests
from docker.errors import NotFound, APIError
from app import config, docker_client, cloudflared_agent_state, tunnel_state
from app.core.state_manager import managed_rules, state_lock, save_state
from app.core.tunnel_manager import update_cloudflare_config
from app.core.cloudflare_api import create_cloudflare_dns_record, get_zone_id_from_name
from app.core.access_manager import handle_access_policy_from_labels
def is_valid_hostname(hostname):
    """Validate a DNS hostname, optionally with a single leading wildcard.

    Rules enforced (RFC 1035-style): total length <= 253; each dot-separated
    label is 1-63 characters of ASCII letters, digits, or hyphen; no label
    starts or ends with a hyphen. A leading "*." wildcard is allowed, in
    which case the remainder must itself be a valid hostname.

    Fix: the previous implementation used str.isalnum(), which is
    Unicode-aware and wrongly accepted non-ASCII hostnames such as
    "exämple.com"; DNS labels must be ASCII (IDNs are punycode-encoded).

    Args:
        hostname: Candidate hostname string (may be None or empty).

    Returns:
        True when the hostname is acceptable, False otherwise.
    """
    if not hostname:
        return False
    if hostname.startswith('*.'):
        # Strip the wildcard prefix and validate the remaining domain part.
        hostname = hostname[2:]
        if not hostname:
            return False
    if len(hostname) > 253:
        return False
    return all(_is_valid_dns_label(label) for label in hostname.split('.'))

def _is_valid_dns_label(label):
    """Return True for one valid DNS label: 1-63 ASCII alnum/hyphen chars, no edge hyphens."""
    if not label or len(label) > 63:
        return False
    if label.startswith('-') or label.endswith('-'):
        return False
    # Explicit ASCII set: str.isalnum() would wrongly accept Unicode letters/digits.
    return all(
        c == '-' or 'a' <= c <= 'z' or 'A' <= c <= 'Z' or '0' <= c <= '9'
        for c in label
    )
def is_valid_service(service):
    """Check that a service target looks usable as a tunnel origin.

    Accepts either a URL with an http(s)/tcp/unix scheme, or a bare
    "host:port" pair.

    Args:
        service: Candidate service string (may be None or empty).

    Returns:
        True when the string matches one of the accepted shapes.
    """
    import re
    if not service:
        return False
    has_scheme = re.match(r"^(https?|tcp|unix)://", service) is not None
    is_host_port = re.match(r"^[a-zA-Z0-9._-]+:\d+$", service) is not None
    return has_scheme or is_host_port
def process_container_start(container_obj):
    """Handle a Docker 'start' event for a container.

    Reads DockFlare labels from the container, builds the list of hostname
    configurations (one direct `<prefix>.hostname` pair plus any indexed
    `<prefix>.N.*` entries), reconciles them into the in-memory
    `managed_rules` state under `state_lock`, pushes a Cloudflare tunnel
    config update when anything relevant changed, and finally creates or
    verifies a DNS CNAME for each hostname.

    Args:
        container_obj: A Docker container object (must expose .id, .reload(),
            .name and .labels).
    """
    if not container_obj:
        return
    container_id_val = None
    container_name_val = "UnknownContainer"
    try:
        container_id_val = container_obj.id
        container_obj.reload()  # refresh attrs so name/labels are current
        container_name_val = container_obj.name
        labels = container_obj.labels
        enabled_label_key = f"{config.LABEL_PREFIX}.enable"
        is_enabled = labels.get(enabled_label_key, "false").lower() in ["true", "1", "t", "yes"]
        if not is_enabled:
            logging.debug(f"Ignoring start: {container_name_val} ({container_id_val[:12]}): '{enabled_label_key}' not true.")
            return
        hostnames_to_process = []
        # Container-wide defaults for Cloudflare Access labels; indexed
        # entries below fall back to these when not overridden per index.
        default_access_policy_type_label = labels.get(f"{config.LABEL_PREFIX}.access.policy")
        default_access_app_name_label = labels.get(f"{config.LABEL_PREFIX}.access.name")
        default_access_session_duration_label = labels.get(f"{config.LABEL_PREFIX}.access.session_duration", "24h")
        default_access_app_launcher_visible_label = labels.get(f"{config.LABEL_PREFIX}.access.app_launcher_visible", "false").lower() in ["true", "1", "t", "yes"]
        default_access_allowed_idps_label_str = labels.get(f"{config.LABEL_PREFIX}.access.allowed_idps")
        default_access_auto_redirect_label = labels.get(f"{config.LABEL_PREFIX}.access.auto_redirect_to_identity", "false").lower() in ["true", "1", "t", "yes"]
        default_access_custom_rules_label_str = labels.get(f"{config.LABEL_PREFIX}.access.custom_rules")
        hostname_label = labels.get(f"{config.LABEL_PREFIX}.hostname")
        service_label = labels.get(f"{config.LABEL_PREFIX}.service")
        zone_name_label = labels.get(f"{config.LABEL_PREFIX}.zonename")
        no_tls_verify_label = labels.get(f"{config.LABEL_PREFIX}.no_tls_verify", "false").lower() in ["true", "1", "t", "yes"]
        # Direct (non-indexed) hostname/service label pair.
        if hostname_label and service_label:
            if is_valid_hostname(hostname_label) and is_valid_service(service_label):
                hostnames_to_process.append({
                    "hostname": hostname_label, "service": service_label, "zone_name": zone_name_label,
                    "no_tls_verify": no_tls_verify_label,
                    "access_policy_type": default_access_policy_type_label,
                    "access_app_name": default_access_app_name_label,
                    "access_session_duration": default_access_session_duration_label,
                    "access_app_launcher_visible": default_access_app_launcher_visible_label,
                    "access_allowed_idps_str": default_access_allowed_idps_label_str,
                    "access_auto_redirect": default_access_auto_redirect_label,
                    "access_custom_rules_str": default_access_custom_rules_label_str
                })
            else:
                logging.warning(f"Ignoring invalid direct label pair for {container_name_val}: Hostname '{hostname_label}', Service '{service_label}'")
        # Indexed entries: <prefix>.0.hostname, <prefix>.1.hostname, ...
        # Iteration stops at the first missing index.
        index = 0
        while True:
            prefix = f"{config.LABEL_PREFIX}.{index}"
            hostname_indexed = labels.get(f"{prefix}.hostname")
            if not hostname_indexed: break
            # Each indexed field falls back to the corresponding direct/default label.
            service_indexed = labels.get(f"{prefix}.service", service_label)
            if not service_indexed:
                logging.warning(f"Ignoring indexed hostname {hostname_indexed} for {container_name_val}: Missing service for index {index} and no default service label.")
                index += 1
                continue
            zone_name_indexed = labels.get(f"{prefix}.zonename", zone_name_label)
            no_tls_verify_indexed_val = labels.get(f"{prefix}.no_tls_verify", str(no_tls_verify_label).lower())
            no_tls_verify_indexed = no_tls_verify_indexed_val.lower() in ["true", "1", "t", "yes"]
            access_policy_type_indexed = labels.get(f"{prefix}.access.policy", default_access_policy_type_label)
            access_app_name_indexed = labels.get(f"{prefix}.access.name", default_access_app_name_label)
            access_session_duration_indexed = labels.get(f"{prefix}.access.session_duration", default_access_session_duration_label)
            acc_launcher_val_idx = labels.get(f"{prefix}.access.app_launcher_visible", str(default_access_app_launcher_visible_label).lower())
            access_app_launcher_visible_indexed = acc_launcher_val_idx.lower() in ["true", "1", "t", "yes"]
            access_allowed_idps_indexed_str = labels.get(f"{prefix}.access.allowed_idps", default_access_allowed_idps_label_str)
            acc_redirect_val_idx = labels.get(f"{prefix}.access.auto_redirect_to_identity", str(default_access_auto_redirect_label).lower())
            access_auto_redirect_indexed = acc_redirect_val_idx.lower() in ["true", "1", "t", "yes"]
            access_custom_rules_indexed_str = labels.get(f"{prefix}.access.custom_rules", default_access_custom_rules_label_str)
            if is_valid_hostname(hostname_indexed) and is_valid_service(service_indexed):
                hostnames_to_process.append({
                    "hostname": hostname_indexed, "service": service_indexed, "zone_name": zone_name_indexed,
                    "no_tls_verify": no_tls_verify_indexed,
                    "access_policy_type": access_policy_type_indexed,
                    "access_app_name": access_app_name_indexed,
                    "access_session_duration": access_session_duration_indexed,
                    "access_app_launcher_visible": access_app_launcher_visible_indexed,
                    "access_allowed_idps_str": access_allowed_idps_indexed_str,
                    "access_auto_redirect": access_auto_redirect_indexed,
                    "access_custom_rules_str": access_custom_rules_indexed_str
                })
            else:
                logging.warning(f"Ignoring invalid indexed label pair for {container_name_val} (idx {index}): Hostname '{hostname_indexed}', Service '{service_indexed}'")
            index += 1
        if not hostnames_to_process:
            logging.warning(f"No valid hostname configurations found for {container_name_val} ({container_id_val[:12]}) despite being enabled.")
            return
        logging.info(f"Found {len(hostnames_to_process)} hostname configurations for container {container_name_val}")
        state_changed_locally = False
        needs_tunnel_config_update_due_to_container = False
        for config_item in hostnames_to_process:
            hostname = config_item["hostname"]
            service = config_item["service"]
            zone_name_from_item = config_item["zone_name"]
            no_tls_verify_from_item = config_item["no_tls_verify"]
            # Resolve the zone: a per-hostname label beats the global default.
            target_zone_id = None
            if zone_name_from_item:
                target_zone_id = get_zone_id_from_name(zone_name_from_item)
                if not target_zone_id:
                    logging.error(f"Failed to find Zone ID for '{zone_name_from_item}' for hostname {hostname}. Skipping this hostname.")
                    continue
            elif config.CF_ZONE_ID:
                target_zone_id = config.CF_ZONE_ID
            else:
                logging.error(f"Cannot manage DNS for {hostname}: No Zone ID (label or default). Skipping.")
                continue
            with state_lock:
                existing_rule = managed_rules.get(hostname)
                # Manual (UI-created) rules are never overwritten by labels.
                if existing_rule and existing_rule.get("source") == "manual":
                    logging.warning(f"Container {container_name_val} wants hostname '{hostname}', but it's a manual entry. Skipping for this container.")
                    continue
                # Shallow snapshot for change detection after mutation below.
                current_rule_copy = existing_rule.copy() if existing_rule else {}
                if existing_rule:
                    # Container came back: cancel any pending grace-period deletion.
                    if existing_rule.get("status") == "pending_deletion":
                        existing_rule["status"] = "active"
                        existing_rule["delete_at"] = None
                        needs_tunnel_config_update_due_to_container = True
                    existing_rule["service"] = service
                    existing_rule["container_id"] = container_id_val
                    existing_rule["zone_id"] = target_zone_id
                    existing_rule["no_tls_verify"] = no_tls_verify_from_item
                    existing_rule["source"] = "docker"
                    # Only ingress-relevant changes require a tunnel config push.
                    if (current_rule_copy.get("service") != service or
                        current_rule_copy.get("zone_id") != target_zone_id or
                        current_rule_copy.get("no_tls_verify") != no_tls_verify_from_item or
                        current_rule_copy.get("status") == "pending_deletion"):
                        needs_tunnel_config_update_due_to_container = True
                    if current_rule_copy != existing_rule:
                        state_changed_locally = True
                else: # New rule
                    managed_rules[hostname] = {
                        "service": service, "container_id": container_id_val,
                        "status": "active", "delete_at": None, "zone_id": target_zone_id,
                        "no_tls_verify": no_tls_verify_from_item,
                        "access_app_id": None, "access_policy_type": None,
                        "access_app_config_hash": None, "access_policy_ui_override": False,
                        "source": "docker"
                    }
                    existing_rule = managed_rules[hostname]
                    state_changed_locally = True
                    needs_tunnel_config_update_due_to_container = True
                # UI-managed Access policies take precedence over labels.
                if existing_rule.get("access_policy_ui_override", False):
                    logging.info(f"Access policy for {hostname} is UI-managed. Skipping label-based Access Policy processing.")
                else:
                    if handle_access_policy_from_labels(config_item, existing_rule, None):
                        state_changed_locally = True
        if state_changed_locally:
            save_state()
        if needs_tunnel_config_update_due_to_container:
            logging.info(f"Triggering Cloudflare tunnel config update due to changes for container {container_name_val}.")
            if update_cloudflare_config(): # From tunnel_manager
                logging.info(f"Tunnel config update successful for container {container_name_val}.")
                # Ingress is live; now ensure a DNS CNAME exists per hostname.
                effective_tunnel_id = tunnel_state.get("id") if not config.USE_EXTERNAL_CLOUDFLARED else config.EXTERNAL_TUNNEL_ID
                if effective_tunnel_id:
                    for config_item_dns in hostnames_to_process:
                        hostname_dns = config_item_dns["hostname"]
                        zone_name_dns = config_item_dns["zone_name"]
                        target_zone_id_dns_create = get_zone_id_from_name(zone_name_dns) if zone_name_dns else config.CF_ZONE_ID
                        # NOTE(review): managed_rules is read here without state_lock — confirm intended.
                        if managed_rules.get(hostname_dns, {}).get("source") == "manual": continue
                        if target_zone_id_dns_create:
                            dns_record_id_status = create_cloudflare_dns_record(target_zone_id_dns_create, hostname_dns, effective_tunnel_id)
                            # Non-None, non-sentinel return => record created/verified.
                            if dns_record_id_status and dns_record_id_status not in ["semaphore_timeout", "existing_record_unconfirmed"]:
                                logging.info(f"DNS record management in zone {target_zone_id_dns_create} for {hostname_dns} successful (ID/Status: {dns_record_id_status}).")
                            elif not dns_record_id_status:
                                logging.error(f"CRITICAL: Tunnel config for {hostname_dns} may be active but failed to create/verify DNS record in zone {target_zone_id_dns_create}!")
                                if cloudflared_agent_state: cloudflared_agent_state["last_action_status"] = f"Error: Failed creating DNS for {hostname_dns}."
                        else:
                            logging.error(f"Missing Zone ID for {hostname_dns} - cannot manage DNS record.")
                else:
                    logging.error(f"Missing effective Tunnel ID - cannot manage DNS records for {container_name_val}.")
            else:
                logging.error(f"Failed to update Cloudflare tunnel config for {container_name_val}. DNS records not managed.")
    except NotFound:
        logging.warning(f"Container {container_name_val} ({container_id_val[:12] if container_id_val else 'UnknownID'}) not found during start processing.")
    except APIError as e:
        logging.error(f"Docker API error processing start for {container_name_val}: {e}", exc_info=True)
    except requests.exceptions.ConnectionError as e:
        logging.error(f"Docker connection error processing start for {container_name_val}: {e}", exc_info=True)
    except Exception as e:
        logging.error(f"Unexpected error processing start for {container_name_val}: {e}", exc_info=True)
def schedule_container_stop(container_id_val):
    """Mark every active, Docker-sourced rule owned by a stopped container
    for deferred deletion.

    Each affected rule is moved to 'pending_deletion' with a 'delete_at'
    timestamp GRACE_PERIOD_SECONDS in the future; the cleanup task performs
    the actual removal once the grace period elapses.  State is persisted
    only if at least one rule actually changed.
    """
    from datetime import datetime, timedelta, timezone
    if not container_id_val:
        return
    logging.info(f"Processing stop event for container {container_id_val[:12]}.")
    any_rule_updated = False
    with state_lock:
        # Only rules created from Docker labels and currently active qualify;
        # manual rules are never touched by container lifecycle events.
        affected_hostnames = [
            name for name, details in managed_rules.items()
            if details.get("container_id") == container_id_val
            and details.get("status") == "active"
            and details.get("source", "docker") == "docker"
        ]
        if not affected_hostnames:
            logging.info(f"Stop event for {container_id_val[:12]}, but it didn't manage any active Docker-sourced rules.")
        else:
            for name in affected_hostnames:
                rule = managed_rules[name]
                if rule.get("status") == "pending_deletion":
                    logging.info(f"Rule for {name} was already pending deletion.")
                    continue
                rule["status"] = "pending_deletion"
                rule["delete_at"] = datetime.now(timezone.utc) + timedelta(seconds=config.GRACE_PERIOD_SECONDS)
                logging.info(f"Rule for {name} (from container {container_id_val[:12]}) scheduled for deletion at {rule['delete_at'].isoformat()}")
                any_rule_updated = True
    # save_state() acquires state_lock itself, so persist after releasing it.
    if any_rule_updated:
        save_state()
def docker_event_listener(stop_event_param):
    """Long-running worker: subscribe to the Docker event stream and react to
    container lifecycle events.

    'start' events trigger ingress-rule creation via process_container_start;
    'stop'/'die'/'destroy'/'kill' events schedule rule removal via
    schedule_container_stop.  The stream is re-established after transient
    errors; the loop exits when stop_event_param is set or after max_errors
    consecutive stream-level failures.

    Args:
        stop_event_param: threading.Event used to request shutdown.
    """
    if not docker_client:
        logging.error("Docker client unavailable, event listener cannot start.")
        return
    logging.info("Starting Docker event listener...")
    error_count = 0  # consecutive stream-level failures; reset on reconnect
    max_errors = 5   # give up after this many failures in a row
    if stop_event_param is None:
        logging.error("docker_event_listener called with None stop_event_param. Listener will not run correctly.")
        return
    while not stop_event_param.is_set() and error_count < max_errors:
        try:
            logging.info("Connecting to Docker event stream...")
            # 'since=now' avoids replaying historical events on (re)connect.
            events = docker_client.events(decode=True, since=int(time.time()))
            logging.info("Successfully connected to Docker event stream.")
            error_count = 0 # Reset on successful connection
            for event in events:
                if stop_event_param.is_set():
                    logging.info("Stop event received in listener, exiting loop.")
                    break
                ev_type = event.get("Type")
                action = event.get("Action")
                actor = event.get("Actor", {})
                cont_id = actor.get("ID")
                logging.debug(f"Docker Event: Type={ev_type}, Action={action}, ID={cont_id[:12] if cont_id else 'N/A'}")
                if ev_type == "container" and cont_id:
                    if action == "start":
                        container_instance = None
                        # Labels may not be fully visible immediately after
                        # 'start'; retry up to 3 times with a short backoff.
                        for attempt in range(3):
                            try:
                                container_instance = docker_client.containers.get(cont_id)
                                # First attempt: if the enable label is absent,
                                # wait briefly and reload before checking again.
                                if attempt == 0 and not container_instance.labels.get(f"{config.LABEL_PREFIX}.enable"):
                                    time.sleep(0.2)
                                    container_instance.reload()
                                if container_instance.labels.get(f"{config.LABEL_PREFIX}.hostname") or container_instance.labels.get(f"{config.LABEL_PREFIX}.0.hostname"):
                                    logging.debug(f"Container {cont_id[:12]} details retrieved on attempt {attempt+1}.")
                                    break
                                else:
                                    logging.debug(f"Container {cont_id[:12]} found but key labels missing, retrying ({attempt+1}/3)...")
                            except NotFound:
                                logging.debug(f"Container {cont_id[:12]} not found on attempt {attempt+1}, retrying...")
                            except APIError as e_get_cont:
                                logging.error(f"Docker API error getting container {cont_id[:12]} on attempt {attempt+1}: {e_get_cont}")
                                break
                            except requests.exceptions.ConnectionError as e_conn_cont:
                                logging.error(f"Docker connection error getting container {cont_id[:12]}: {e_conn_cont}")
                                # Escalate to the outer handler so the whole
                                # stream connection is rebuilt.
                                raise
                            except Exception as e_unexp_cont:
                                logging.error(f"Unexpected error getting container {cont_id[:12]} details: {e_unexp_cont}", exc_info=True)
                                break
                            if attempt < 2: time.sleep(0.2 * (attempt + 1))
                        # for-else: runs only when all 3 attempts completed
                        # without a 'break' (labels never appeared).
                        else: logging.warning(f"Failed to get container {cont_id[:12]} details or key labels after multiple attempts.")
                        # NOTE(review): container_instance may still be set from
                        # the last attempt even when the warning above fired, so
                        # processing proceeds on a best-effort basis.
                        if container_instance:
                            try:
                                process_container_start(container_instance)
                            except Exception as e_proc_start:
                                logging.error(f"Error processing start event for {cont_id[:12]}: {e_proc_start}", exc_info=True)
                    elif action in ["stop", "die", "destroy", "kill"]:
                        try:
                            schedule_container_stop(cont_id)
                        except Exception as e_proc_stop:
                            logging.error(f"Error processing stop/die/destroy/kill event for {cont_id[:12]}: {e_proc_stop}", exc_info=True)
        except requests.exceptions.ConnectionError as e_conn_stream: # From docker_client.events()
            error_count += 1
            logging.error(f"Docker listener connection error: {e_conn_stream}. Reconnecting ({error_count}/{max_errors})...")
            # Linear backoff, capped at 30s; wake early if shutdown requested.
            if not stop_event_param.is_set(): stop_event_param.wait(min(30, 2 * error_count)) # Increased base wait
        except APIError as e_api_stream:
            error_count += 1
            logging.error(f"Docker listener API error: {e_api_stream}. Reconnecting ({error_count}/{max_errors})...")
            if not stop_event_param.is_set(): stop_event_param.wait(min(30, 2 * error_count))
        except Exception as e_unexp_stream:
            error_count += 1
            logging.error(f"Unexpected error in Docker event listener: {e_unexp_stream}. Reconnecting ({error_count}/{max_errors})...", exc_info=True)
            if not stop_event_param.is_set(): stop_event_param.wait(min(30, 2 * error_count))
        if stop_event_param.is_set():
            break
    if error_count >= max_errors:
        logging.error("Docker event listener stopping after multiple consecutive errors.")
    logging.info("Docker event listener stopped.")

View file

@ -0,0 +1,409 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/core/reconciler.py
import logging
import time
import threading
from datetime import datetime, timedelta, timezone
import json
from app import config, docker_client, tunnel_state, cloudflared_agent_state, app as flask_app
from app.core.state_manager import managed_rules, state_lock, save_state
from app.core.cloudflare_api import (
get_zone_id_from_name,
create_cloudflare_dns_record,
delete_cloudflare_dns_record
)
from app.core.access_manager import (
handle_access_policy_from_labels,
delete_cloudflare_access_application
)
from app.core.tunnel_manager import update_cloudflare_config
def _get_hostname_configs_from_container(container_obj):
    """Extract every DockFlare hostname/ingress configuration from a
    container's labels.

    Two label layouts are supported and may be combined on one container:

    * direct:  ``<prefix>.hostname`` + ``<prefix>.service`` (one entry)
    * indexed: ``<prefix>.<N>.hostname`` (+ optional per-index overrides),
      scanned from N=0 upward until the first missing hostname label.

    Indexed entries inherit any value they do not set (service, zone name,
    TLS verification, and every ``access.*`` setting) from the direct /
    container-wide labels.

    Args:
        container_obj: docker-py Container with .labels, .id and .name.

    Returns:
        list[dict]: one config dict per hostname, carrying routing and
        Cloudflare Access settings plus the source container's id and name.
    """
    def _is_truthy(raw_value):
        # Single definition of which label strings count as boolean "true";
        # previously this expression was duplicated at every call site.
        return raw_value.lower() in ["true", "1", "t", "yes"]

    labels = container_obj.labels
    container_id_val = container_obj.id
    container_name_val = container_obj.name
    hostnames_configs = []
    # Container-wide Access policy defaults (indexed entries may override).
    default_access_policy_type = labels.get(f"{config.LABEL_PREFIX}.access.policy")
    default_access_app_name = labels.get(f"{config.LABEL_PREFIX}.access.name")
    default_session_duration = labels.get(f"{config.LABEL_PREFIX}.access.session_duration", "24h")
    default_app_launcher_visible = _is_truthy(labels.get(f"{config.LABEL_PREFIX}.access.app_launcher_visible", "false"))
    default_allowed_idps_str = labels.get(f"{config.LABEL_PREFIX}.access.allowed_idps")
    default_auto_redirect = _is_truthy(labels.get(f"{config.LABEL_PREFIX}.access.auto_redirect_to_identity", "false"))
    default_custom_rules_str = labels.get(f"{config.LABEL_PREFIX}.access.custom_rules")
    h_main = labels.get(f"{config.LABEL_PREFIX}.hostname")
    s_main = labels.get(f"{config.LABEL_PREFIX}.service")
    zn_main = labels.get(f"{config.LABEL_PREFIX}.zonename")
    ntv_main_str = labels.get(f"{config.LABEL_PREFIX}.no_tls_verify", "false")
    ntv_main = _is_truthy(ntv_main_str)
    if h_main and s_main: # Direct labels
        hostnames_configs.append({
            "hostname": h_main, "service": s_main, "zone_name": zn_main, "no_tls_verify": ntv_main,
            "container_id": container_id_val, "container_name": container_name_val,
            "access_policy_type": default_access_policy_type,
            "access_app_name": default_access_app_name,
            "access_session_duration": default_session_duration,
            "access_app_launcher_visible": default_app_launcher_visible,
            "access_allowed_idps_str": default_allowed_idps_str,
            "access_auto_redirect": default_auto_redirect,
            "access_custom_rules_str": default_custom_rules_str
        })
    idx = 0
    while True:
        pfx = f"{config.LABEL_PREFIX}.{idx}"
        h_idx = labels.get(f"{pfx}.hostname")
        if not h_idx:
            break  # indexed entries must be contiguous starting at 0
        s_idx = labels.get(f"{pfx}.service", s_main)
        if not s_idx:
            # Hostname without any usable service (neither indexed nor the
            # direct fallback): skip this index but keep scanning.
            idx += 1
            continue
        zn_idx = labels.get(f"{pfx}.zonename", zn_main)
        ntv_idx = _is_truthy(labels.get(f"{pfx}.no_tls_verify", ntv_main_str))
        acc_pol_idx = labels.get(f"{pfx}.access.policy", default_access_policy_type)
        acc_name_idx = labels.get(f"{pfx}.access.name", default_access_app_name)
        acc_sess_idx = labels.get(f"{pfx}.access.session_duration", default_session_duration)
        # Booleans fall back to the stringified container-wide default so an
        # absent per-index label inherits the resolved default value.
        acc_vis_idx = _is_truthy(labels.get(f"{pfx}.access.app_launcher_visible", str(default_app_launcher_visible).lower()))
        acc_idps_idx = labels.get(f"{pfx}.access.allowed_idps", default_allowed_idps_str)
        acc_redir_idx = _is_truthy(labels.get(f"{pfx}.access.auto_redirect_to_identity", str(default_auto_redirect).lower()))
        acc_custom_idx = labels.get(f"{pfx}.access.custom_rules", default_custom_rules_str)
        hostnames_configs.append({
            "hostname": h_idx, "service": s_idx, "zone_name": zn_idx, "no_tls_verify": ntv_idx,
            "container_id": container_id_val, "container_name": container_name_val,
            "access_policy_type": acc_pol_idx, "access_app_name": acc_name_idx,
            "access_session_duration": acc_sess_idx, "access_app_launcher_visible": acc_vis_idx,
            "access_allowed_idps_str": acc_idps_idx, "access_auto_redirect": acc_redir_idx,
            "access_custom_rules_str": acc_custom_idx
        })
        idx += 1
    return hostnames_configs
def _run_reconciliation_logic():
    """Perform one full reconciliation pass between Docker labels, local
    managed_rules state, and Cloudflare (tunnel config + DNS).

    Phases, each with its own soft time budget inside max_total_time:
      1. Scan running containers (in small batches) for DockFlare labels.
      2. Under state_lock: create/update rules for labelled hostnames, and
         mark Docker-sourced rules whose containers disappeared as
         'pending_deletion'.  Manual rules are never modified here.
      3. Persist state, push the tunnel ingress config (unless external
         cloudflared), and create/verify DNS records.

    Progress is published in flask_app.reconciliation_info for the UI.
    Intended to run on a background thread (see reconcile_state_threaded).
    """
    logging.info("[Reconcile Thread] Starting state reconciliation logic...")
    needs_tunnel_config_update = False
    state_changed_locally = False
    max_total_time = 480  # hard ceiling (seconds) for the whole pass
    reconciliation_start_time = time.time()
    # Progress tracker polled by the web UI.
    flask_app.reconciliation_info = {
        "in_progress": True, "progress": 0, "total_items": 0,
        "processed_items": 0, "start_time": reconciliation_start_time,
        "status": "Initializing reconciliation..."
    }
    # hostname -> desired config dict for every labelled, running container.
    running_labeled_hostnames_details = {}
    try:
        flask_app.reconciliation_info["status"] = "Scanning containers for services and access policies..."
        containers = docker_client.containers.list(sparse=False, all=config.SCAN_ALL_NETWORKS)
        container_count = len(containers)
        flask_app.reconciliation_info["total_items"] = container_count
        processed_container_count = 0
        # Smaller batches in external mode; presumably to pace API usage —
        # TODO confirm intent.
        batch_size = 3 if not config.USE_EXTERNAL_CLOUDFLARED else 2
        for i in range(0, container_count, batch_size):
            # Scanning phase gets at most 60s; partial data is acceptable.
            if time.time() - reconciliation_start_time > 60:
                logging.warning("[Reconcile] Timeout during container scanning phase.")
                flask_app.reconciliation_info["status"] = "Container scan timeout (partial data)"
                break
            batch = containers[i:i+batch_size]
            processed_container_count += len(batch)
            flask_app.reconciliation_info["progress"] = min(100, int((processed_container_count / container_count) * 100)) if container_count > 0 else 0
            flask_app.reconciliation_info["processed_items"] = processed_container_count
            flask_app.reconciliation_info["status"] = f"Scanning containers: batch {i//batch_size + 1}/{(container_count+batch_size-1)//batch_size}"
            for c_obj in batch:
                try:
                    c_obj.reload()
                    if c_obj.labels.get(f"{config.LABEL_PREFIX}.enable", "false").lower() in ["true", "1", "t", "yes"]:
                        configs = _get_hostname_configs_from_container(c_obj)
                        for conf in configs:
                            # Last container scanned wins on duplicates.
                            if conf["hostname"] in running_labeled_hostnames_details:
                                logging.warning(f"[Reconcile] Duplicate hostname '{conf['hostname']}' found. Using from: {conf['container_name']}.")
                            running_labeled_hostnames_details[conf["hostname"]] = conf
                except Exception as e_cont_scan:
                    logging.error(f"[Reconcile] Error processing container {c_obj.id[:12] if c_obj and c_obj.id else 'N/A'}: {e_cont_scan}")
        logging.info(f"[Reconcile] Found {len(running_labeled_hostnames_details)} running hostnames with DockFlare labels.")
    except Exception as e_phase1:
        logging.error(f"[Reconcile] Error in container scanning phase: {e_phase1}", exc_info=True)
        flask_app.reconciliation_info["status"] = f"Container scan error: {str(e_phase1)}"
    flask_app.reconciliation_info["status"] = "Comparing state and reconciling cloud resources..."
    flask_app.reconciliation_info["total_items"] = len(running_labeled_hostnames_details) + len(managed_rules)
    flask_app.reconciliation_info["processed_items"] = 0 # Reset for this phase
    processed_reconcile_items = 0
    # (hostname, zone_id) pairs whose DNS records must be created/verified.
    hostnames_requiring_dns_setup = []
    with state_lock:
        now_utc = datetime.now(timezone.utc)
        current_managed_hostnames_in_state = set(managed_rules.keys())
        # Pass 1: every hostname currently declared by a running container.
        for hostname, desired_details in running_labeled_hostnames_details.items():
            processed_reconcile_items +=1
            flask_app.reconciliation_info["processed_items"] = processed_reconcile_items
            flask_app.reconciliation_info["progress"] = min(100, int((processed_reconcile_items / flask_app.reconciliation_info["total_items"]) * 100)) if flask_app.reconciliation_info["total_items"] > 0 else 0
            flask_app.reconciliation_info["status"] = f"Reconciling (active): {hostname}"
            # Leave 30s of budget for the phases after this loop.
            if time.time() - reconciliation_start_time > max_total_time - 30: break
            existing_rule = managed_rules.get(hostname)
            # Manual (UI-created) rules are never overwritten by labels.
            if existing_rule and existing_rule.get("source") == "manual":
                continue
            target_zone_id = get_zone_id_from_name(desired_details["zone_name"]) if desired_details["zone_name"] else config.CF_ZONE_ID
            if not target_zone_id:
                logging.error(f"[Reconcile] No zone ID for {hostname}, skipping its reconciliation.")
                continue
            if not existing_rule:
                # Brand-new rule discovered from labels.
                managed_rules[hostname] = {
                    "service": desired_details["service"], "container_id": desired_details["container_id"],
                    "status": "active", "delete_at": None, "zone_id": target_zone_id,
                    "no_tls_verify": desired_details["no_tls_verify"],
                    "access_app_id": None, "access_policy_type": None, "access_app_config_hash": None,
                    "access_policy_ui_override": False, "source": "docker"
                }
                existing_rule = managed_rules[hostname]
                state_changed_locally = True
                needs_tunnel_config_update = True
                hostnames_requiring_dns_setup.append((hostname, target_zone_id))
            else:
                # Existing rule: revive it if pending deletion and sync each
                # field from the current labels.
                changed_in_reconcile = False
                if existing_rule.get("status") == "pending_deletion":
                    existing_rule["status"] = "active"; existing_rule["delete_at"] = None
                    changed_in_reconcile = True; needs_tunnel_config_update = True
                if existing_rule.get("service") != desired_details["service"]:
                    existing_rule["service"] = desired_details["service"]; changed_in_reconcile = True; needs_tunnel_config_update = True
                if existing_rule.get("no_tls_verify") != desired_details["no_tls_verify"]:
                    existing_rule["no_tls_verify"] = desired_details["no_tls_verify"]; changed_in_reconcile = True; needs_tunnel_config_update = True
                if existing_rule.get("zone_id") != target_zone_id:
                    existing_rule["zone_id"] = target_zone_id; changed_in_reconcile = True; needs_tunnel_config_update = True # DNS needs re-check for new zone
                if existing_rule.get("container_id") != desired_details["container_id"]:
                    existing_rule["container_id"] = desired_details["container_id"]; changed_in_reconcile = True
                existing_rule["source"] = "docker"
                if changed_in_reconcile: state_changed_locally = True
                # Always re-verify DNS for live hostnames, even if unchanged.
                hostnames_requiring_dns_setup.append((hostname, target_zone_id))
            if existing_rule.get("access_policy_ui_override", False):
                pass # Skip label processing if UI override
            else:
                if handle_access_policy_from_labels(desired_details, existing_rule, None):
                    state_changed_locally = True
        # Pass 2: rules in state whose container/labels no longer exist.
        hostnames_in_state_but_not_running = list(current_managed_hostnames_in_state - set(running_labeled_hostnames_details.keys()))
        for hostname_to_check in hostnames_in_state_but_not_running:
            processed_reconcile_items +=1
            flask_app.reconciliation_info["processed_items"] = processed_reconcile_items
            if time.time() - reconciliation_start_time > max_total_time - 20: break
            rule = managed_rules.get(hostname_to_check)
            if rule and rule.get("status") == "active" and rule.get("source", "docker") == "docker":
                # Orphaned Docker rule: start the grace-period countdown.
                logging.info(f"[Reconcile] Docker-managed rule {hostname_to_check} active but container/labels gone. Marking for deletion.")
                rule["status"] = "pending_deletion"
                rule["delete_at"] = now_utc + timedelta(seconds=config.GRACE_PERIOD_SECONDS)
                state_changed_locally = True
            elif rule and rule.get("source") == "manual" and rule.get("zone_id"):
                # Manual rules persist regardless of containers; keep their
                # DNS records verified.
                hostnames_requiring_dns_setup.append((hostname_to_check, rule.get("zone_id")))
    # save_state() acquires state_lock itself, so it is called only after
    # the lock above has been released.
    if state_changed_locally:
        flask_app.reconciliation_info["status"] = "Saving reconciled state..."
        save_state()
    if time.time() - reconciliation_start_time > max_total_time - 15:
        logging.warning("[Reconcile] Timeout before Tunnel/DNS operations.")
        needs_tunnel_config_update = False # Skip if timeout
    if needs_tunnel_config_update:
        flask_app.reconciliation_info["status"] = "Updating Cloudflare tunnel configuration..."
        if not config.USE_EXTERNAL_CLOUDFLARED:
            if not update_cloudflare_config():
                logging.error("[Reconcile] Failed to update Cloudflare tunnel configuration.")
                flask_app.reconciliation_info["status"] = "Error: Failed tunnel config update."
            else:
                logging.info("[Reconcile] Cloudflare tunnel configuration updated successfully.")
                flask_app.reconciliation_info["status"] = "Tunnel configuration updated."
        else:
            logging.info("[Reconcile] External mode: Skipping DockFlare-managed tunnel config update.")
            flask_app.reconciliation_info["status"] = "Tunnel config update skipped (external mode)."
    if hostnames_requiring_dns_setup:
        dns_total = len(hostnames_requiring_dns_setup)
        flask_app.reconciliation_info["status"] = f"Setting up DNS for {dns_total} hostnames..."
        dns_processed_count = 0
        # DNS records must point at the tunnel DockFlare manages, or the
        # externally provided one in external mode.
        effective_tunnel_id_for_dns = tunnel_state.get("id") if not config.USE_EXTERNAL_CLOUDFLARED else config.EXTERNAL_TUNNEL_ID
        if effective_tunnel_id_for_dns:
            # De-duplicate (a hostname can be queued by both passes above).
            unique_dns_setups = list(set(hostnames_requiring_dns_setup))
            logging.info(f"[Reconcile] Unique hostnames for DNS setup/check: {len(unique_dns_setups)}")
            for hostname_dns, zone_id_dns in unique_dns_setups:
                dns_processed_count +=1
                flask_app.reconciliation_info["status"] = f"DNS for {hostname_dns} ({dns_processed_count}/{len(unique_dns_setups)})"
                if time.time() - reconciliation_start_time > max_total_time - 5: break
                create_cloudflare_dns_record(zone_id_dns, hostname_dns, effective_tunnel_id_for_dns)
                # Small pacing delay in external mode — presumably to ease
                # API rate pressure; TODO confirm.
                if config.USE_EXTERNAL_CLOUDFLARED: time.sleep(0.1)
        else:
            logging.error("[Reconcile] Cannot setup DNS: Effective tunnel ID is missing.")
            flask_app.reconciliation_info["status"] = "Error: Missing tunnel ID for DNS setup."
    # Finalize the progress tracker for the UI.
    flask_app.reconciliation_info["in_progress"] = False
    flask_app.reconciliation_info["progress"] = 100
    final_status = flask_app.reconciliation_info.get("status", "Reconciliation finished.")
    if not final_status.endswith("(Final)"): final_status += " (Final)"
    flask_app.reconciliation_info["status"] = final_status
    flask_app.reconciliation_info["completed_at"] = time.time()
    duration = flask_app.reconciliation_info["completed_at"] - flask_app.reconciliation_info["start_time"]
    logging.info(f"[Reconcile Thread] Reconciliation complete. Duration: {duration:.2f}s. Status: {flask_app.reconciliation_info['status']}")
def reconcile_state_threaded():
    """Kick off a single background reconciliation pass.

    Guards: requires a Docker client and an initialized tunnel (local ID or
    EXTERNAL_TUNNEL_ID), and refuses to start while a previous pass is still
    marked in-progress.  Returns immediately; the actual work happens in
    _run_reconciliation_logic on a daemon thread.
    """
    if not docker_client:
        logging.warning("Docker client unavailable, skipping reconciliation.")
        return
    if not tunnel_state.get("id") and not config.EXTERNAL_TUNNEL_ID:
        logging.warning("Tunnel not initialized (no ID), skipping reconciliation.")
        return
    if not hasattr(flask_app, 'reconciliation_info'):
        # The tracker should be created at app startup.  Recover by creating
        # a default one so the in-progress check below works — the original
        # message claimed reconciliation could not start, but it proceeded
        # anyway, so the log now matches the actual behavior.
        logging.warning("flask_app.reconciliation_info was not initialized; creating a default tracker and continuing.")
        flask_app.reconciliation_info = {"in_progress": False}
    if flask_app.reconciliation_info.get("in_progress", False):
        logging.info("Reconciliation is already in progress. Skipping new request.")
        return
    reconcile_thread = threading.Thread(
        target=_run_reconciliation_logic,
        name="ReconciliationThread",
        daemon=True
    )
    reconcile_thread.start()
    logging.info(f"Started reconciliation in background thread {reconcile_thread.name}")
def cleanup_expired_rules(stop_event_param):
    """Background worker: periodically remove rules whose grace period expired.

    Every config.CLEANUP_INTERVAL_SECONDS this loop:
      1. collects Docker-sourced rules in 'pending_deletion' whose
         'delete_at' has passed (a missing/invalid 'delete_at' counts as
         already expired),
      2. deletes their Cloudflare DNS records and Access applications,
      3. pushes the updated tunnel ingress config (unless running against an
         external cloudflared), and only then
      4. drops the rules from local state and persists it.

    Manual rules found in 'pending_deletion' are treated as a state error
    and reset to 'active'.  Runs until stop_event_param is set.

    Args:
        stop_event_param: threading.Event used to request shutdown.
    """
    logging.info("Starting cleanup task for expired rules...")
    if stop_event_param is None:
        logging.error("cleanup_expired_rules called with None stop_event_param. Task will not run correctly.")
        return
    while not stop_event_param.is_set():
        # Fix the next wake-up before doing the work so the interval stays
        # steady regardless of how long this pass takes.
        next_check_time = time.time() + config.CLEANUP_INTERVAL_SECONDS
        try:
            logging.debug("Running cleanup check for expired rules...")
            rules_to_process_for_deletion = {}
            now_utc = datetime.now(timezone.utc)
            state_changed_in_cleanup = False
            with state_lock:
                # list() snapshot so rule dicts can be mutated while scanning.
                for hostname, details in list(managed_rules.items()):
                    if details.get("status") == "pending_deletion" and details.get("source", "docker") == "docker":
                        delete_at = details.get("delete_at")
                        is_expired = False
                        if isinstance(delete_at, datetime):
                            # Normalize to UTC; a naive datetime is assumed UTC.
                            delete_at_utc = delete_at.astimezone(timezone.utc) if delete_at.tzinfo else delete_at.replace(tzinfo=timezone.utc)
                            if delete_at_utc <= now_utc:
                                is_expired = True
                        else:
                            logging.warning(f"Rule {hostname} pending delete but has invalid/missing delete_at: {delete_at}. Marking for immediate deletion.")
                            is_expired = True
                        if is_expired:
                            # Capture just what the Cloudflare-side deletes
                            # need so the API calls can run outside the lock.
                            rules_to_process_for_deletion[hostname] = {
                                "zone_id": details.get("zone_id", config.CF_ZONE_ID),
                                "access_app_id": details.get("access_app_id")
                            }
                    elif details.get("source") == "manual" and details.get("status") == "pending_deletion":
                        logging.warning(f"Manual rule {hostname} found 'pending_deletion'. Resetting to 'active'.")
                        details["status"] = "active"; details["delete_at"] = None
                        state_changed_in_cleanup = True
            # save_state() acquires state_lock itself, so persist only after
            # releasing the lock above.  When deletions are queued, saving is
            # deferred until after they complete.
            if state_changed_in_cleanup and not rules_to_process_for_deletion:
                save_state()
            if rules_to_process_for_deletion:
                hostnames_fully_cleaned = []
                effective_tunnel_id_cleanup = tunnel_state.get("id") if not config.USE_EXTERNAL_CLOUDFLARED else config.EXTERNAL_TUNNEL_ID
                for hostname, delete_info in rules_to_process_for_deletion.items():
                    zone_id_del = delete_info["zone_id"]
                    access_app_id_del = delete_info["access_app_id"]
                    dns_deleted = False
                    if zone_id_del and effective_tunnel_id_cleanup:
                        if delete_cloudflare_dns_record(zone_id_del, hostname, effective_tunnel_id_cleanup):
                            dns_deleted = True
                        else: logging.error(f"Failed DNS delete for expired rule {hostname} in zone {zone_id_del}.")
                    elif not zone_id_del: logging.warning(f"Skipping DNS delete for {hostname}: Zone ID unavailable.")
                    elif not effective_tunnel_id_cleanup: logging.warning(f"Skipping DNS delete for {hostname}: Tunnel ID unavailable.")
                    access_app_deleted = False
                    if access_app_id_del:
                        if delete_cloudflare_access_application(access_app_id_del):
                            access_app_deleted = True
                        else: logging.error(f"Failed Access App delete for {hostname}, App ID: {access_app_id_del}.")
                    else: access_app_deleted = True # No app to delete
                    # NOTE(review): the rule is queued for local removal even
                    # when dns_deleted/access_app_deleted are False — failures
                    # above are logged but do not block state cleanup.
                    hostnames_fully_cleaned.append(hostname)
                if hostnames_fully_cleaned:
                    config_updated_after_delete = False
                    if not config.USE_EXTERNAL_CLOUDFLARED:
                        if update_cloudflare_config():
                            config_updated_after_delete = True
                        else:
                            logging.error("Failed to update Cloudflare tunnel config during rule cleanup. Rules may remain in local state temporarily.")
                    else:
                        # External mode: DockFlare does not own the tunnel
                        # config, so nothing to push.
                        config_updated_after_delete = True
                    if config_updated_after_delete:
                        with state_lock:
                            deleted_count = 0
                            for hostname_rem in hostnames_fully_cleaned:
                                # Re-check status: the rule may have been
                                # revived (container restarted) while the
                                # Cloudflare deletes were running.
                                if hostname_rem in managed_rules and managed_rules[hostname_rem].get("status") == "pending_deletion":
                                    del managed_rules[hostname_rem]
                                    deleted_count += 1
                        if deleted_count > 0:
                            logging.info(f"Removed {deleted_count} rules from local state after cleanup.")
                            save_state()
        except Exception as e_cleanup:
            logging.error(f"Error in cleanup task loop: {e_cleanup}", exc_info=True)
        # Sleep the remainder of the interval; wake early on shutdown.
        wait_duration = max(0, next_check_time - time.time())
        if not stop_event_param.is_set(): stop_event_param.wait(wait_duration)
    logging.info("Cleanup task for expired rules stopped.")

View file

@ -0,0 +1,136 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/core/state_manager.py
import json
import logging
import os
import threading
from datetime import datetime, timezone
from app import config
# In-memory map of hostname -> rule details (service, status, zone_id,
# access settings, source, ...).  Mutated by the reconciler and event
# handlers and persisted to config.STATE_FILE_PATH via save_state().
managed_rules = {}
# Guards all cross-thread access to managed_rules.  Non-reentrant:
# load_state() and save_state() acquire it themselves, so callers must not
# hold it when invoking them.
state_lock = threading.Lock()
def _deserialize_datetime(dt_str):
if not dt_str:
return None
try:
if dt_str.endswith('Z'):
dt = datetime.fromisoformat(dt_str.replace('Z', '+00:00'))
else:
dt = datetime.fromisoformat(dt_str)
return dt.replace(tzinfo=timezone.utc) if dt.tzinfo is None else dt.astimezone(timezone.utc)
except ValueError as date_err:
logging.warning(f"Could not parse datetime string '{dt_str}': {date_err}. Returning None.")
return None
def load_state():
    """Load persisted rules from config.STATE_FILE_PATH into managed_rules.

    Loaded entries are normalized: 'delete_at' strings are parsed back into
    UTC-aware datetimes, a missing 'zone_id' is back-filled with None, and
    optional fields introduced by newer versions get defaults so state files
    written by older versions remain usable.  Any load error results in an
    empty rule set (fresh start).

    Fix: managed_rules is now mutated in place (clear + update) instead of
    being rebound.  Other modules bind it via
    'from app.core.state_manager import managed_rules'; rebinding the global
    here would leave those aliases pointing at a stale dict.
    """
    state_dir = os.path.dirname(config.STATE_FILE_PATH)
    if not os.path.exists(state_dir):
        try:
            os.makedirs(state_dir, exist_ok=True)
            logging.info(f"Created directory for state file: {state_dir}")
        except OSError as e:
            logging.error(f"FATAL: Could not create directory for state file {state_dir}: {e}. State persistence will fail.")
            with state_lock:
                managed_rules.clear()
            return
    if not os.path.exists(config.STATE_FILE_PATH):
        logging.info(f"State file '{config.STATE_FILE_PATH}' not found, starting fresh.")
        with state_lock:
            managed_rules.clear()
        return
    with state_lock:
        try:
            with open(config.STATE_FILE_PATH, 'r') as f:
                loaded_data = json.load(f)
            processed_rules = {}
            for hostname, rule in loaded_data.items():
                rule_copy = rule.copy()
                delete_at_val = rule_copy.get("delete_at")
                if isinstance(delete_at_val, str):
                    rule_copy["delete_at"] = _deserialize_datetime(delete_at_val)
                elif not isinstance(delete_at_val, (datetime, type(None))):
                    logging.warning(f"Invalid type for delete_at for {hostname}: {type(delete_at_val)}. Setting to None.")
                    rule_copy["delete_at"] = None
                if "zone_id" not in rule_copy:
                    logging.warning(f"Rule for {hostname} loaded from state is missing 'zone_id'. Will attempt to re-determine on reconcile.")
                    rule_copy["zone_id"] = None
                # Back-fill fields added after older state files were written.
                rule_copy.setdefault("access_app_id", None)
                rule_copy.setdefault("access_policy_type", None)
                rule_copy.setdefault("access_app_config_hash", None)
                rule_copy.setdefault("access_policy_ui_override", False)
                rule_copy.setdefault("source", "docker")
                processed_rules[hostname] = rule_copy
            # In-place swap keeps every imported alias of managed_rules valid.
            managed_rules.clear()
            managed_rules.update(processed_rules)
            logging.info(f"Loaded state for {len(managed_rules)} rules from {config.STATE_FILE_PATH}")
        except (json.JSONDecodeError, IOError, OSError) as e:
            logging.error(f"Error loading state from {config.STATE_FILE_PATH}: {e}. Starting fresh.", exc_info=True)
            managed_rules.clear()
        except Exception as e:
            logging.error(f"Unexpected error during state loading from {config.STATE_FILE_PATH}: {e}. Starting fresh.", exc_info=True)
            managed_rules.clear()
def save_state():
    """Atomically persist managed_rules as JSON to config.STATE_FILE_PATH.

    Datetimes are serialized as ISO-8601 strings with a 'Z' suffix, a missing
    'zone_id' is back-filled with None (with a warning), optional fields get
    defaults, and the file is written via a temp file + os.replace so a crash
    never leaves a half-written state file behind.
    """
    global managed_rules
    snapshot = {}
    # Build a JSON-safe copy while holding the lock; file I/O happens after.
    with state_lock:
        for hostname, rule in managed_rules.items():
            entry = rule.copy()
            stamp = entry.get("delete_at")
            if isinstance(stamp, datetime):
                entry["delete_at"] = stamp.astimezone(timezone.utc).isoformat().replace('+00:00', 'Z')
            if "zone_id" not in entry:
                logging.warning(f"Attempting to save rule for {hostname} without zone_id!")
                entry["zone_id"] = None
            for optional_key, default_value in (
                ("access_app_id", None),
                ("access_policy_type", None),
                ("access_app_config_hash", None),
                ("access_policy_ui_override", False),
                ("source", "docker"),
            ):
                entry.setdefault(optional_key, default_value)
            snapshot[hostname] = entry
    try:
        state_dir = os.path.dirname(config.STATE_FILE_PATH)
        if not os.path.exists(state_dir):
            try:
                os.makedirs(state_dir, exist_ok=True)
                logging.info(f"Created directory {state_dir} before saving state.")
            except OSError as e:
                logging.error(f"Could not create directory {state_dir} for state file: {e}. Save failed.")
                return
        temp_file_path = config.STATE_FILE_PATH + ".tmp"
        with open(temp_file_path, 'w') as f:
            json.dump(snapshot, f, indent=2)
        # Atomic on POSIX: readers see either the old or the new file.
        os.replace(temp_file_path, config.STATE_FILE_PATH)
        logging.debug(f"Saved state for {len(managed_rules)} rules to {config.STATE_FILE_PATH}")
    except (IOError, OSError) as e:
        logging.error(f"Error saving state to {config.STATE_FILE_PATH}: {e}", exc_info=True)
    except Exception as e:
        logging.error(f"Unexpected error during state saving to {config.STATE_FILE_PATH}: {e}", exc_info=True)

View file

@ -0,0 +1,513 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/core/tunnel_manager.py
import logging
import json
import time
from app import config, docker_client
from app import tunnel_state, cloudflared_agent_state
from app.core import cloudflare_api
from app.core.state_manager import managed_rules, state_lock
from docker.errors import NotFound, APIError
import requests
def initialize_tunnel():
    """Resolve or create the Cloudflare tunnel and record the result in
    tunnel_state.

    Two modes:
      * External (USE_EXTERNAL_CLOUDFLARED): adopt EXTERNAL_TUNNEL_ID for
        DNS/route management only; no token is held and no agent is managed.
      * Managed: look up TUNNEL_NAME via the API, creating the tunnel if it
        does not exist, and store its id and token.

    All outcomes (including failures) are reported through
    tunnel_state["status_message"] / tunnel_state["error"] rather than by
    raising.
    """
    logging.info("Initializing tunnel...")
    # Startup diagnostics: log which credentials/settings are present.
    logging.info(f"Using Cloudflare Account ID: {config.CF_ACCOUNT_ID}")
    logging.info(f"API Token available: {'Yes' if config.CF_API_TOKEN else 'No'}")
    logging.info(f"Zone ID available: {'Yes: ' + config.CF_ZONE_ID if config.CF_ZONE_ID else 'No'}")
    logging.info(f"External mode: {config.USE_EXTERNAL_CLOUDFLARED}")
    logging.info(f"External tunnel ID: {config.EXTERNAL_TUNNEL_ID}")
    tunnel_state["status_message"] = "Checking tunnel configuration..."
    tunnel_state["error"] = None
    if config.USE_EXTERNAL_CLOUDFLARED:
        logging.info("External cloudflared configuration detected.")
        if config.EXTERNAL_TUNNEL_ID:
            tunnel_id = config.EXTERNAL_TUNNEL_ID
            logging.info(f"Using external tunnel ID: {tunnel_id}")
            tunnel_state["id"] = tunnel_id
            # No token in external mode: the agent is run outside DockFlare.
            tunnel_state["token"] = None
            tunnel_state["status_message"] = "Using external tunnel to manage DNS and inbound routes."
            logging.info(f"External tunnel (ID: {tunnel_id}) initialized for DNS and routes.")
            return
        else:
            logging.warning("USE_EXTERNAL_CLOUDFLARED is true but EXTERNAL_TUNNEL_ID is not provided.")
            tunnel_state["status_message"] = "Error: External tunnel config missing tunnel ID."
            tunnel_state["error"] = "External cloudflared enabled but missing tunnel ID."
            return
    if not config.TUNNEL_NAME:
        logging.error("TUNNEL_NAME not provided. Required when not using external cloudflared.")
        tunnel_state["status_message"] = "Error: Missing required TUNNEL_NAME."
        tunnel_state["error"] = "TUNNEL_NAME not provided."
        return
    try:
        tunnel_id, token = cloudflare_api.find_tunnel_via_api(config.TUNNEL_NAME)
        # Create only if the lookup found nothing AND did not record an error
        # (an errored lookup should not trigger a blind create).
        if not tunnel_id and not tunnel_state.get("error"):
            tunnel_state["status_message"] = f"Tunnel '{config.TUNNEL_NAME}' not found. Creating..."
            tunnel_id, token = cloudflare_api.create_tunnel_via_api(config.TUNNEL_NAME)
        if tunnel_id and token:
            tunnel_state["id"] = tunnel_id
            tunnel_state["token"] = token
            tunnel_state["status_message"] = "Tunnel setup complete (using API)."
            tunnel_state["error"] = None
            logging.info(f"Tunnel '{config.TUNNEL_NAME}' initialized. ID: {tunnel_id}")
        elif not tunnel_state.get("error"):
            tunnel_state["status_message"] = "Tunnel initialization failed."
            tunnel_state["error"] = "Failed to find/create tunnel or get token."
            logging.error(f"Tunnel init failed for '{config.TUNNEL_NAME}'.")
        else:
            # An earlier API call already populated tunnel_state["error"];
            # keep that detail and just summarize here.
            tunnel_state["status_message"] = "Tunnel initialization failed (see error details)."
    except requests.exceptions.RequestException as e:
        logging.error(f"API exception during tunnel initialization for '{config.TUNNEL_NAME}': {e}")
        if not tunnel_state.get("error"):
            tunnel_state["error"] = f"API error: {e}"
        tunnel_state["status_message"] = "Tunnel initialization failed (API error)."
    except Exception as e:
        logging.error(f"Unhandled exception during tunnel initialization for '{config.TUNNEL_NAME}': {e}", exc_info=True)
        if not tunnel_state.get("error"):
            tunnel_state["error"] = f"Unexpected init error: {e}"
        tunnel_state["status_message"] = "Tunnel initialization failed (unexpected error)."
def update_cloudflare_config():
    """Push the desired ingress rule set to the Cloudflare tunnel configuration.

    Builds the rule list from active entries in ``managed_rules``, preserves
    API-side catch-all and unmanaged wildcard rules, and PUTs the combined
    configuration only when it differs from what Cloudflare currently holds.

    Returns:
        True on success or when no update was needed, False on any failure
        (fetch failure, missing tunnel id, or PUT failure).
    """
    if not tunnel_state.get("id"):
        logging.warning("Cannot update CF config, tunnel ID missing in state.")
        return False
    with state_lock:
        logging.info("Constructing desired Cloudflare tunnel configuration from managed rules...")
        # Desired rules: every "active" managed rule that has a service target.
        desired_dockflare_rules = []
        for hostname, rule_details in managed_rules.items():
            if rule_details.get("status") == "active":
                service = rule_details.get("service")
                if service:
                    no_tls_verify = rule_details.get("no_tls_verify", False)
                    rule_config = {"hostname": hostname, "service": service}
                    if no_tls_verify:
                        rule_config["originRequest"] = {"noTLSVerify": True}
                    desired_dockflare_rules.append(rule_config)
                else:
                    logging.warning(f"Rule {hostname} is active but missing 'service'. Skipping.")
        try:
            current_api_config_ruleset = cloudflare_api.get_current_cf_config(tunnel_state["id"])
        except Exception as e:
            logging.error(f"Failed to fetch current CF config to compare: {e}")
            tunnel_state["error"] = f"Failed get tunnel config: {e}"
            return False
        if current_api_config_ruleset is None:
            logging.error("Failed to fetch current CF config ruleset; cannot reliably update.")
            return False
        current_api_ingress_rules = current_api_config_ruleset.get("ingress", [])
        # Keep API-side rules DockFlare should not own: the catch-all entry and
        # wildcard hostnames that no active managed rule claims.
        preserved_api_rules = []
        catch_all_rule_template = {"service": "http_status:404"}
        for api_rule in current_api_ingress_rules:
            api_hostname = api_rule.get("hostname")
            api_service = api_rule.get("service")
            is_catch_all = api_service == catch_all_rule_template["service"] and not api_hostname
            is_wildcard_not_managed_by_dockflare = api_hostname and '*' in api_hostname and not any(
                managed_host == api_hostname and managed_rules[managed_host].get("status") == "active"
                for managed_host in managed_rules
            )
            if is_catch_all or is_wildcard_not_managed_by_dockflare:
                preserved_api_rules.append(api_rule)
                continue
            is_managed_by_dockflare = False
            for df_rule_hostname, df_rule_details in managed_rules.items():
                if df_rule_hostname == api_hostname and df_rule_details.get("status") == "active":
                    is_managed_by_dockflare = True
                    break
            # Anything else (not managed, not preserved) will be dropped by the
            # authoritative PUT below; log it so the removal is traceable.
            if not is_managed_by_dockflare and not is_catch_all and not is_wildcard_not_managed_by_dockflare:
                logging.info(f"Non-DockFlare, non-wildcard, non-catch-all rule found in API: {api_rule}. It will be removed by authoritative update.")
        # Final PUT payload: desired rules first, then preserved rules that are
        # not exact (hostname, service) duplicates, then a catch-all if missing.
        final_ingress_rules_to_put = list(desired_dockflare_rules)
        for p_rule in preserved_api_rules:
            is_duplicate = False
            p_hostname = p_rule.get("hostname")
            p_service = p_rule.get("service")
            for f_rule in final_ingress_rules_to_put:
                if f_rule.get("hostname") == p_hostname and f_rule.get("service") == p_service:
                    is_duplicate = True
                    break
            if not is_duplicate:
                final_ingress_rules_to_put.append(p_rule)
        has_catch_all = any(r.get("service") == catch_all_rule_template["service"] and not r.get("hostname") for r in final_ingress_rules_to_put)
        if not has_catch_all:
            final_ingress_rules_to_put.append(catch_all_rule_template)
            logging.info("Adding default catch-all rule as none was found/preserved.")
        def rule_to_comparable_dict(rule):
            # Reduce a rule to the compared fields (hostname, service,
            # noTLSVerify) so extra keys/ordering don't trigger spurious PUTs.
            comp_dict = {"hostname": rule.get("hostname"), "service": rule.get("service")}
            if rule.get("originRequest", {}).get("noTLSVerify"):
                comp_dict["noTLSVerify"] = True
            return comp_dict
        current_api_comparable_set = {json.dumps(rule_to_comparable_dict(r), sort_keys=True) for r in current_api_ingress_rules}
        final_put_comparable_set = {json.dumps(rule_to_comparable_dict(r), sort_keys=True) for r in final_ingress_rules_to_put}
        needs_api_update = False
        if current_api_comparable_set != final_put_comparable_set:
            logging.info("Ingress rule configuration content differs from Cloudflare. Update required.")
            needs_api_update = True
        else:
            # Sets match; only skip the PUT when the rule counts also match
            # (set comparison collapses duplicate entries).
            if not needs_api_update and len(current_api_ingress_rules) == len(final_ingress_rules_to_put):
                logging.info("Cloudflare configuration content matches desired state. No API update needed.")
                return True
        logging.info(f"Updating Cloudflare tunnel config. Rules to PUT ({len(final_ingress_rules_to_put)} total):")
        for r_idx, r_val in enumerate(final_ingress_rules_to_put):
            logging.debug(f"  Rule {r_idx+1}: {json.dumps(r_val)}")
        if needs_api_update or not (len(current_api_ingress_rules) == len(final_ingress_rules_to_put) and current_api_comparable_set == final_put_comparable_set):
            endpoint = f"/accounts/{config.CF_ACCOUNT_ID}/cfd_tunnel/{tunnel_state['id']}/configurations"
            config_payload = {"config": {"ingress": final_ingress_rules_to_put}}
            try:
                cloudflare_api.cf_api_request("PUT", endpoint, json_data=config_payload)
                logging.info("Successfully updated Cloudflare tunnel configuration.")
                return True
            except Exception as e:
                logging.error(f"Failed to update CF tunnel config: {e}", exc_info=True)
                tunnel_state["error"] = f"Failed update tunnel config: {e}"
                return False
        return True
def get_cloudflared_container():
    """Look up the managed cloudflared agent container.

    Returns the docker Container object, or None when the Docker client is
    unavailable, an external tunnel is in use, no container name is
    configured, or the lookup fails for any reason (failures are mirrored
    into cloudflared_agent_state["last_action_status"]).
    """
    if not docker_client:
        logging.debug("Docker client unavailable in get_cloudflared_container.")
        return None
    if config.USE_EXTERNAL_CLOUDFLARED:
        # External mode: no agent container is managed by DockFlare.
        return None
    agent_name = config.CLOUDFLARED_CONTAINER_NAME
    if not agent_name:
        logging.debug("CLOUDFLARED_CONTAINER_NAME is not set.")
        return None
    try:
        return docker_client.containers.get(agent_name)
    except NotFound:
        logging.debug(f"Agent container '{agent_name}' not found.")
    except APIError as api_err:
        logging.error(f"Docker API error getting agent container '{agent_name}': {api_err}")
        cloudflared_agent_state["last_action_status"] = f"Error get agent: {api_err}"
    except requests.exceptions.ConnectionError as conn_err:
        logging.error(f"Docker connection error getting agent container: {conn_err}")
        cloudflared_agent_state["last_action_status"] = f"Error connect Docker: {conn_err}"
    except Exception as unexpected_err:
        logging.error(f"Unexpected error getting agent container '{agent_name}': {unexpected_err}", exc_info=True)
        cloudflared_agent_state["last_action_status"] = f"Error unexpected get agent: {unexpected_err}"
    return None
def update_cloudflared_container_status():
    """Refresh cloudflared_agent_state["container_status"] from Docker.

    If the Docker client is unavailable, attempts a one-shot reconnect; on
    failure the status becomes "docker_unavailable". Otherwise reloads the
    agent container and records any status transition, clearing a stale
    error message once the container is running again.

    Fix: removed a dead, unused ``from app import docker_client`` inside the
    ConnectionError handler (it bound a name that was never read).
    """
    global docker_client
    current_status = cloudflared_agent_state.get("container_status")
    if not docker_client:
        if current_status != "docker_unavailable":
            logging.warning("Docker client unavailable in update_cloudflared_container_status, attempting reconnect...")
            try:
                # Alias the import so we do not shadow the module-level name.
                import docker as docker_lib
                docker_client = docker_lib.from_env(timeout=5)
                docker_client.ping()
                logging.info("Reconnected to Docker daemon during agent status update.")
            except Exception as e_reconnect:
                logging.error(f"Failed to reconnect to Docker daemon: {e_reconnect}")
                if current_status != "docker_unavailable":
                    logging.info("Agent status changing to docker_unavailable.")
                    cloudflared_agent_state["container_status"] = "docker_unavailable"
                    # NOTE(review): reconnect only rebinds this module's
                    # docker_client; app.docker_client may still hold a stale
                    # reference — flagged for investigation.
                    from app import docker_client as global_dc_ref
                    if global_dc_ref is not None:
                        logging.warning("Global docker_client was not None, but reconnect failed. This needs careful handling.")
                return
        else:
            return
    container = get_cloudflared_container()
    new_status = "not_found"
    if container:
        try:
            container.reload()
            new_status = container.status
        except (NotFound, APIError) as e_reload:
            new_status = "not_found"
            logging.warning(f"Error reloading agent container status (now 'not_found'): {e_reload}")
            if cloudflared_agent_state.get("container_status") != "running":
                cloudflared_agent_state["last_action_status"] = "Agent container disappeared or API error."
        except requests.exceptions.ConnectionError as e_conn:
            # Daemon vanished mid-call: report connection loss, not "not_found".
            new_status = "docker_unavailable"
            logging.error(f"Docker connection error during agent status reload: {e_conn}")
        except Exception as e_unexpected:
            logging.error(f"Unexpected error reloading agent status for {container.name}: {e_unexpected}", exc_info=True)
            return
    if current_status != new_status:
        logging.info(f"Agent container '{config.CLOUDFLARED_CONTAINER_NAME}' status changed: {current_status} -> {new_status}")
        cloudflared_agent_state["container_status"] = new_status
        if new_status == 'running' and cloudflared_agent_state.get("last_action_status", "").startswith("Error"):
            cloudflared_agent_state["last_action_status"] = None  # Clear error if now running
def ensure_docker_network_exists(network_name):
    """Ensure the named Docker bridge network exists, creating it if needed.

    Args:
        network_name: Name of the Docker network to check/create.

    Returns:
        True when the network exists (or was created), False on any failure.
        Failures are also mirrored into cloudflared_agent_state.
    """
    if not docker_client:
        logging.error("Docker client unavailable, cannot check/create network.")
        return False
    if not network_name:
        logging.error("Network name not provided to ensure_docker_network_exists.")
        return False
    try:
        docker_client.networks.get(network_name)
        logging.info(f"Docker network '{network_name}' already exists.")
        return True
    except NotFound:
        # Network is missing — create it. NotFound must stay before the broader
        # APIError handler below (docker-py's NotFound subclasses APIError).
        logging.info(f"Docker network '{network_name}' not found. Creating...")
        try:
            docker_client.networks.create(network_name, driver="bridge", check_duplicate=True)
            logging.info(f"Successfully created Docker network '{network_name}'.")
            return True
        except APIError as e_create:
            if "already exists" in str(e_create).lower(): # More robust check
                # Someone created it between our get() and create(); treat the
                # race as success.
                logging.warning(f"Network '{network_name}' creation reported conflict but NotFound was raised? Assuming it exists now.")
                return True # Race condition likely
            logging.error(f"Failed to create Docker network '{network_name}': {e_create}", exc_info=True)
            cloudflared_agent_state["last_action_status"] = f"Error create net: {e_create}"
            return False
        except Exception as e_unexp_create:
            logging.error(f"Unexpected error creating Docker network '{network_name}': {e_unexp_create}", exc_info=True)
            cloudflared_agent_state["last_action_status"] = f"Error: Unexpected create net: {e_unexp_create}"
            return False
    except APIError as e_get:
        logging.error(f"Docker API error checking network '{network_name}': {e_get}", exc_info=True)
        cloudflared_agent_state["last_action_status"] = f"Error check net: {e_get}"
        return False
    except requests.exceptions.ConnectionError as e_conn:
        logging.error(f"Docker connection error checking network '{network_name}': {e_conn}")
        cloudflared_agent_state["last_action_status"] = f"Error: Docker connect check net."
        return False
    except Exception as e_unexp_get:
        logging.error(f"Unexpected error checking network '{network_name}': {e_unexp_get}", exc_info=True)
        cloudflared_agent_state["last_action_status"] = f"Error: Unexpected check net: {e_unexp_get}"
        return False
def start_cloudflared_container():
    """Start (or create) the managed cloudflared agent container.

    Reuses an existing stopped container when its network configuration is
    correct, recreates it when misconfigured, and otherwise pulls the image
    and creates a fresh container attached to the configured network.

    Returns:
        True when the agent is running (or was already running), else False.
        Outcomes are mirrored into cloudflared_agent_state["last_action_status"].
    """
    logging.info(f"Attempting to start agent container '{config.CLOUDFLARED_CONTAINER_NAME}'...")
    cloudflared_agent_state["last_action_status"] = "Starting..."
    success_flag = False
    if not docker_client:
        msg = "Docker client not available."
        logging.error(msg)
        cloudflared_agent_state["last_action_status"] = f"Error: {msg}"
        return False
    if not tunnel_state.get("token"):
        msg = "Tunnel token not available."
        logging.error(msg)
        cloudflared_agent_state["last_action_status"] = f"Error: {msg}"
        return False
    if not config.CLOUDFLARED_NETWORK_NAME or not ensure_docker_network_exists(config.CLOUDFLARED_NETWORK_NAME):
        logging.error(f"Failed network check/create for '{config.CLOUDFLARED_NETWORK_NAME}'. Cannot start agent.")
        return False
    token = tunnel_state["token"]
    container = get_cloudflared_container()
    needs_recreate = False
    if container:
        try:
            container.reload()
            logging.info(f"Found existing agent container '{container.name}' status: {container.status}")
            if container.status == 'running':
                msg = f"Agent container '{container.name}' is already running."
                logging.info(msg)
                cloudflared_agent_state["last_action_status"] = msg
                success_flag = True
                return True
            # Verify the stopped container is attached to the expected network;
            # a mismatch means it must be recreated rather than restarted.
            current_networks = container.attrs.get('NetworkSettings', {}).get('Networks', {})
            network_mode = container.attrs.get('HostConfig', {}).get('NetworkMode', 'default')
            is_on_correct_network = config.CLOUDFLARED_NETWORK_NAME in current_networks
            if network_mode != config.CLOUDFLARED_NETWORK_NAME and not is_on_correct_network :
                logging.warning(f"Existing agent container '{container.name}' is in network mode '{network_mode}' / not on '{config.CLOUDFLARED_NETWORK_NAME}'. Networks: {list(current_networks.keys())}. Needs recreation.")
                needs_recreate = True
            if needs_recreate:
                logging.info(f"Removing misconfigured/stopped agent container '{container.name}'...")
                try:
                    container.remove(force=True)
                    container = None
                except (APIError, requests.exceptions.ConnectionError) as rm_err:
                    logging.error(f"Failed to remove misconfigured agent '{container.name}': {rm_err}. Cannot proceed.")
                    cloudflared_agent_state["last_action_status"] = f"Error: Failed remove old agent: {rm_err}"
                    return False
            else:
                logging.info(f"Starting existing stopped agent container '{container.name}'...");
                container.start()
                msg = f"Started existing agent container '{container.name}'."
                cloudflared_agent_state["last_action_status"] = msg
                logging.info(msg)
                success_flag = True
        except (NotFound, APIError) as e_check:
            logging.warning(f"Error checking existing agent container '{config.CLOUDFLARED_CONTAINER_NAME}': {e_check}. Assuming creation is needed.")
            container = None
        except requests.exceptions.ConnectionError as e_conn:
            logging.error(f"Docker connection error checking existing agent container: {e_conn}")
            cloudflared_agent_state["last_action_status"] = f"Error: Docker connect check agent."
            return False
    if not container and not success_flag:
        logging.info(f"Agent container '{config.CLOUDFLARED_CONTAINER_NAME}' not found or needs recreation. Creating...")
        try:
            logging.info(f"Pulling image {config.CLOUDFLARED_IMAGE}...");
            docker_client.images.pull(config.CLOUDFLARED_IMAGE)
            logging.info("Image pull complete.")
        except APIError as img_err:
            # Pull failure is non-fatal: a locally cached image may still work.
            logging.warning(f"Could not pull image {config.CLOUDFLARED_IMAGE}: {img_err}. Will attempt using local if available.")
        except requests.exceptions.ConnectionError as e_conn_pull:
            logging.error(f"Docker connection failed during image pull: {e_conn_pull}")
            cloudflared_agent_state["last_action_status"] = f"Error: Docker connect pull image."
            return False
        try:
            # NOTE(review): the tunnel token is passed on the command line, so it
            # is visible via `docker inspect`/process listings — consider
            # TUNNEL_TOKEN env var instead; confirm before changing behavior.
            container_params = {
                "image": config.CLOUDFLARED_IMAGE,
                "command": f"tunnel --no-autoupdate run --token {token}",
                "name": config.CLOUDFLARED_CONTAINER_NAME,
                "network": config.CLOUDFLARED_NETWORK_NAME,
                "restart_policy": {"Name": "unless-stopped"},
                "detach": True,
                "remove": False,
                "labels": {"managed-by": "dockflare"}
            }
            new_container = docker_client.containers.run(**container_params)
            msg = f"Successfully created and started agent container '{new_container.name}' ({new_container.id[:12]})."
            cloudflared_agent_state["last_action_status"] = msg
            logging.info(msg)
            success_flag = True
        except APIError as create_err:
            if "is already in use" in str(create_err):
                msg = f"Error: Agent container name '{config.CLOUDFLARED_CONTAINER_NAME}' conflict."
            else:
                msg = f"Docker API error creating agent container: {create_err}"
            logging.error(msg, exc_info=True)
            cloudflared_agent_state["last_action_status"] = msg
            success_flag = False
        except requests.exceptions.ConnectionError as e_conn_run:
            logging.error(f"Docker connection failed running agent container: {e_conn_run}")
            cloudflared_agent_state["last_action_status"] = f"Error: Docker connect run agent."
            success_flag = False
    if success_flag:
        # Give the container a moment to settle before reading its status back.
        time.sleep(2)
        update_cloudflared_container_status()
    logging.info(f"Exiting start_cloudflared_container (Success: {success_flag}).")
    return success_flag
def stop_cloudflared_container():
    """Stop the managed cloudflared agent container if it is running.

    A missing or already-stopped container counts as success; the cached
    container status is synced in either case. Failures are mirrored into
    cloudflared_agent_state["last_action_status"].

    Returns:
        True when the container ended up stopped/absent, False on failure.
    """
    logging.info(f"Attempting to stop agent container '{config.CLOUDFLARED_CONTAINER_NAME}'...")
    cloudflared_agent_state["last_action_status"] = "Stopping..."
    stopped_ok = False
    if not docker_client:
        msg = "Docker client unavailable."
        logging.error(msg)
        cloudflared_agent_state["last_action_status"] = f"Error: {msg}"
        return False
    container = get_cloudflared_container()
    if not container:
        # Nothing to stop — treat a missing container as success.
        msg = f"Agent container '{config.CLOUDFLARED_CONTAINER_NAME}' not found (already stopped/removed?)."
        logging.warning(msg)
        cloudflared_agent_state["last_action_status"] = msg
        if cloudflared_agent_state["container_status"] != "not_found":
            cloudflared_agent_state["container_status"] = "not_found"
        return True
    try:
        container.reload()
        if container.status == 'running':
            logging.info(f"Stopping running agent container '{container.name}'...");
            container.stop(timeout=30)
            msg = f"Successfully stopped agent container '{container.name}'."
            cloudflared_agent_state["last_action_status"] = msg
            logging.info(msg)
            stopped_ok = True
        else:
            # Already stopped/exited — just sync our cached status.
            msg = f"Agent container '{container.name}' is not running (status: {container.status})."
            logging.info(msg)
            cloudflared_agent_state["last_action_status"] = msg
            if cloudflared_agent_state["container_status"] != container.status:
                cloudflared_agent_state["container_status"] = container.status
            return True
    except (APIError, NotFound) as stop_err:
        msg = f"Docker API error stopping agent container '{config.CLOUDFLARED_CONTAINER_NAME}': {stop_err}"
        logging.error(msg, exc_info=True)
        cloudflared_agent_state["last_action_status"] = f"Error: {msg}"
    except requests.exceptions.ConnectionError as conn_err:
        msg = f"Docker connection error stopping agent container: {conn_err}"
        logging.error(msg)
        cloudflared_agent_state["last_action_status"] = f"Error: {msg}"
    except Exception as unexpected_err:
        msg = f"Unexpected error stopping agent container '{config.CLOUDFLARED_CONTAINER_NAME}': {unexpected_err}"
        logging.error(msg, exc_info=True)
        cloudflared_agent_state["last_action_status"] = f"Error: {msg}"
    if stopped_ok:
        time.sleep(2)
        update_cloudflared_container_status()
    logging.info(f"Exiting stop_cloudflared_container (Success: {stopped_ok}).")
    return stopped_ok

280
dockflare/app/main.py Normal file
View file

@ -0,0 +1,280 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/main.py
import logging
import threading
import time
import sys
from app import app, docker_client, tunnel_state, cloudflared_agent_state, config, log_queue
from app.core.state_manager import load_state
from app.core.tunnel_manager import (
initialize_tunnel,
update_cloudflared_container_status,
start_cloudflared_container
)
from app.core.docker_handler import docker_event_listener, process_container_start
from app.core.reconciler import cleanup_expired_rules, reconcile_state_threaded
# Signals every background thread to shut down cleanly.
stop_event = threading.Event()
# Handles to started background threads; joined during shutdown.
background_threads_list = []
# Thread that periodically refreshes the agent container status (if started).
agent_status_updater_thread = None
# Thread that runs the one-time startup/initialization work.
main_initialization_thread = None
def run_all_background_tasks():
    """Start the background worker threads appropriate for the current config.

    Starts the Docker event listener and expired-rule cleanup threads when
    the tunnel identity is usable, plus the periodic agent status updater
    when DockFlare manages its own cloudflared container.

    Returns:
        The list of threads started by this call (also appended to
        background_threads_list).
    """
    global background_threads_list, agent_status_updater_thread
    threads_to_start = []
    if not docker_client:
        logging.warning("Docker client unavailable. Core background tasks (Event Listener, Cleanup) cannot start.")
    else:
        # Determine whether we have enough tunnel identity for tasks that
        # talk to Cloudflare.
        tunnel_ready_for_tasks = False
        if config.USE_EXTERNAL_CLOUDFLARED:
            if config.EXTERNAL_TUNNEL_ID:
                tunnel_ready_for_tasks = True
            else:
                logging.warning("External mode: EXTERNAL_TUNNEL_ID missing. Background tasks needing tunnel ID may fail.")
        elif tunnel_state.get("id") and tunnel_state.get("token"):
            tunnel_ready_for_tasks = True
        else:
            logging.warning("Managed tunnel not fully initialized (ID/token missing). Background tasks needing tunnel ID may fail.")
        if tunnel_ready_for_tasks:
            logging.info("Starting core background task threads (Docker Listener, Cleanup Task)...")
            event_thread = threading.Thread(target=docker_event_listener, args=(stop_event,), name="DockerEventListener", daemon=True)
            threads_to_start.append(event_thread)
            cleanup_thread = threading.Thread(target=cleanup_expired_rules, args=(stop_event,), name="CleanupTask", daemon=True)
            threads_to_start.append(cleanup_thread)
        else:
            logging.warning("Tunnel not ready. Skipping Docker event listener and cleanup task.")
    if not config.USE_EXTERNAL_CLOUDFLARED and docker_client:
        logging.info("Starting periodic agent status updater thread...")
        # Ensure periodic_agent_status_updater is defined or imported
        agent_status_updater_thread = threading.Thread(target=periodic_agent_status_updater, name="AgentStatusUpdater", daemon=True)
        threads_to_start.append(agent_status_updater_thread)
    for t in threads_to_start:
        t.start()
    background_threads_list.extend(threads_to_start)
    if threads_to_start: # Only log if some threads were actually initiated
        logging.info(f"{len(threads_to_start)} background tasks initiated.")
    return threads_to_start
def periodic_agent_status_updater():
    """Loop until shutdown, refreshing the agent container status each cycle.

    Runs update_cloudflared_container_status() every
    config.AGENT_STATUS_UPDATE_INTERVAL_SECONDS; errors are logged and the
    loop continues. Exits promptly once stop_event is set.
    """
    logging.info("Periodic agent status updater task starting...")
    while not stop_event.is_set():
        try:
            logging.debug("Running periodic agent status update check...")
            update_cloudflared_container_status()
        except Exception as exc:
            # Keep the updater alive across transient failures.
            logging.error(f"Error in periodic agent status updater loop: {exc}", exc_info=True)
        if stop_event.is_set():
            break
        # wait() doubles as an interruptible sleep between checks.
        stop_event.wait(config.AGENT_STATUS_UPDATE_INTERVAL_SECONDS)
    logging.info("Periodic agent status updater task stopped.")
def perform_initial_setup_and_tasks():
    """One-time startup work, run in a background thread.

    Initializes the tunnel, scans existing containers in batches (reporting
    progress via app.reconciliation_info), schedules a delayed full
    reconciliation, optionally auto-starts the managed agent container, and
    finally launches the long-running background tasks.
    """
    global background_threads_list
    logging.info("Main initialization process started in background thread.")
    if not docker_client:
        logging.error("Docker client unavailable during initialization process. Critical functionalities will be affected.")
        return
    initialize_tunnel()
    logging.info(f"Tunnel initialization attempt complete. Status: {tunnel_state.get('status_message')}, Error: {tunnel_state.get('error')}")
    # Skip the initial scan when the tunnel identity is unusable.
    initial_scan_needed_and_possible = True
    if config.USE_EXTERNAL_CLOUDFLARED:
        if not config.EXTERNAL_TUNNEL_ID:
            logging.error("External mode enabled, but EXTERNAL_TUNNEL_ID is missing. Skipping initial scan.")
            initial_scan_needed_and_possible = False
    elif not (tunnel_state.get("id") and tunnel_state.get("token")):
        logging.error("Managed tunnel not fully initialized (missing ID or token). Skipping initial scan.")
        initial_scan_needed_and_possible = False
    if initial_scan_needed_and_possible:
        logging.info("Performing initial container scan and rule processing...")
        flask_app_instance = app
        # Hard cap on how long the initial scan may run, in seconds.
        max_initial_scan_time = 120
        scan_start_time = time.time()
        if not hasattr(flask_app_instance, 'reconciliation_info'):
            flask_app_instance.reconciliation_info = {}
        flask_app_instance.reconciliation_info.update({
            "in_progress": True, "progress": 0, "total_items": 0,
            "processed_items": 0, "start_time": scan_start_time,
            "status": "Starting initial container scan..."
        })
        try:
            containers = docker_client.containers.list(all=config.SCAN_ALL_NETWORKS)
            container_count = len(containers)
            logging.info(f"[InitialScan] Found {container_count} total containers to scan.")
            flask_app_instance.reconciliation_info["total_items"] = container_count
            processed_count = 0
            # Process in small batches so progress updates stay responsive.
            batch_size = 5
            for i in range(0, container_count, batch_size):
                if time.time() - scan_start_time > max_initial_scan_time:
                    logging.warning("[InitialScan] Timeout reached during initial container processing.")
                    break
                current_batch = containers[i:i+batch_size]
                flask_app_instance.reconciliation_info["status"] = f"Initial scan: batch {i//batch_size + 1}/{(container_count+batch_size-1)//batch_size if container_count > 0 else 1}"
                for container_obj in current_batch:
                    process_container_start(container_obj)
                    processed_count += 1
                    if container_count > 0:
                        flask_app_instance.reconciliation_info["progress"] = min(100, int((processed_count / container_count) * 100))
                    flask_app_instance.reconciliation_info["processed_items"] = processed_count
                    time.sleep(0.1)
                if stop_event.is_set(): break
        except Exception as e_scan:
            logging.error(f"Error during initial container scan/processing: {e_scan}", exc_info=True)
            if hasattr(flask_app_instance, 'reconciliation_info'):
                flask_app_instance.reconciliation_info["status"] = f"Error during initial scan: {str(e_scan)[:100]}"
        if hasattr(flask_app_instance, 'reconciliation_info'):
            flask_app_instance.reconciliation_info.update({"in_progress": False, "progress": 100, "status": "Initial container scan complete.", "completed_at": time.time()})
        logging.info("Initial container scan and rule processing complete.")
        logging.info("Scheduling full background reconciliation after initial setup (15s delay).")
        threading.Timer(15, reconcile_state_threaded).start()
    if not config.USE_EXTERNAL_CLOUDFLARED and tunnel_state.get("id") and tunnel_state.get("token"):
        logging.info("Checking managed cloudflared agent container status post-initialization...")
        update_cloudflared_container_status()
        if cloudflared_agent_state.get("container_status") != 'running':
            logging.info("Managed agent container not running, attempting auto-start...")
            start_cloudflared_container()
        else:
            logging.info(f"Managed agent container '{config.CLOUDFLARED_CONTAINER_NAME}' is already running.")
    run_all_background_tasks()
def main_application_entrypoint():
    """Run the whole application: load state, kick off init, serve HTTP.

    Loads persisted state, starts initialization in a background thread (if
    Docker is reachable), serves the Flask app via waitress, supervises the
    worker threads, and on exit joins them and sets a non-zero exit code if
    the run ended in an error state.
    """
    global main_initialization_thread
    logging.info("-" * 52)
    logging.info("--- DockFlare Starting (Refactored Structure) ---")
    logging.info(f"--- Version: 1.7.1 ---")
    logging.info("-" * 52)
    load_state()
    logging.info("Initial state loading from file complete.")
    if not docker_client:
        logging.error("Docker client is unavailable. Dockflare will operate with limited functionality.")
        if tunnel_state: tunnel_state["status_message"] = "Error: Docker client unavailable."
        if tunnel_state: tunnel_state["error"] = "Failed to connect to Docker daemon."
        if cloudflared_agent_state: cloudflared_agent_state["container_status"] = "docker_unavailable"
    else:
        logging.info("Docker client connected. Proceeding with full initialization in background.")
        main_initialization_thread = threading.Thread(
            target=perform_initial_setup_and_tasks,
            name="MainInitializationThread",
            daemon=True
        )
        main_initialization_thread.start()
    logging.info("Starting Flask web server...")
    flask_server_thread = None
    try:
        from waitress import serve
        flask_server_thread = threading.Thread(
            target=serve,
            args=(app,),
            kwargs={'host': '0.0.0.0', 'port': 5000, 'threads': 10, 'expose_tracebacks': False},
            daemon=True,
            name="FlaskWaitressServer"
        )
        flask_server_thread.start()
        logging.info("Flask server started using waitress on 0.0.0.0:5000.")
        # Supervision loop: exit when the server dies or when every critical
        # (non-daemon) thread has finished.
        while not stop_event.is_set():
            if flask_server_thread and not flask_server_thread.is_alive():
                logging.error("Flask server thread terminated unexpectedly! Initiating shutdown.")
                stop_event.set()
                break
            all_daemons_or_stopped = True
            for bg_thread in background_threads_list:
                if bg_thread and bg_thread.is_alive() and not bg_thread.daemon:
                    all_daemons_or_stopped = False
                    break
            if agent_status_updater_thread and agent_status_updater_thread.is_alive() and not agent_status_updater_thread.daemon:
                all_daemons_or_stopped = False
            # Check main_initialization_thread
            if main_initialization_thread and main_initialization_thread.is_alive() and not main_initialization_thread.daemon:
                all_daemons_or_stopped = False
            if not all_daemons_or_stopped:
                time.sleep(5)
            else:
                if not (flask_server_thread and flask_server_thread.is_alive()):
                    logging.info("All critical threads seem to have completed. Initiating shutdown.")
                    stop_event.set()
                else:
                    time.sleep(5)
    except ImportError:
        # waitress missing: fall back to the (blocking) Flask dev server.
        logging.warning("Waitress not found. Using Flask development server (NOT FOR PRODUCTION).")
        app.run(host='0.0.0.0', port=5000, threaded=True, debug=False)
    except KeyboardInterrupt:
        logging.info("KeyboardInterrupt received. Shutting down...")
    except Exception as server_startup_err:
        logging.error(f"Web server failed to start or crashed: {server_startup_err}", exc_info=True)
    finally:
        logging.info("Shutdown sequence initiated...")
        stop_event.set()
        if main_initialization_thread and main_initialization_thread.is_alive():
            logging.info("Waiting for main initialization thread to complete (timeout 15s)...")
            main_initialization_thread.join(timeout=15)
        threads_to_join = list(background_threads_list) # Create a copy
        if agent_status_updater_thread: threads_to_join.append(agent_status_updater_thread)
        for bg_thread in threads_to_join:
            if bg_thread and bg_thread.is_alive():
                logging.info(f"Waiting for background thread {bg_thread.name} to complete (timeout 5s)...")
                bg_thread.join(timeout=5)
        if flask_server_thread and flask_server_thread.is_alive():
            logging.info("Flask server thread (Waitress) is a daemon; process exit will terminate it.")
        logging.info("Dockflare application shutdown complete.")
        # Exit non-zero when we ended in a known error state.
        exit_code = 0
        if (tunnel_state and tunnel_state.get("error")) or \
           (cloudflared_agent_state and cloudflared_agent_state.get("container_status") == "docker_unavailable") or \
           not docker_client:
            exit_code = 1
        sys.exit(exit_code)
# Script entry point: run the full application lifecycle.
if __name__ == '__main__':
    main_application_entrypoint()

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,451 @@
// app/static/js/main.js
// Maximum number of log lines kept in the on-page log viewer.
const maxLogLines = 250;
// Whether the initial "Connecting to log stream..." placeholder was cleared.
let initialConnectMessageCleared = false;
// Presumably the active EventSource for the log stream — used further down; verify.
let activeLogSource = null;
// Presumably interval handles for stream health checks / keep-alive pings; verify.
let eventSourceHealthCheck = null;
let pingInterval = null;
// Self-contained theme selector module: persists the chosen theme in
// localStorage, applies it via the <html data-theme> attribute, and renders
// the dropdown menu of available themes.
const themeManager = (function() {
    let themeMenuScoped; // the #theme-menu <ul>, bound in initialize()
    const htmlElementScoped = document.documentElement;
    // Themes the user may pick from; anything else falls back to "light".
    const availableThemes = [
        "light", "dark", "cupcake", "bumblebee", "emerald", "corporate",
        "synthwave", "retro", "cyberpunk", "valentine", "halloween", "garden",
        "forest", "aqua", "lofi", "pastel", "fantasy", "wireframe", "black",
        "luxury", "dracula", "cmyk", "autumn", "business", "acid",
        "lemonade", "night", "coffee", "winter"
    ];
    // Persist and apply a theme, then refresh the menu highlight.
    function setTheme(theme) {
        if (!availableThemes.includes(theme)) {
            console.warn(`Theme "${theme}" not available, defaulting to light.`);
            theme = 'light';
        }
        localStorage.setItem('theme', theme);
        htmlElementScoped.setAttribute('data-theme', theme);
        if (themeMenuScoped) updateSelectedThemeInMenu(theme);
    }
    // Rebuild the dropdown with one clickable entry per available theme.
    function populateThemeMenu() {
        if (!themeMenuScoped) return;
        themeMenuScoped.innerHTML = '';
        availableThemes.forEach(themeName => {
            const listItem = document.createElement('li');
            listItem.classList.add('w-full');
            const link = document.createElement('a');
            link.textContent = themeName.charAt(0).toUpperCase() + themeName.slice(1);
            link.setAttribute('data-theme-value', themeName);
            link.href = "#";
            link.classList.add('flex', 'items-center', 'flex-grow', 'w-full', 'px-4', 'py-2');
            link.addEventListener('click', (e) => {
                e.preventDefault();
                const selectedTheme = e.target.getAttribute('data-theme-value');
                setTheme(selectedTheme);
                // Blur so the dropdown closes after a selection.
                if (document.activeElement && typeof document.activeElement.blur === 'function') {
                    document.activeElement.blur();
                }
            });
            listItem.appendChild(link);
            themeMenuScoped.appendChild(listItem);
        });
    }
    // Highlight the currently-active theme in the menu, clear the rest.
    function updateSelectedThemeInMenu(currentTheme) {
        if (!themeMenuScoped) return;
        themeMenuScoped.querySelectorAll('li a').forEach(a => {
            if (a.getAttribute('data-theme-value') === currentTheme) {
                a.parentElement.classList.add('font-bold', 'text-primary');
                a.classList.add('active');
            } else {
                a.parentElement.classList.remove('font-bold', 'text-primary');
                a.classList.remove('active');
            }
        });
    }
    // Apply the saved theme (or "light") on startup.
    function initTheme() {
        const savedTheme = localStorage.getItem('theme');
        const defaultTheme = 'light';
        setTheme(savedTheme || defaultTheme);
    }
    return {
        // Wire the module to the DOM; logs an error if the UI is missing.
        initialize: function() {
            themeMenuScoped = document.getElementById('theme-menu');
            const themeSelectorBtn = document.getElementById('theme-selector-btn');
            if (themeMenuScoped && themeSelectorBtn) {
                populateThemeMenu();
                initTheme();
            } else {
                console.error("DockFlare Theme Error: UI elements for theme selector not found.");
            }
        }
    };
})();
// Force page resources and fetch() calls onto the page's own protocol/host.
// Useful when the app is served through an HTTPS reverse proxy but templates
// emitted http: URLs. Also injects a <base> tag pointing at the current origin.
function fixResourcesAndBase() {
    const currentProtocol = window.location.protocol;
    const currentHost = window.location.host;
    // Upgrade http: stylesheets to https: when the page itself is https.
    document.querySelectorAll('link[rel="stylesheet"]').forEach(function(link) {
        const href = link.getAttribute('href');
        if (href && href.startsWith('http:') && currentProtocol === 'https:') {
            link.setAttribute('href', href.replace('http:', 'https:'));
        }
    });
    // Same upgrade for script sources.
    document.querySelectorAll('script[src]').forEach(function(script) {
        const src = script.getAttribute('src');
        if (src && src.startsWith('http:') && currentProtocol === 'https:') {
            script.setAttribute('src', src.replace('http:', 'https:'));
        }
    });
    // Rewrite preconnect hints to the page's protocol.
    document.querySelectorAll('link[rel="preconnect"]').forEach(function(link) {
        const href = link.getAttribute('href');
        if (href && href.startsWith('http:') && currentProtocol === 'https:') {
            const urlObj = new URL(href);
            link.setAttribute('href', currentProtocol + '//' + urlObj.host + (urlObj.pathname || '') + (urlObj.search || ''));
        }
    });
    // Ensure a <base> tag exists so relative URLs resolve against this origin.
    let baseTag = document.querySelector('base');
    if (!baseTag) {
        baseTag = document.createElement('base');
        document.head.insertBefore(baseTag, document.head.firstChild); // Insert at the beginning
    }
    baseTag.href = currentProtocol + '//' + currentHost + '/';
    // Monkey-patch fetch: same-host requests with a mismatched protocol get
    // rewritten to the page's protocol before being sent.
    const origFetch = window.fetch;
    window.fetch = function(url, options) {
        let processedUrl = url;
        if (url && typeof url === 'string') {
            try {
                const urlObj = new URL(url, document.baseURI);
                if (urlObj.host === currentHost && urlObj.protocol !== currentProtocol) {
                    urlObj.protocol = currentProtocol;
                    processedUrl = urlObj.toString();
                }
            } catch (e) {
                // Unparseable URL: pass it through untouched.
            }
        }
        return origFetch.call(this, processedUrl, options);
    };
}
// Append one message to the on-page log viewer. Clears the placeholder text
// on first real output, caps the buffer at maxLogLines, and keeps the view
// pinned to the bottom unless the user has scrolled up.
function addLogLine(message, type = 'log') {
    const logOutput = document.getElementById('log-output');
    if (!logOutput) { console.error("Log output element not found."); return; }
    if (!initialConnectMessageCleared && logOutput.textContent.includes('Connecting to log stream...')) {
        logOutput.textContent = '';
        initialConnectMessageCleared = true;
    }
    const line = document.createElement('div');
    line.textContent = message;
    switch (type) {
        case 'status':
            line.classList.add('text-neutral-content', 'opacity-70', 'italic');
            break;
        case 'error':
            line.classList.add('text-red-400', 'font-semibold');
            break;
        case 'connected':
            line.classList.add('text-green-400');
            break;
    }
    // Capture scroll position before appending so auto-scroll only happens
    // when the user was already at (or within 10px of) the bottom.
    const pinnedToBottom = logOutput.scrollHeight - logOutput.clientHeight <= logOutput.scrollTop + 10;
    logOutput.appendChild(line);
    while (logOutput.childNodes.length > maxLogLines) {
        logOutput.removeChild(logOutput.firstChild);
    }
    if (pinnedToBottom) {
        logOutput.scrollTop = logOutput.scrollHeight;
    }
}
// Open the Server-Sent-Events log stream and keep it alive with three layers:
// a 10s inactivity watchdog, exponential-backoff retries on error, and a 15s
// health check that reconnects if the stream is found closed.
// Uses module-level globals: activeLogSource, eventSourceHealthCheck.
function connectEventSource() {
    const logOutput = document.getElementById('log-output');
    if (!logOutput) { console.error("Log output element not found for EventSource."); return; }
    if (!window.EventSource) {
        addLogLine("Browser doesn't support Server-Sent Events.", 'error');
        return;
    }
    // Tear down any previous stream before opening a new one.
    if (activeLogSource) {
        try { activeLogSource.close(); } catch (e) { console.error("Error closing existing log stream:", e); }
        activeLogSource = null;
    }
    // baseURI keeps protocol/host consistent; timestamp defeats caching.
    const streamUrl = `${document.baseURI}stream-logs?t=${Date.now()}`;
    try {
        activeLogSource = new EventSource(streamUrl);
        let connectionTimeout;
        // Watchdog: if nothing (not even a keepalive) arrives within 10s,
        // assume the stream is dead, close it and schedule a reconnect.
        const resetConnectionTimeout = () => {
            if (connectionTimeout) clearTimeout(connectionTimeout);
            connectionTimeout = setTimeout(() => {
                if (activeLogSource) {
                    activeLogSource.close(); activeLogSource = null;
                    addLogLine("--- Log stream connection timeout. Reconnecting... ---", 'error');
                    setTimeout(connectEventSource, 2000);
                }
            }, 10000); // 10s timeout
        };
        resetConnectionTimeout();
        activeLogSource.onopen = function() {
            if (connectionTimeout) clearTimeout(connectionTimeout);
            addLogLine("--- Log stream connected ---", 'connected');
        };
        activeLogSource.onmessage = function(event) {
            resetConnectionTimeout();
            // Server heartbeats/keepalives are dropped, not rendered.
            if (event.data === "heartbeat" || event.data === ": keepalive") { return; }
            addLogLine(event.data, 'log');
        };
        // NOTE(review): retryAttempt is function-local, so the backoff resets
        // to its smallest delay each time connectEventSource runs again.
        let retryAttempt = 0;
        activeLogSource.onerror = function(err) {
            if (connectionTimeout) clearTimeout(connectionTimeout);
            if (activeLogSource && activeLogSource.readyState !== EventSource.CLOSED) {
                addLogLine("--- Log stream connection error. Retrying... ---", 'error');
            }
            if (activeLogSource) { activeLogSource.close(); activeLogSource = null; }
            retryAttempt++;
            // Exponential backoff: 5s * 1.5^n, exponent clamped, capped at 30s.
            const delay = Math.min(5000 * Math.pow(1.5, Math.min(retryAttempt - 1, 5)), 30000);
            setTimeout(connectEventSource, delay);
        };
    } catch (e) {
        addLogLine(`--- Failed to establish log stream connection: ${e.message} ---`, 'error');
        setTimeout(connectEventSource, 5000);
    }
    // Periodic safety net: reconnect if the stream object is gone or closed.
    if (eventSourceHealthCheck) clearInterval(eventSourceHealthCheck);
    eventSourceHealthCheck = setInterval(() => {
        if (!activeLogSource || activeLogSource.readyState === EventSource.CLOSED) {
            addLogLine("--- Health check: Log stream disconnected. Reconnecting... ---", 'status');
            connectEventSource();
        }
    }, 15000);
}
function formatTimeDifference(diffMillis) {
const totalSeconds = Math.round(Math.abs(diffMillis / 1000));
if (totalSeconds < 60) return diffMillis >= 0 ? 'in <1m' : '<1m ago';
const days = Math.floor(totalSeconds / (3600 * 24));
const hours = Math.floor((totalSeconds % (3600 * 24)) / 3600);
const minutes = Math.floor((totalSeconds % 3600) / 60);
let parts = [];
if (days > 0) parts.push(`${days}d`);
if (hours > 0) parts.push(`${hours}h`);
if (minutes > 0 || (days === 0 && hours === 0)) parts.push(`${minutes}m`);
const timeString = parts.join(' ');
return diffMillis >= 0 ? `in ${timeString}` : `${timeString} ago`;
}
// Refresh every element carrying data-delete-at: render the absolute
// deadline in the locale format plus a relative countdown, switching both
// spans to the error color once the deadline has passed.
function updateCountdowns() {
    const pendingRows = document.querySelectorAll('div[data-delete-at]');
    pendingRows.forEach(div => {
        const deleteAtISO = div.dataset.deleteAt;
        if (!deleteAtISO) return;
        const absoluteTimeSpan = div.querySelector('.absolute-time-display');
        const countdownSpan = div.querySelector('.countdown-timer');
        if (!absoluteTimeSpan || !countdownSpan) return;
        try {
            const targetDate = new Date(deleteAtISO);
            if (isNaN(targetDate.getTime())) throw new Error("Invalid date");
            absoluteTimeSpan.textContent = targetDate.toLocaleString(undefined, {
                hour: '2-digit', minute: '2-digit', day: '2-digit', month: 'short', year: 'numeric'
            });
            const diff = targetDate - new Date();
            countdownSpan.textContent = `(${formatTimeDifference(diff)})`;
            if (diff < 0) {
                countdownSpan.classList.add('text-error');
                absoluteTimeSpan.classList.add('text-error');
            } else {
                countdownSpan.classList.remove('text-error');
                absoluteTimeSpan.classList.remove('text-error');
            }
        } catch (e) {
            absoluteTimeSpan.textContent = "(Invalid Date)";
            countdownSpan.textContent = "";
            console.error("Error processing date for countdown:", deleteAtISO, e);
        }
    });
}
// Ping the backend every 30s to keep sessions/reverse proxies from idling
// out the connection. Replaces any previously running ping interval.
function startServerPing() {
    if (pingInterval) clearInterval(pingInterval);
    const doPing = () => {
        fetch(`${document.baseURI}ping?t=${Date.now()}`) // Use baseURI
            .then(response => response.ok ? response.json() : Promise.reject(`Ping failed: ${response.status}`))
            .then(() => { /* success: nothing to do */ })
            .catch(error => console.warn("Server ping failed:", error));
    };
    pingInterval = setInterval(doPing, 30000);
}
// Poll the backend's reconciliation progress and render it into the
// #reconciliation-status alert area: a spinner/progress alert while running,
// a success alert (auto-cleared after 5s) when it finishes.
function updateReconciliationStatus() {
    fetch(`${document.baseURI}reconciliation-status?t=${Date.now()}`)
        .then(response => response.json())
        .then(data => {
            const statusElement = document.getElementById('reconciliation-status');
            const messageElement = document.getElementById('reconciliation-status-message');
            if (!statusElement || !messageElement) return;
            // Textual status line is only visible while work is in progress.
            if (data.status) {
                messageElement.textContent = data.status;
                messageElement.style.display = data.in_progress ? 'block' : 'none';
            }
            if (data.in_progress) {
                statusElement.innerHTML = `<div role="alert" class="alert alert-warning shadow-md text-sm"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" class="stroke-current shrink-0 w-6 h-6 animate-spin"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 6v6l4 2M21.56 10.5A10.001 10.001 0 0012 2a10 10 0 100 20 9.974 9.974 0 005.201-1.71l-.001-.001z"></path></svg><div><h3 class="font-bold">Reconciliation: ${data.progress}%</h3><div class="text-xs">Processing ${data.processed_items} of ${data.total_items} items...</div></div></div>`;
            } else {
                // Only clear if it was previously showing reconciliation
                if (statusElement.innerHTML.includes('Reconciliation:')) {
                    statusElement.innerHTML = `<div role="alert" class="alert alert-success shadow-md text-sm"><svg xmlns="http://www.w3.org/2000/svg" class="stroke-current shrink-0 h-6 w-6" fill="none" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z"></path></svg><span>Reconciliation complete</span></div>`;
                    // Auto-dismiss the success alert, but only if it is still
                    // the content being shown 5s later.
                    setTimeout(() => {
                        if (statusElement.innerHTML.includes('Reconciliation complete')) {
                            statusElement.innerHTML = '';
                            if (messageElement) messageElement.style.display = 'none';
                        }
                    }, 5000);
                }
            }
        }).catch(err => console.warn("Failed to fetch reconciliation status:", err));
}
// Main page bootstrap: fix resource URLs, initialize theming, rewrite form
// actions and links onto the page protocol, start the log stream, countdown
// timers, reconciliation polling, policy-select toggles, tunnel-DNS
// expanders, and the keepalive ping. Cleans up timers/streams on unload.
document.addEventListener('DOMContentLoaded', function() {
    fixResourcesAndBase();
    themeManager.initialize();
    // Rewrite opted-in form actions so they target the page's protocol/host.
    document.querySelectorAll('form.protocol-aware-form').forEach(form => {
        if (form.getAttribute('action')) {
            let actionUrl = form.getAttribute('action');
            try {
                const fullActionUrl = new URL(actionUrl, document.baseURI);
                if (fullActionUrl.protocol !== window.location.protocol && fullActionUrl.host === window.location.host) {
                    fullActionUrl.protocol = window.location.protocol;
                    form.setAttribute('action', fullActionUrl.toString());
                } else if (!actionUrl.startsWith('http')) { // Ensure relative paths become full
                    form.setAttribute('action', fullActionUrl.toString());
                }
            } catch (e) { /* console.error("Error processing form action URL:", actionUrl, e); */ }
        }
    });
    // Same protocol normalization for anchors (skipping fragments and
    // mailto:/tel: schemes).
    document.querySelectorAll('a[href]').forEach(link => {
        const href = link.getAttribute('href');
        if (href && href !== "#" && !href.startsWith('mailto:') && !href.startsWith('tel:')) {
            try {
                const fullLinkUrl = new URL(href, document.baseURI);
                if (fullLinkUrl.protocol !== window.location.protocol && fullLinkUrl.host === window.location.host) {
                    fullLinkUrl.protocol = window.location.protocol;
                    link.setAttribute('href', fullLinkUrl.toString());
                } else if (!href.startsWith('http')) {
                    link.setAttribute('href', fullLinkUrl.toString());
                }
            } catch (e) { /* console.error("Error processing link href URL:", href, e); */ }
        }
    });
    // Periodic UI refreshers.
    updateCountdowns();
    setInterval(updateCountdowns, 30000);
    connectEventSource();
    updateReconciliationStatus();
    setInterval(updateReconciliationStatus, 2000);
    // Policy type select logic
    // Show the auth-email input only for the 'authenticate_email' policy;
    // hidden fields get their value cleared so stale emails aren't submitted.
    function toggleAuthEmailField(policyType, selectElement) {
        const form = selectElement.closest('form');
        if (!form) return;
        const emailFieldDiv = form.querySelector('.auth-email-field');
        if (emailFieldDiv) {
            if (policyType === 'authenticate_email') {
                emailFieldDiv.classList.remove('hidden');
            } else {
                emailFieldDiv.classList.add('hidden');
                const emailInput = emailFieldDiv.querySelector('input[name="auth_email"]');
                if (emailInput) emailInput.value = '';
            }
        }
    }
    document.querySelectorAll('.policy-type-select').forEach(select => {
        select.addEventListener('change', function() {
            toggleAuthEmailField(this.value, this);
        });
        // Apply once on load so initial visibility matches the current value.
        toggleAuthEmailField(select.value, select);
    });
    // Tunnel DNS toggle logic
    // Expand/collapse the sibling row showing a tunnel's CNAME records;
    // records are fetched lazily on first expand and cached via data-loaded.
    document.querySelectorAll('.tunnel-dns-toggle').forEach(button => {
        button.addEventListener('click', async function() {
            const tunnelId = this.dataset.tunnelId;
            const tunnelDetailsRow = this.closest('tr');
            const dnsRecordsDisplayRow = tunnelDetailsRow.nextElementSibling;
            const targetDivId = this.getAttribute('aria-controls');
            const targetDiv = document.getElementById(targetDivId);
            const isExpanded = this.getAttribute('aria-expanded') === 'true';
            const expandIcon = this.querySelector('.expand-icon');
            const collapseIcon = this.querySelector('.collapse-icon');
            if (!dnsRecordsDisplayRow || !targetDiv) return;
            if (isExpanded) {
                // Collapse: hide the row and swap the chevron icons back.
                dnsRecordsDisplayRow.classList.add('hidden');
                this.setAttribute('aria-expanded', 'false');
                if (expandIcon) expandIcon.classList.remove('hidden');
                if (collapseIcon) collapseIcon.classList.add('hidden');
            } else {
                this.setAttribute('aria-expanded', 'true');
                if (expandIcon) expandIcon.classList.add('hidden');
                if (collapseIcon) collapseIcon.classList.remove('hidden');
                // Fetch only if never loaded or the previous attempt errored.
                if (targetDiv.dataset.loaded !== 'true' || targetDiv.dataset.loaded === 'error') {
                    targetDiv.innerHTML = '<p class="opacity-60 italic animate-pulse p-2">Loading DNS records...</p>';
                    dnsRecordsDisplayRow.classList.remove('hidden');
                    try {
                        const fetchUrl = `${document.baseURI}tunnel-dns-records/${encodeURIComponent(tunnelId)}?t=${Date.now()}`;
                        const response = await fetch(fetchUrl);
                        if (!response.ok) {
                            let errorDetail = `HTTP error ${response.status}`;
                            try { const errorData = await response.json(); errorDetail = errorData.error || errorData.message || errorDetail; } catch (e) {}
                            throw new Error(errorDetail);
                        }
                        const data = await response.json();
                        // Re-resolve the target: the DOM may have changed while awaiting.
                        const currentTargetDiv = document.getElementById(`dns-records-${tunnelId}`);
                        if (!currentTargetDiv) {return;}
                        if (data.dns_records && data.dns_records.length > 0) {
                            let dnsHtml = '<ul class="list-none pl-4 space-y-1.5">';
                            data.dns_records.forEach(record => {
                                const recordUrl = `https://${record.name}`;
                                const zoneDisplay = record.zone_name ? record.zone_name : record.zone_id;
                                dnsHtml += `<li class="opacity-90 text-xs">
                                    <svg xmlns="http://www.w3.org/2000/svg" class="h-3 w-3 inline-block mr-1 text-info" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path stroke-linecap="round" stroke-linejoin="round" d="M13.828 10.172a4 4 0 00-5.656 0l-4 4a4 4 0 105.656 5.656l1.102-1.101m-.758-4.899a4 4 0 005.656 0l4-4a4 4 0 00-5.656-5.656l-1.1 1.1" /></svg>
                                    <a href="${recordUrl}" target="_blank" rel="noopener noreferrer" class="link link-hover">${record.name}</a>
                                    <span class="ml-2 opacity-60">(Zone: ${zoneDisplay})</span>
                                </li>`;
                            });
                            dnsHtml += '</ul>';
                            currentTargetDiv.innerHTML = dnsHtml;
                            currentTargetDiv.dataset.loaded = 'true';
                        } else if (data.message) {
                            currentTargetDiv.innerHTML = `<p class="opacity-60 italic p-2">${data.message}</p>`;
                            currentTargetDiv.dataset.loaded = 'info';
                        } else {
                            currentTargetDiv.innerHTML = '<p class="opacity-60 italic p-2">No CNAME DNS records found pointing to this tunnel in the configured zones.</p>';
                            currentTargetDiv.dataset.loaded = 'true';
                        }
                    } catch (error) {
                        const errorTargetDiv = document.getElementById(`dns-records-${tunnelId}`);
                        if (errorTargetDiv) {
                            errorTargetDiv.innerHTML = `<p class="text-error p-2">Error loading DNS records: ${error.message}</p>`;
                            errorTargetDiv.dataset.loaded = 'error';
                        }
                    }
                }
                dnsRecordsDisplayRow.classList.remove('hidden');
            }
        });
    });
    startServerPing();
    // Release long-lived resources when the page goes away.
    window.addEventListener('beforeunload', function() {
        if (activeLogSource) activeLogSource.close();
        if (eventSourceHealthCheck) clearInterval(eventSourceHealthCheck);
        if (pingInterval) clearInterval(pingInterval);
    });
});

View file

@ -22,149 +22,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<title>DockFlare v1.7.1 - Cloudflare Tunnel ingress Manager</title>
<script>
(function() {
const currentProtocol = window.location.protocol;
const currentHost = window.location.host;
function fixResources() {
document.querySelectorAll('link[rel="stylesheet"]').forEach(function(link) {
const href = link.getAttribute('href');
if (href && href.startsWith('http:') && window.location.protocol === 'https:') {
link.setAttribute('href', href.replace('http:', 'https:'));
}
});
document.querySelectorAll('script[src]').forEach(function(script) {
const src = script.getAttribute('src');
if (src && src.startsWith('http:') && window.location.protocol === 'https:') {
script.setAttribute('src', src.replace('http:', 'https:'));
}
});
document.querySelectorAll('link[rel="preconnect"]').forEach(function(link) {
const href = link.getAttribute('href');
if (href && href.startsWith('http:') && window.location.protocol === 'https:') {
link.setAttribute('href', href.replace('http:', 'https:'));
}
});
}
fixResources();
document.addEventListener('DOMContentLoaded', fixResources);
const origFetch = window.fetch;
window.fetch = function(url, options) {
if (url && typeof url === 'string' && url.startsWith('http')) {
try {
const urlObj = new URL(url);
if (urlObj.host === currentHost) {
urlObj.protocol = currentProtocol;
return origFetch.call(this, urlObj.toString(), options);
}
} catch (e) { console.error('Error processing URL:', e); }
}
return origFetch.call(this, url, options);
};
let themeMenuScoped;
const htmlElementScoped = document.documentElement;
const availableThemes = [
"light", "dark", "cupcake", "bumblebee", "emerald", "corporate",
"synthwave", "retro", "cyberpunk", "valentine", "halloween", "garden",
"forest", "aqua", "lofi", "pastel", "fantasy", "wireframe", "black",
"luxury", "dracula", "cmyk", "autumn", "business", "acid",
"lemonade", "night", "coffee", "winter"
];
function setTheme(theme) {
if (!availableThemes.includes(theme)) {
console.warn(`Theme "${theme}" not available, defaulting to light.`);
theme = 'light';
}
localStorage.setItem('theme', theme);
htmlElementScoped.setAttribute('data-theme', theme);
if (theme === 'dark' || theme === 'night' || theme === 'dracula' || theme === 'halloween' || theme === 'forest' || theme === 'black' || theme === 'luxury' || theme === 'synthwave' || theme === 'coffee') {
htmlElementScoped.classList.add('dark');
} else {
htmlElementScoped.classList.remove('dark');
}
if (themeMenuScoped) updateSelectedThemeInMenu(theme);
}
function populateThemeMenu() {
if (!themeMenuScoped) return;
themeMenuScoped.innerHTML = '';
availableThemes.forEach(themeName => {
const listItem = document.createElement('li');
listItem.classList.add('w-full');
const link = document.createElement('a');
link.textContent = themeName.charAt(0).toUpperCase() + themeName.slice(1);
link.setAttribute('data-theme-value', themeName);
link.href = "#";
link.classList.add('flex', 'items-center', 'flex-grow', 'w-full', 'px-4', 'py-2');
link.addEventListener('click', (e) => {
e.preventDefault();
const selectedTheme = e.target.getAttribute('data-theme-value');
setTheme(selectedTheme);
if (document.activeElement && typeof document.activeElement.blur === 'function') {
document.activeElement.blur();
}
});
listItem.appendChild(link);
themeMenuScoped.appendChild(listItem);
});
}
function updateSelectedThemeInMenu(currentTheme) {
if (!themeMenuScoped) return;
themeMenuScoped.querySelectorAll('li a').forEach(a => {
if (a.getAttribute('data-theme-value') === currentTheme) {
a.parentElement.classList.add('font-bold', 'text-primary');
a.classList.add('active');
} else {
a.parentElement.classList.remove('font-bold', 'text-primary');
a.classList.remove('active');
}
});
}
function initTheme() {
const savedTheme = localStorage.getItem('theme');
const defaultTheme = 'light';
setTheme(savedTheme || defaultTheme);
}
window.DOCKFLARE_THEME_MODULE = {
initialize: function() {
let themeSelectorBtn = document.getElementById('theme-selector-btn');
themeMenuScoped = document.getElementById('theme-menu');
if (themeMenuScoped && themeSelectorBtn) {
populateThemeMenu();
initTheme();
} else {
console.error("DockFlare Theme Error: UI elements for theme selector not found after DOM load.");
}
}
};
const baseTag = document.createElement('base');
baseTag.href = currentProtocol + '//' + currentHost + '/';
document.head.appendChild(baseTag);
const preconnectDomains = ['rsms.me'];
preconnectDomains.forEach(domain => {
const existingPreconnect = Array.from(document.querySelectorAll('link[rel="preconnect"]'))
.find(link => link.href.includes(domain));
if (existingPreconnect) {
const href = existingPreconnect.getAttribute('href');
if (href && href.startsWith('http') && new URL(href).protocol !== currentProtocol) {
existingPreconnect.setAttribute('href', currentProtocol + '//' + new URL(href).host);
}
}
});
})();
</script>
<link rel="stylesheet" href="{{ url_for('static', filename='css/output.css') }}">
<link rel="preconnect" href="https://rsms.me" crossorigin>
<link rel="stylesheet" href="https://rsms.me/inter/inter.css" crossorigin="anonymous">
@ -608,195 +466,7 @@
</div>
<form method="dialog" class="modal-backdrop"><button>close</button></form>
</dialog>
<script>
const maxLogLines = 250;let initialConnectMessageCleared = false;let activeLogSource = null;let eventSourceHealthCheck = null;let pingInterval = null;
function addLogLine(message, type = 'log') {const logOutput = document.getElementById('log-output');if (!logOutput) { console.error("Log output element not found when trying to add line."); return; }if (!initialConnectMessageCleared && logOutput.textContent.includes('Connecting to log stream...')) {logOutput.textContent = ''; initialConnectMessageCleared = true;}const newLogLine = document.createElement('div');newLogLine.textContent = message;if (type === 'status') newLogLine.classList.add('text-neutral-content', 'opacity-70', 'italic');else if (type === 'error') newLogLine.classList.add('text-red-400', 'font-semibold');else if (type === 'connected') newLogLine.classList.add('text-green-400');const isScrolledToBottom = logOutput.scrollHeight - logOutput.clientHeight <= logOutput.scrollTop + 10;logOutput.appendChild(newLogLine);while (logOutput.childNodes.length > maxLogLines) logOutput.removeChild(logOutput.firstChild);if (isScrolledToBottom) logOutput.scrollTop = logOutput.scrollHeight;}
function connectEventSource() {const logOutput = document.getElementById('log-output');if (!logOutput) { console.error("Log output element not found for EventSource setup."); return; }if (!window.EventSource) {console.error("EventSource not supported");logOutput.textContent = "Browser doesn't support Server-Sent Events."; return;}if (activeLogSource) { try { activeLogSource.close(); } catch (e) { console.error("Error closing existing log stream:", e); } activeLogSource = null; }const currentProtocol = window.location.protocol; const currentHost = window.location.host;const timestampParam = Date.now();const streamUrl = `${currentProtocol}//${currentHost}/stream-logs?t=${timestampParam}`;try {activeLogSource = new EventSource(streamUrl);let connectionTimeout;const resetConnectionTimeout = () => {if (connectionTimeout) clearTimeout(connectionTimeout);connectionTimeout = setTimeout(() => {if (activeLogSource) {activeLogSource.close(); activeLogSource = null;addLogLine("--- Log stream connection timeout. Reconnecting... ---", 'error');setTimeout(connectEventSource, 2000);}}, 10000);};resetConnectionTimeout();activeLogSource.onopen = function() {if (connectionTimeout) clearTimeout(connectionTimeout);addLogLine("--- Log stream connected ---", 'connected');};activeLogSource.onmessage = function(event) {resetConnectionTimeout(); if (event.data === "heartbeat") { return; }addLogLine(event.data, 'log');};let retryAttempt = 0;activeLogSource.onerror = function(err) {if (connectionTimeout) clearTimeout(connectionTimeout);addLogLine("--- Log stream connection error. Retrying... 
---", 'error');if (activeLogSource) { activeLogSource.close(); activeLogSource = null; }retryAttempt++; const delay = Math.min(5000 * Math.pow(1.5, retryAttempt - 1), 30000);setTimeout(connectEventSource, delay);};} catch (e) {addLogLine("--- Failed to establish log stream connection ---", 'error'); setTimeout(connectEventSource, 5000);}if (eventSourceHealthCheck) clearInterval(eventSourceHealthCheck);eventSourceHealthCheck = setInterval(() => {if (!activeLogSource || activeLogSource.readyState === 2) {connectEventSource();}}, 30000);window.addEventListener('beforeunload', function() {if (activeLogSource) activeLogSource.close(); if (eventSourceHealthCheck) clearInterval(eventSourceHealthCheck);});}
function checkEnvironment() {const currentProtocol = window.location.protocol; const currentHost = window.location.host;const debugUrl = `${currentProtocol}//${currentHost}/debug`;fetch(debugUrl).then(response => response.json()).then(data => { alert("Env debug info logged to console."); }).catch(error => console.error("Failed to fetch environment info:", error));}
function formatTimeDifference(diffMillis) {const totalSeconds = Math.round(Math.abs(diffMillis / 1000)); if (totalSeconds < 60) return diffMillis >= 0 ? 'in <1m' : '<1m ago';const days = Math.floor(totalSeconds / (3600 * 24)); const hours = Math.floor((totalSeconds % (3600 * 24)) / 3600); const minutes = Math.floor((totalSeconds % 3600) / 60);let parts = []; if (days > 0) parts.push(`${days}d`); if (hours > 0) parts.push(`${hours}h`); if (minutes > 0 || (days === 0 && hours === 0)) parts.push(`${minutes}m`);const timeString = parts.join(' '); return diffMillis >= 0 ? `in ${timeString}` : `${timeString} ago`;}
function updateCountdowns() {
document.querySelectorAll('div[data-delete-at]').forEach(div => {
const deleteAtISO = div.dataset.deleteAt;
if (!deleteAtISO) return;
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
const absoluteTimeSpan = div.querySelector('.absolute-time-display');
const countdownSpan = div.querySelector('.countdown-timer');
if (!absoluteTimeSpan || !countdownSpan) {
return;
}
try {
const targetDate = new Date(deleteAtISO);
if (isNaN(targetDate.getTime())) throw new Error("Invalid date parsed from data-delete-at");
const options = {
hour: '2-digit', minute: '2-digit',
day: '2-digit', month: 'short', year: 'numeric',
};
absoluteTimeSpan.textContent = targetDate.toLocaleString(undefined, options);
const now = new Date();
const diff = targetDate - now;
countdownSpan.textContent = `(${formatTimeDifference(diff)})`;
if (diff < 0) {
countdownSpan.classList.add('text-error');
absoluteTimeSpan.classList.add('text-error');
} else {
countdownSpan.classList.remove('text-error');
absoluteTimeSpan.classList.remove('text-error');
}
} catch (e) {
absoluteTimeSpan.textContent = "(Invalid Date)";
countdownSpan.textContent = "";
console.error("Error processing date for countdown:", deleteAtISO, e);
}
});
}
function startServerPing() {if (pingInterval) clearInterval(pingInterval);pingInterval = setInterval(() => {const currentProtocol = window.location.protocol; const currentHost = window.location.host;const pingUrl = `${currentProtocol}//${currentHost}/ping?t=${new Date().getTime()}`;fetch(pingUrl).then(response => response.ok ? response.json() : Promise.reject(`Ping failed: ${response.status}`)).then(data => {}).catch(error => {});}, 30000);}
function updateReconciliationStatus() {const currentProtocol = window.location.protocol;const currentHost = window.location.host;const statusUrl = `${currentProtocol}//${currentHost}/reconciliation-status?t=${new Date().getTime()}`;fetch(statusUrl).then(response => response.json()).then(data => {const statusElement = document.getElementById('reconciliation-status');const messageElement = document.getElementById('reconciliation-status-message');if (!statusElement || !messageElement) {return;}if (data.status) {messageElement.textContent = data.status;messageElement.style.display = data.in_progress ? 'block' : 'none';}if (data.in_progress) {statusElement.innerHTML = `<div role="alert" class="alert alert-warning shadow-md text-sm"><svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" class="stroke-current shrink-0 w-6 h-6 animate-spin"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M12 6v6l4 2M21.56 10.5A10.001 10.001 0 0012 2a10 10 0 100 20 9.974 9.974 0 005.201-1.71l-.001-.001z"></path></svg><div><h3 class="font-bold">Reconciliation: ${data.progress}%</h3><div class="text-xs">Processing ${data.processed_items} of ${data.total_items} items...</div></div></div>`;} else if (statusElement.innerHTML.includes('Reconciliation')) {statusElement.innerHTML = `<div role="alert" class="alert alert-success shadow-md text-sm"><svg xmlns="http://www.w3.org/2000/svg" class="stroke-current shrink-0 h-6 w-6" fill="none" viewBox="0 0 24 24"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z"></path></svg><span>Reconciliation complete</span></div>`;setTimeout(() => {if (statusElement.innerHTML.includes('Reconciliation complete')) {statusElement.innerHTML = '';if (messageElement) {messageElement.style.display = 'none';}}}, 5000);}}).catch(err => {});}
document.addEventListener('DOMContentLoaded', function() {
if (window.DOCKFLARE_THEME_MODULE && typeof window.DOCKFLARE_THEME_MODULE.initialize === 'function') {
window.DOCKFLARE_THEME_MODULE.initialize();
}
const currentProtocol = window.location.protocol;const currentHost = window.location.host;
document.querySelectorAll('form.protocol-aware-form').forEach(form => {if (form.getAttribute('action')) {let actionUrl = form.getAttribute('action');if (actionUrl.startsWith('/')) {actionUrl = `${currentProtocol}//${currentHost}${actionUrl}`; form.setAttribute('action', actionUrl);} else if (actionUrl.startsWith('http')) {try {const parsedUrl = new URL(actionUrl);if (parsedUrl.host === currentHost && parsedUrl.protocol !== currentProtocol) {parsedUrl.protocol = currentProtocol; form.setAttribute('action', parsedUrl.toString());}} catch (e) {}}}});
document.querySelectorAll('a[href]').forEach(link => {const href = link.getAttribute('href');if (href && (href.startsWith('/') || (href.startsWith('http') && new URL(href, window.location.origin).host === currentHost))) {try {const url = new URL(href, window.location.origin);if (url.host === currentHost && url.protocol !== currentProtocol) {url.protocol = currentProtocol;link.setAttribute('href', url.toString());}} catch (e) {}}});
updateCountdowns();const countdownInterval = setInterval(updateCountdowns, 30000);connectEventSource();
updateReconciliationStatus();setInterval(updateReconciliationStatus, 2000);
function toggleAuthEmailField(policyType, hostname, selectElementId) {
if (!selectElementId || typeof selectElementId !== 'string') {
return;
}
const parts = selectElementId.split('-');
if (parts.length < 3) {
return;
}
const source = parts[1];
const hostnameIdentifier = parts.slice(2).join('-');
const emailFieldDivId = `auth-email-field-${source}-${hostnameIdentifier}`;
const emailFieldDiv = document.getElementById(emailFieldDivId);
if (emailFieldDiv) {
if (policyType === 'authenticate_email') {
emailFieldDiv.classList.remove('hidden');
} else {
emailFieldDiv.classList.add('hidden');
const emailInput = emailFieldDiv.querySelector('input[name="auth_email"]');
if (emailInput) {
emailInput.value = '';
}
}
}
}
document.querySelectorAll('.policy-type-select').forEach(select => {
if (select.id) {
select.addEventListener('change', function() {
if (this.id) {
toggleAuthEmailField(this.value, this.dataset.hostname, this.id);
} else {
console.warn("Select element in change event is missing an ID:", this);
}
});
toggleAuthEmailField(select.value, select.dataset.hostname, select.id);
} else {
console.warn("Found a .policy-type-select element missing an ID. Cannot attach listener or set initial state for its email field.", select);
}
});
// Expand/collapse handler for per-tunnel DNS record rows.
// Records are fetched lazily on first expand and cached via the
// data-loaded attribute; a previous fetch error triggers a refetch.
document.querySelectorAll('.tunnel-dns-toggle').forEach(button => {
    button.addEventListener('click', async function() {
        const tunnelId = this.dataset.tunnelId;
        const tunnelDetailsRow = this.closest('tr');
        // The DNS records live in a dedicated sibling <tr> directly below the tunnel row.
        const dnsRecordsDisplayRow = tunnelDetailsRow.nextElementSibling;
        const targetDivId = this.getAttribute('aria-controls');
        const targetDiv = document.getElementById(targetDivId);
        const isExpanded = this.getAttribute('aria-expanded') === 'true';
        const expandIcon = this.querySelector('.expand-icon');
        const collapseIcon = this.querySelector('.collapse-icon');
        // Bail out if the expected sibling row or target container is missing
        // (template structure changed or markup is malformed).
        if (!dnsRecordsDisplayRow || !dnsRecordsDisplayRow.classList.contains('dns-records-row')) {
            console.error(`Could not find the dedicated DNS records row for tunnel ${tunnelId}. Button was in:`, tunnelDetailsRow, "Next sibling is:", dnsRecordsDisplayRow);
            return;
        }
        if (!targetDiv) {
            console.error(`Could not find targetDiv with ID ${targetDivId} for tunnel ${tunnelId}`);
            return;
        }
        if (isExpanded) {
            // Collapse: hide the row and swap the chevron icons back.
            dnsRecordsDisplayRow.classList.add('hidden');
            this.setAttribute('aria-expanded', 'false');
            if (expandIcon) expandIcon.classList.remove('hidden');
            if (collapseIcon) collapseIcon.classList.add('hidden');
        } else {
            this.setAttribute('aria-expanded', 'true');
            if (expandIcon) expandIcon.classList.add('hidden');
            if (collapseIcon) collapseIcon.classList.remove('hidden');
            // NOTE(review): the second clause is redundant — 'error' already
            // fails the !== 'true' test. Also, rows marked loaded === 'info'
            // (server message, no records) are refetched on every expand;
            // confirm that is intentional.
            if (targetDiv.dataset.loaded !== 'true' || targetDiv.dataset.loaded === 'error') {
                targetDiv.innerHTML = '<p class="opacity-60 italic animate-pulse p-2">Loading DNS records...</p>';
                // Show the row immediately so the loading placeholder is visible.
                dnsRecordsDisplayRow.classList.remove('hidden');
                try {
                    // Build an absolute same-origin URL; the timestamp query
                    // parameter busts any intermediary caching.
                    const currentProtocol = window.location.protocol;
                    const currentHost = window.location.host;
                    const fetchUrl = `${currentProtocol}//${currentHost}/tunnel-dns-records/${encodeURIComponent(tunnelId)}?t=${Date.now()}`;
                    const response = await fetch(fetchUrl);
                    if (!response.ok) {
                        // Prefer the server's error/message payload when the body is JSON.
                        let errorDetail = `HTTP error ${response.status}`;
                        try { const errorData = await response.json(); errorDetail = errorData.error || errorData.message || errorDetail; } catch (e) {}
                        throw new Error(errorDetail);
                    }
                    const data = await response.json();
                    // Re-resolve the container: the page may have re-rendered
                    // while the request was in flight.
                    const currentTargetDiv = document.getElementById(`dns-records-${tunnelId}`);
                    if (!currentTargetDiv) {
                        console.error("PANIC: Target DIV disappeared or was wrong during fetch for tunnel " + tunnelId);
                        return;
                    }
                    if (data.dns_records && data.dns_records.length > 0) {
                        // Render each record as a link plus its zone (name if known, else id).
                        let dnsHtml = '<ul class="list-none pl-4 space-y-1.5">';
                        data.dns_records.forEach(record => {
                            const recordUrl = `https://${record.name}`;
                            const zoneDisplay = record.zone_name ? record.zone_name : record.zone_id;
                            dnsHtml += `<li class="opacity-90 text-xs">
                                <svg xmlns="http://www.w3.org/2000/svg" class="h-3 w-3 inline-block mr-1 text-info" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2"><path stroke-linecap="round" stroke-linejoin="round" d="M13.828 10.172a4 4 0 00-5.656 0l-4 4a4 4 0 105.656 5.656l1.102-1.101m-.758-4.899a4 4 0 005.656 0l4-4a4 4 0 00-5.656-5.656l-1.1 1.1" /></svg>
                                <a href="${recordUrl}" target="_blank" rel="noopener noreferrer" class="link link-hover">${record.name}</a>
                                <span class="ml-2 opacity-60">(Zone: ${zoneDisplay})</span>
                            </li>`;
                        });
                        dnsHtml += '</ul>';
                        currentTargetDiv.innerHTML = dnsHtml;
                        currentTargetDiv.dataset.loaded = 'true';
                    } else if (data.message) {
                        // Informational message from the server (e.g. no zones configured).
                        currentTargetDiv.innerHTML = `<p class="opacity-60 italic p-2">${data.message}</p>`;
                        currentTargetDiv.dataset.loaded = 'info';
                    } else {
                        currentTargetDiv.innerHTML = '<p class="opacity-60 italic p-2">No CNAME DNS records found pointing to this tunnel in the configured zones.</p>';
                        currentTargetDiv.dataset.loaded = 'true';
                    }
                } catch (error) {
                    // Mark as 'error' so the next expand retries the fetch.
                    const errorTargetDiv = document.getElementById(`dns-records-${tunnelId}`);
                    if (errorTargetDiv) {
                        errorTargetDiv.innerHTML = `<p class="text-error p-2">Error loading DNS records: ${error.message}</p>`;
                        errorTargetDiv.dataset.loaded = 'error';
                    }
                }
            }
            dnsRecordsDisplayRow.classList.remove('hidden');
        }
    });
});
});
startServerPing();
</script>
</body>
</html>

View file

View file

449
dockflare/app/web/routes.py Normal file
View file

@ -0,0 +1,449 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# app/web/routes.py
import logging
import time
import copy
import os
import random
import queue
from datetime import datetime, timezone
import traceback
import json
from flask import (
Blueprint, render_template, jsonify, redirect, url_for, request, Response,
stream_with_context, current_app
)
from app import config, docker_client, tunnel_state, cloudflared_agent_state, log_queue # Globals from app/__init__
from app.core.state_manager import managed_rules, state_lock, save_state, load_state # load_state if UI triggers it
from app.core.tunnel_manager import (
start_cloudflared_container,
stop_cloudflared_container,
update_cloudflare_config
)
from app.core.cloudflare_api import (
get_all_account_cloudflare_tunnels,
get_dns_records_for_tunnel,
create_cloudflare_dns_record,
delete_cloudflare_dns_record,
get_zone_id_from_name,
get_zone_details_by_id
)
from app.core.access_manager import (
check_for_tld_access_policy,
get_cloudflare_account_email,
delete_cloudflare_access_application,
create_cloudflare_access_application,
update_cloudflare_access_application,
generate_access_app_config_hash
)
from app.core.reconciler import reconcile_state_threaded
from app.core.docker_handler import is_valid_hostname, is_valid_service
bp = Blueprint('web', __name__)
def get_display_token_ui(token_value):
    """Return a redacted preview of a tunnel token suitable for the UI.

    Long tokens are shown as their first and last five characters joined by
    an ellipsis; missing and short tokens get placeholder text instead.
    """
    if not token_value:
        return "Not available"
    if len(token_value) <= 10:
        return "Token (short)"
    head, tail = token_value[:5], token_value[-5:]
    return f"{head}...{tail}"
@bp.before_app_request
def detect_protocol_bp():
    """Record the effective request scheme before handling each request.

    Honours the X-Forwarded-Proto header set by reverse proxies, falling
    back to the transport-level TLS flag on the request itself.
    """
    proxied_scheme = request.headers.get('X-Forwarded-Proto', '').lower()
    if proxied_scheme == 'https' or request.is_secure:
        scheme = 'https'
    else:
        scheme = 'http'
    current_app.config['PREFERRED_URL_SCHEME'] = scheme
@bp.after_app_request
def add_security_headers_bp(response):
    """Attach security, CSP and permissive CORS headers to every response.

    NOTE(review): the CSP is intentionally wide open ('*' sources with
    unsafe-inline/unsafe-eval), so it adds little beyond the optional
    upgrade-insecure-requests directive on HTTPS — confirm this is deliberate.
    """
    serving_https = current_app.config.get('PREFERRED_URL_SCHEME') == 'https'
    policy_parts = [
        "default-src * 'unsafe-inline' 'unsafe-eval' data: blob:; ",
        "script-src * 'unsafe-inline' 'unsafe-eval'; ",
        "style-src * 'unsafe-inline'; ",
        "img-src * data: blob:; font-src * data:; ",
        "connect-src *; frame-src *; ",
    ]
    if serving_https:
        policy_parts.append("upgrade-insecure-requests; ")
    headers = {
        'X-Content-Type-Options': 'nosniff',
        'X-Frame-Options': 'SAMEORIGIN',
        'X-XSS-Protection': '1; mode=block',
        'Content-Security-Policy': ''.join(policy_parts),
        'Referrer-Policy': 'strict-origin-when-cross-origin',
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
        'Access-Control-Allow-Headers': 'Content-Type, X-Requested-With, Authorization',
    }
    if serving_https:
        # HSTS only makes sense (and is only honoured) over HTTPS.
        headers['Strict-Transport-Security'] = 'max-age=31536000; includeSubDomains'
    for name, value in headers.items():
        response.headers[name] = value
    return response
@bp.context_processor
def inject_protocol_bp():
    """Expose scheme/host helpers to every template rendered by this app."""
    scheme = current_app.config.get('PREFERRED_URL_SCHEME', 'http')
    context = {
        'protocol': scheme,
        'is_https': scheme == 'https',
        'base_url': f"{scheme}://{request.host}",
        'host': request.host,
        'request_scheme': request.scheme,
    }
    return context
@bp.route('/')
def status_page():
    """Render the main dashboard.

    Snapshots the managed rules plus tunnel/agent state under the shared
    lock, derives an initialization summary, checks whether a TLD-level
    Access policy exists for the configured zone, and hands everything to
    the status_page.html template.
    """
    rules_for_template = {}
    template_tunnel_state = {}
    template_agent_state = {}
    initialization_status = {}
    tld_policy_exists_val = False
    account_email_for_tld_val = None
    relevant_zone_name_for_tld_policy_val = None
    # Copy shared state under the lock so the page renders a consistent
    # view even while background reconciliation mutates it.
    with state_lock:
        for hostname, rule in managed_rules.items():
            rule_copy = copy.deepcopy(rule)
            # Normalize delete_at to an aware UTC datetime for display.
            if rule_copy.get("delete_at") and isinstance(rule_copy["delete_at"], datetime):
                rule_copy["delete_at"] = rule_copy["delete_at"].replace(tzinfo=timezone.utc) if rule_copy["delete_at"].tzinfo is None else rule_copy["delete_at"].astimezone(timezone.utc)
            rules_for_template[hostname] = rule_copy
        template_tunnel_state = tunnel_state.copy()
        template_agent_state = cloudflared_agent_state.copy()
    # "complete" once any tunnel id is known (managed or external);
    # "in_progress" while the status message still reports initialization.
    initialization_status = {
        "complete": template_tunnel_state.get("id") is not None or config.EXTERNAL_TUNNEL_ID,
        "in_progress": not (template_tunnel_state.get("id") or config.EXTERNAL_TUNNEL_ID) and \
            template_tunnel_state.get("status_message", "").lower().startswith("init")
    }
    # TLD Access-policy lookup needs a configured zone; docker_client is also
    # required here — presumably as a proxy for "app fully initialized"
    # (TODO confirm: Docker availability is not obviously related to zones).
    if config.CF_ZONE_ID and docker_client:
        zone_details = get_zone_details_by_id(config.CF_ZONE_ID)
        if zone_details and zone_details.get("name"):
            relevant_zone_name_for_tld_policy_val = zone_details.get("name")
        if relevant_zone_name_for_tld_policy_val:
            tld_policy_exists_val = check_for_tld_access_policy(relevant_zone_name_for_tld_policy_val)
            if not tld_policy_exists_val:
                # Only fetch the account email when we might need to offer
                # creating a default TLD policy.
                account_email_for_tld_val = get_cloudflare_account_email()
        else:
            logging.info("Relevant zone name for TLD policy check (from CF_ZONE_ID) could not be determined.")
    display_token_val = get_display_token_ui(template_tunnel_state.get("token"))
    all_account_tunnels_list = get_all_account_cloudflare_tunnels()
    return render_template('status_page.html',
        tunnel_state=template_tunnel_state,
        agent_state=template_agent_state,
        initialization=initialization_status,
        display_token=display_token_val,
        cloudflared_container_name=config.CLOUDFLARED_CONTAINER_NAME,
        docker_available=docker_client is not None,
        external_cloudflared=config.USE_EXTERNAL_CLOUDFLARED,
        external_tunnel_id=config.EXTERNAL_TUNNEL_ID,
        rules=rules_for_template,
        all_account_tunnels=all_account_tunnels_list,
        CF_ACCOUNT_ID_CONFIGURED=bool(config.CF_ACCOUNT_ID),
        ACCOUNT_ID_FOR_DISPLAY=config.CF_ACCOUNT_ID if config.CF_ACCOUNT_ID else "Not Configured",
        relevant_zone_name_for_tld_policy=relevant_zone_name_for_tld_policy_val,
        tld_policy_exists=tld_policy_exists_val,
        account_email_for_tld=account_email_for_tld_val,
        CF_ZONE_ID_CONFIGURED=bool(config.CF_ZONE_ID)
    )
@bp.route('/ui_update_access_policy/<path:hostname>', methods=['POST'])
def ui_update_access_policy(hostname):
    """Apply a UI-driven override of a rule's Cloudflare Access policy.

    Supported policy types: "none"/"public_no_policy" (delete the Access
    app), "default_tld", "bypass" and "authenticate_email".  Marks the rule
    as UI-overridden and persists state when anything changed.

    NOTE(review): several branches are elided ("# ..." markers) in this
    revision — the create/update calls for the Access application are stubs.
    """
    if not docker_client:
        cloudflared_agent_state["last_action_status"] = "Error: UI Policy Update - Docker client unavailable."
        return redirect(url_for('web.status_page'))
    new_policy_type = request.form.get('access_policy_type')
    auth_email = request.form.get('auth_email', '').strip()
    action_status_message = f"Processing UI policy update for {hostname}..."
    # BUG FIX: state_changed_locally was previously assigned only when an
    # operation succeeded, so the later check raised NameError on the
    # no-change path.  Initialize it unconditionally.
    state_changed_locally = False
    with state_lock:
        current_rule = managed_rules.get(hostname)
        if not current_rule:
            cloudflared_agent_state["last_action_status"] = f"Error: Rule for {hostname} not found."
            return redirect(url_for('web.status_page'))
        current_access_app_id = current_rule.get("access_app_id")
        desired_session_duration = request.form.get("session_duration", current_rule.get("access_session_duration", "24h"))
        cf_access_policies = []
        final_policy_type_for_state = new_policy_type
        custom_rules_for_hash = None
        operation_successful = False
        if new_policy_type == "none" or new_policy_type == "public_no_policy":
            # Removing protection: delete any existing Access application.
            if current_access_app_id:
                if delete_cloudflare_access_application(current_access_app_id):
                    current_rule["access_app_id"] = None
                    operation_successful = True
                    # ...
            final_policy_type_for_state = None
        elif new_policy_type == "default_tld":
            final_policy_type_for_state = "default_tld"
        elif new_policy_type == "bypass":
            # Explicit public bypass policy (everyone allowed through).
            cf_access_policies = [{"name": "UI Public Bypass", "decision": "bypass", "include": [{"everyone": {}}]}]
            custom_rules_for_hash = json.dumps(cf_access_policies)
            final_policy_type_for_state = "bypass"
        elif new_policy_type == "authenticate_email":
            # Email auth requires an address; bail out if the form omitted it.
            if not auth_email:
                return redirect(url_for('web.status_page'))
            cf_access_policies = [
                {"name": f"UI Allow Email {auth_email}", "decision": "allow", "include": [{"email": {"email": auth_email}}]},
                {"name": "UI Deny Fallback", "decision": "deny", "include": [{"everyone": {}}]}
            ]
            custom_rules_for_hash = json.dumps(cf_access_policies)
            final_policy_type_for_state = "authenticate_email"
        if new_policy_type in ["bypass", "authenticate_email"]:
            if not cf_access_policies:  # ... error ...
                return redirect(url_for('web.status_page'))
            # Hash the desired config so reconciliation can detect drift.
            new_config_hash = generate_access_app_config_hash(
                final_policy_type_for_state, desired_session_duration,  # ...
                custom_access_rules_str=custom_rules_for_hash
            )
            if current_access_app_id:
                pass  # TODO: update existing Access application (elided).
            else:
                pass  # TODO: create a new Access application (elided).
            if operation_successful:
                current_rule["access_policy_ui_override"] = True
                # ...
        if current_rule.get("access_policy_ui_override") or operation_successful:
            current_rule["access_policy_ui_override"] = True
        if operation_successful:
            state_changed_locally = True
        if state_changed_locally:
            save_state()
    cloudflared_agent_state["last_action_status"] = action_status_message
    return redirect(url_for('web.status_page'))
@bp.route('/revert_access_policy_to_labels/<path:hostname>', methods=['POST'])
def revert_access_policy_to_labels(hostname):
    """Drop a UI-made Access-policy override so container labels win again.

    Clears the override flag, deletes the UI-created Access application (if
    any) and triggers an asynchronous reconciliation pass to rebuild the
    label-driven policy.  NOTE: several branches are elided ("# ...") in
    this revision.
    """
    if not docker_client: # ...
        return redirect(url_for('web.status_page'))
    action_status_message = f"Attempting to revert Access Policy for '{hostname}' to label configuration..."
    app_id_to_delete_if_any = None
    state_changed_for_revert = False
    with state_lock:
        current_rule = managed_rules.get(hostname)
        if not current_rule: # ...
            return redirect(url_for('web.status_page'))
        # Nothing to revert unless the UI previously overrode the policy.
        if not current_rule.get("access_policy_ui_override", False): # ...
            return redirect(url_for('web.status_page'))
        app_id_to_delete_if_any = current_rule.get("access_app_id")
        current_rule["access_policy_ui_override"] = False
        # ...
        state_changed_for_revert = True
        if state_changed_for_revert: save_state()
    # Delete the Cloudflare Access app outside the lock (network call).
    if app_id_to_delete_if_any:
        if delete_cloudflare_access_application(app_id_to_delete_if_any):
            pass # ...
    # Rebuild label-derived state in the background.
    reconcile_state_threaded()
    action_status_message += " Reconciliation triggered."
    cloudflared_agent_state["last_action_status"] = action_status_message
    return redirect(url_for('web.status_page'))
@bp.route('/tunnel-dns-records/<tunnel_id>')
def tunnel_dns_records(tunnel_id):
    """Return, as JSON, the DNS records in the configured zones that point
    at *tunnel_id*, sorted case-insensitively by record name."""
    if not tunnel_id:
        return jsonify({"error": "Tunnel ID is required"}), 400
    # Collect the set of zone ids to scan: the primary configured zone plus
    # any extra zone names resolved to ids.
    scan_zone_ids = set()
    if config.CF_ZONE_ID:
        scan_zone_ids.add(config.CF_ZONE_ID)
    for zone_name in config.TUNNEL_DNS_SCAN_ZONE_NAMES:
        resolved = get_zone_id_from_name(zone_name)
        if resolved:
            scan_zone_ids.add(resolved)
    if not scan_zone_ids:
        return jsonify({"dns_records": [], "message": "No zones configured or resolved for DNS scan."})
    collected = []
    for zone_id in scan_zone_ids:
        found = get_dns_records_for_tunnel(zone_id, tunnel_id)
        if found:
            collected.extend(found)
    collected.sort(key=lambda record: record.get("name", "").lower())
    return jsonify({"dns_records": collected})
@bp.route('/ping')
def ping():
    """Lightweight liveness endpoint polled by the frontend's server-ping loop."""
    payload = {
        "status": "ok",
        "timestamp": int(time.time()),
        "version": "1.7.1",
        "protocol": request.environ.get('wsgi.url_scheme', 'unknown'),
    }
    return jsonify(payload)
@bp.route('/debug')
def debug_info():
    """Dump request and WSGI-environ details as JSON to help diagnose
    reverse-proxy / scheme-detection issues."""
    try:
        request_view = {
            "scheme": request.scheme,
            "is_secure": request.is_secure,
            "host": request.host,
            "path": request.path,
            "url": request.url,
            "headers": dict(request.headers.items()),
        }
        environ_view = {
            "wsgi.url_scheme": request.environ.get('wsgi.url_scheme'),
            "HTTP_X_FORWARDED_PROTO": request.environ.get('HTTP_X_FORWARDED_PROTO'),
        }
        return jsonify({
            "request": request_view,
            "environment": environ_view,
            "timestamp": int(time.time()),
        })
    except Exception as e:
        # Surface the failure itself as JSON so the debug endpoint never 500s blankly.
        return jsonify({"error": str(e), "traceback": traceback.format_exc()}), 500
@bp.route('/reconciliation-status')
def reconciliation_status_route():
    """Report the background reconciliation thread's progress as JSON."""
    info = getattr(current_app, 'reconciliation_info', {})
    # Field names and their fallbacks when reconciliation hasn't started yet.
    defaults = (
        ("in_progress", False),
        ("progress", 0),
        ("total_items", 0),
        ("processed_items", 0),
        ("status", "Not started"),
    )
    return jsonify({key: info.get(key, fallback) for key, fallback in defaults})
@bp.route('/start-tunnel', methods=['POST'])
def start_tunnel_route():
    """Launch the managed cloudflared agent container, then return to the dashboard."""
    start_cloudflared_container()
    # Brief pause so the redirected status page reflects the new container state.
    time.sleep(1)
    return redirect(url_for('web.status_page'))
@bp.route('/stop-tunnel', methods=['POST'])
def stop_tunnel_route():
    """Stop the managed cloudflared agent container, then return to the dashboard."""
    stop_cloudflared_container()
    # Brief pause so the redirected status page reflects the new container state.
    time.sleep(1)
    return redirect(url_for('web.status_page'))
@bp.route('/force_delete_rule/<path:hostname>', methods=['POST'])
def force_delete_rule_route(hostname):
    """Forcibly remove a managed rule: its DNS record, Access application,
    local state entry, and (for managed tunnels) the tunnel ingress config.

    NOTE: parts of this handler are elided ("# ...") in this revision.
    """
    rule_removed_from_state = False; dns_delete_success = False; access_app_delete_success = False
    zone_id_for_delete = None; access_app_id_for_delete = None
    # First pass under the lock: capture the ids needed for the remote deletes.
    with state_lock:
        rule_details = managed_rules.get(hostname)
        if rule_details: # ...
            zone_id_for_delete = rule_details.get("zone_id")
            access_app_id_for_delete = rule_details.get("access_app_id")
            # ...
    # Pick the tunnel id for the managed agent or the configured external tunnel.
    effective_tunnel_id = tunnel_state.get("id") if not config.USE_EXTERNAL_CLOUDFLARED else config.EXTERNAL_TUNNEL_ID
    # Remote cleanup happens outside the lock (network calls).
    if zone_id_for_delete and effective_tunnel_id:
        dns_delete_success = delete_cloudflare_dns_record(zone_id_for_delete, hostname, effective_tunnel_id)
    if access_app_id_for_delete:
        access_app_delete_success = delete_cloudflare_access_application(access_app_id_for_delete)
    # ...
    # Second pass under the lock: drop the local state entry and persist.
    with state_lock:
        if hostname in managed_rules: del managed_rules[hostname]; rule_removed_from_state = True; save_state()
    # ...
    # Managed tunnels need their ingress config pushed after a rule removal.
    if rule_removed_from_state and not config.USE_EXTERNAL_CLOUDFLARED:
        if update_cloudflare_config(): pass # ...
    # ...
    return redirect(url_for('web.status_page'))
@bp.route('/stream-logs')
def stream_logs_route():
    """Stream application log lines to the browser via Server-Sent Events."""
    # Random id is only used to correlate connect/disconnect log messages.
    client_id = f"client-{random.randint(1000, 9999)}"
    logging.info(f"Log stream client {client_id} connected.")
    def event_stream():
        # Generator yielding SSE frames: one "data:" frame per queued log
        # line, plus comment-only keepalive frames (": keepalive") so idle
        # connections aren't dropped by intermediaries.
        try:
            yield f"data: --- Log stream connected (client {client_id}) ---\n\n"
            last_heartbeat = time.time()
            while True:
                try:
                    # Block briefly for the next log line; queue.Empty falls
                    # through to the heartbeat path below.
                    log_entry = log_queue.get(timeout=0.25)
                    yield f"data: {log_entry}\n\n"
                    last_heartbeat = time.time()
                except queue.Empty:
                    # Send a keepalive if nothing was emitted for >2 seconds.
                    if time.time() - last_heartbeat > 2:
                        yield f": keepalive\n\n"
                        last_heartbeat = time.time()
                    time.sleep(0.1)
        except GeneratorExit:
            # Raised when the client closes the connection.
            logging.info(f"Log stream client {client_id} disconnected.")
        except Exception as e_stream:
            logging.error(f"Error in log stream for {client_id}: {e_stream}", exc_info=True)
        finally:
            logging.info(f"Log stream for client {client_id} ended.")
    response = Response(event_stream(), mimetype='text/event-stream')
    # Disable caching and proxy buffering so events reach the client
    # immediately (X-Accel-Buffering targets nginx).
    response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    response.headers['Pragma'] = 'no-cache'; response.headers['Expires'] = '0'
    response.headers['X-Accel-Buffering'] = 'no'
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET'
    return response
@bp.route('/ui/manual-rules/add', methods=['POST'])
def ui_add_manual_rule_route():
    """Create a manual (non-label) ingress rule from the dashboard form.

    NOTE(review): this handler is heavily elided ("# ...") in this revision;
    the rule-construction and validation steps are missing.
    """
    # Guard: needs Docker plus a known tunnel id (managed or external).
    if not docker_client or (not tunnel_state.get("id") and not config.EXTERNAL_TUNNEL_ID): # ...
        return redirect(url_for('web.status_page'))
    hostname = request.form.get('manual_hostname', '').strip()
    with state_lock:
        save_state()
    effective_tunnel_id = tunnel_state.get("id") if not config.USE_EXTERNAL_CLOUDFLARED else config.EXTERNAL_TUNNEL_ID
    if update_cloudflare_config():
        # NOTE(review): target_zone_id is never defined in this elided
        # revision — this call would raise NameError; confirm the zone
        # resolution logic the "# ..." markers replaced.
        create_cloudflare_dns_record(target_zone_id, hostname, effective_tunnel_id)
        # ...
    return redirect(url_for('web.status_page'))
@bp.route('/ui/manual-rules/delete/<path:hostname>', methods=['POST'])
def ui_delete_manual_rule_route(hostname):
    """Delete a manually-added ingress rule plus its DNS record and Access app.

    NOTE(review): this handler is elided in this revision — the literal
    "..." arguments are Ellipsis placeholders, and zone_id_for_delete /
    access_app_id_for_delete are never bound, so these calls are stubs.
    """
    with state_lock:
        rule_details = managed_rules.get(hostname)
        # Only rules created via the UI ("manual" source) may be deleted here.
        if rule_details and rule_details.get("source") == "manual":
            # ... (get zone_id, access_app_id) ...
            del managed_rules[hostname]; save_state()
            # ...
    effective_tunnel_id = tunnel_state.get("id") if not config.USE_EXTERNAL_CLOUDFLARED else config.EXTERNAL_TUNNEL_ID
    if zone_id_for_delete and effective_tunnel_id: delete_cloudflare_dns_record(...)
    if access_app_id_for_delete: delete_cloudflare_access_application(...)
    if update_cloudflare_config(): pass # ...
    return redirect(url_for('web.status_page'))
@bp.route('/cloudflare-ping')
def cloudflare_ping_route():  # Renamed
    """Echo Cloudflare-specific request metadata (CF-* headers) as JSON.

    Useful for verifying that traffic actually traverses the Cloudflare
    tunnel (Cf-Ray / Cf-Connecting-Ip present) rather than hitting the
    origin directly.
    """
    try:
        # Fix: the original also built a cf_headers dict that was never used
        # in the response — removed as dead code.
        # Cf-Visitor carries a small JSON blob like {"scheme":"https"}.
        visitor_data = json.loads(request.headers.get('Cf-Visitor', '{}'))
        return jsonify({
            "status": "ok", "timestamp": int(time.time()),
            "cloudflare": {
                "connecting_ip": request.headers.get('Cf-Connecting-Ip') or request.remote_addr,
                "visitor": visitor_data,
                "ray": request.headers.get('Cf-Ray'),
            },
            "request": {"host": request.host, "path": request.path, "scheme": request.scheme},
            "server": {"wsgi_url_scheme": request.environ.get('wsgi.url_scheme')},
        })
    except Exception as e_cfping:
        # Malformed Cf-Visitor JSON (or any other failure) is reported, not raised.
        return jsonify({"error": str(e_cfping), "status": "error", "timestamp": int(time.time())}), 500

View file

View file

@ -468,9 +468,9 @@
"dev": true
},
"node_modules/electron-to-chromium": {
"version": "1.5.154",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.154.tgz",
"integrity": "sha512-G4VCFAyKbp1QJ+sWdXYIRYsPGvlV5sDACfCmoMFog3rjm1syLhI41WXm/swZypwCIWIm4IFLWzHY14joWMQ5Fw==",
"version": "1.5.157",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.157.tgz",
"integrity": "sha512-/0ybgsQd1muo8QlnuTpKwtl0oX5YMlUGbm8xyqgDU00motRkKFFbUJySAQBWcY79rVqNLWIWa87BGVGClwAB2w==",
"dev": true
},
"node_modules/emoji-regex": {

View file

@ -3,7 +3,7 @@
"version": "1.5.0",
"private": true,
"scripts": {
"build:css": "tailwindcss -c ./tailwind.config.js -i ./templates/input.css -o ./static/css/output.css --minify"
"build:css": "tailwindcss -c ./tailwind.config.js -i ./app/templates/input.css -o ./app/static/css/output.css --minify"
},
"devDependencies": {
"tailwindcss": "^3.4.3",

View file

@ -1,7 +1,7 @@
/** @type {import('tailwindcss').Config} */
module.exports = {
content: [
"./templates/**/*.html",
"./app/templates/**/*.html",
],
darkMode: 'class',
theme: {

View file

@ -1,34 +0,0 @@
# DockFlare: Automates Cloudflare Tunnel ingress from Docker labels.
# Copyright (C) 2025 ChrispyBacon-Dev <https://github.com/ChrispyBacon-dev/DockFlare>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
docker run -d \
--name my-nginx-web \
--network cloudflare-net \
-l cloudflare.tunnel.enable="true" \
-l cloudflare.tunnel.hostname="nginx.your-domain.com" \
-l cloudflare.tunnel.service="http://my-nginx-web:80" \
nginx:latest
### For Multi DNS Zones on your CloudFlare
docker run -d \
--name my-nginx-web2 \
--network cloudflare-net \
-l cloudflare.tunnel.enable="true" \
-l cloudflare.tunnel.hostname="nginx.your-other-domain.com" \
-l cloudflare.tunnel.service="http://my-nginx-web2:80" \
-l cloudflare.tunnel.zonename="your-other-domain.com" \
nginx:latest

File diff suppressed because one or more lines are too long