From 030bd36c0931beec7d513d9e4a6b8bc069f61a8d Mon Sep 17 00:00:00 2001 From: Gerrit Gogel Date: Mon, 18 Jan 2021 16:04:12 +0100 Subject: [PATCH] Revert "use pro_seafile_7.1 scripts" This reverts commit bbbfb0ad8a2f2c56e805fde0723741643bab7e97. --- seafile/Dockerfile | 2 +- seafile/pro_seafile_7.1/Dockerfile | 71 +++++ .../pro_seafile_7.1/scripts_7.1/bootstrap.py | 200 ++++++++++++ .../scripts_7.1/create_data_links.sh | 81 +++++ .../scripts_7.1}/enterpoint.sh | 0 seafile/pro_seafile_7.1/scripts_7.1/gc.sh | 37 +++ .../scripts_7.1}/init.py | 0 .../scripts_7.1}/setup-seafile-mysql.py | 0 seafile/pro_seafile_7.1/scripts_7.1/ssl.sh | 46 +++ seafile/pro_seafile_7.1/scripts_7.1/start.py | 65 ++++ .../scripts_7.1}/start.sh | 0 .../pro_seafile_7.1/scripts_7.1/upgrade.py | 82 +++++ .../scripts_7.1/utils/__init__.py | 287 ++++++++++++++++++ seafile/pro_seafile_7.1/services/nginx.conf | 33 ++ seafile/pro_seafile_7.1/services/nginx.sh | 3 + .../templates/letsencrypt.cron.template | 3 + .../templates/seafile.nginx.conf.template | 82 +++++ seafile/scripts/auto_renew_crt.sh | 37 +++ seafile/scripts/bootstrap.py | 122 +++++--- seafile/scripts/create_data_links.sh | 51 +--- seafile/scripts/ssl.sh | 4 +- seafile/scripts/start.py | 51 +++- seafile/scripts/upgrade.py | 123 +++++++- seafile/scripts/utils/__init__.py | 29 +- 24 files changed, 1293 insertions(+), 116 deletions(-) create mode 100644 seafile/pro_seafile_7.1/Dockerfile create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/bootstrap.py create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/create_data_links.sh rename seafile/{scripts => pro_seafile_7.1/scripts_7.1}/enterpoint.sh (100%) create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/gc.sh rename seafile/{scripts => pro_seafile_7.1/scripts_7.1}/init.py (100%) rename seafile/{scripts => pro_seafile_7.1/scripts_7.1}/setup-seafile-mysql.py (100%) create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/ssl.sh create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/start.py rename seafile/{scripts => pro_seafile_7.1/scripts_7.1}/start.sh (100%) create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/upgrade.py create mode 100644 seafile/pro_seafile_7.1/scripts_7.1/utils/__init__.py create mode 100644 seafile/pro_seafile_7.1/services/nginx.conf create mode 100644 seafile/pro_seafile_7.1/services/nginx.sh create mode 100644 seafile/pro_seafile_7.1/templates/letsencrypt.cron.template create mode 100644 seafile/pro_seafile_7.1/templates/seafile.nginx.conf.template create mode 100644 seafile/scripts/auto_renew_crt.sh diff --git a/seafile/Dockerfile b/seafile/Dockerfile index f3a2673..200ec40 100644 --- a/seafile/Dockerfile +++ b/seafile/Dockerfile @@ -50,4 +50,4 @@ RUN find /opt/seafile/ \( -name "liblber-*" -o -name "libldap-*" -o -name "libld EXPOSE 80 -CMD ["/sbin/my_init", "--", "/scripts/enterpoint.sh"] +CMD ["/sbin/my_init", "--", "/scripts/start.py"] diff --git a/seafile/pro_seafile_7.1/Dockerfile b/seafile/pro_seafile_7.1/Dockerfile new file mode 100644 index 0000000..89ee20b --- /dev/null +++ b/seafile/pro_seafile_7.1/Dockerfile @@ -0,0 +1,71 @@ +# See https://hub.docker.com/r/phusion/baseimage/tags/ +FROM phusion/baseimage:0.11 +ENV SEAFILE_SERVER=seafile-pro-server SEAFILE_VERSION= + +RUN apt-get update --fix-missing + +# Utility tools +RUN apt-get install -y vim htop net-tools psmisc wget curl git + +# For suport set local time zone. 
+RUN export DEBIAN_FRONTEND=noninteractive && apt-get install tzdata -y
+
+# Nginx
+RUN apt-get install -y nginx
+
+# Java
+RUN apt-get install -y openjdk-8-jre
+
+# Libreoffice
+RUN apt-get install -y libreoffice libreoffice-script-provider-python libsm-dev
+RUN apt-get install -y ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy
+
+# Tools
+RUN apt-get install -y zlib1g-dev pwgen openssl poppler-utils
+
+
+# Python3
+RUN apt-get install -y python3 python3-pip python3-setuptools python3-ldap python-rados
+RUN python3.6 -m pip install --upgrade pip && rm -r /root/.cache/pip
+
+RUN pip3 install --timeout=3600 click termcolor colorlog pymysql \
+    django==1.11.29 && rm -r /root/.cache/pip
+
+RUN pip3 install --timeout=3600 Pillow pylibmc captcha jinja2 \
+    sqlalchemy django-pylibmc django-simple-captcha && \
+    rm -r /root/.cache/pip
+
+RUN pip3 install --timeout=3600 boto oss2 pycryptodome twilio python-ldap configparser && \
+    rm -r /root/.cache/pip
+
+
+# Scripts
+COPY scripts_7.1 /scripts
+COPY templates /templates
+COPY services /services
+RUN chmod u+x /scripts/*
+
+RUN mkdir -p /etc/my_init.d && \
+    rm -f /etc/my_init.d/* && \
+    cp /scripts/create_data_links.sh /etc/my_init.d/01_create_data_links.sh
+
+RUN mkdir -p /etc/service/nginx && \
+    rm -f /etc/nginx/sites-enabled/* /etc/nginx/conf.d/* && \
+    mv /services/nginx.conf /etc/nginx/nginx.conf && \
+    mv /services/nginx.sh /etc/service/nginx/run
+
+
+# Seafile
+WORKDIR /opt/seafile
+
+RUN mkdir -p /opt/seafile/ && cd /opt/seafile/ && \
+    wget -O seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz \
+    "https://download.seafile.com/d/6e5297246c/files/?p=/pro/seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz&dl=1" && \
+    tar -zxvf seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz && \
+    rm -f seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz
+
+
+EXPOSE 80
+
+
+CMD ["/sbin/my_init", "--", "/scripts/enterpoint.sh"]
diff --git a/seafile/pro_seafile_7.1/scripts_7.1/bootstrap.py b/seafile/pro_seafile_7.1/scripts_7.1/bootstrap.py
new file mode 100644
index 0000000..b915f81
--- /dev/null
+++ b/seafile/pro_seafile_7.1/scripts_7.1/bootstrap.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+#coding: UTF-8
+
+"""
+Bootstrapping seafile server, letsencrypt (verification & cron job).
+""" + +import argparse +import os +from os.path import abspath, basename, exists, dirname, join, isdir +import shutil +import sys +import uuid +import time + +from utils import ( + call, get_conf, get_install_dir, loginfo, + get_script, render_template, get_seafile_version, eprint, + cert_has_valid_days, get_version_stamp_file, update_version_stamp, + wait_for_mysql, wait_for_nginx, read_version_stamp +) + +seafile_version = get_seafile_version() +installdir = get_install_dir() +topdir = dirname(installdir) +shared_seafiledir = '/shared/seafile' +ssl_dir = '/shared/ssl' +generated_dir = '/bootstrap/generated' + +def init_letsencrypt(): + loginfo('Preparing for letsencrypt ...') + wait_for_nginx() + + if not exists(ssl_dir): + os.mkdir(ssl_dir) + + domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com') + context = { + 'ssl_dir': ssl_dir, + 'domain': domain, + } + render_template( + '/templates/letsencrypt.cron.template', + join(generated_dir, 'letsencrypt.cron'), + context + ) + + ssl_crt = '/shared/ssl/{}.crt'.format(domain) + if exists(ssl_crt): + loginfo('Found existing cert file {}'.format(ssl_crt)) + if cert_has_valid_days(ssl_crt, 30): + loginfo('Skip letsencrypt verification since we have a valid certificate') + return + + loginfo('Starting letsencrypt verification') + # Create a temporary nginx conf to start a server, which would accessed by letsencrypt + context = { + 'https': False, + 'domain': domain, + } + render_template('/templates/seafile.nginx.conf.template', + '/etc/nginx/sites-enabled/seafile.nginx.conf', context) + + call('nginx -s reload') + time.sleep(2) + + call('/scripts/ssl.sh {0} {1}'.format(ssl_dir, domain)) + # if call('/scripts/ssl.sh {0} {1}'.format(ssl_dir, domain), check_call=False) != 0: + # eprint('Now waiting 1000s for postmortem') + # time.sleep(1000) + # sys.exit(1) + + +def generate_local_nginx_conf(): + # Now create the final nginx configuratin + domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com') + context = { + 'https': is_https(), + 'domain': domain, + } + render_template( + '/templates/seafile.nginx.conf.template', + '/etc/nginx/sites-enabled/seafile.nginx.conf', + context + ) + + +def is_https(): + return get_conf('SEAFILE_SERVER_LETSENCRYPT', 'false').lower() == 'true' + +def parse_args(): + ap = argparse.ArgumentParser() + ap.add_argument('--parse-ports', action='store_true') + + return ap.parse_args() + +def init_seafile_server(): + version_stamp_file = get_version_stamp_file() + if exists(join(shared_seafiledir, 'seafile-data')): + if not exists(version_stamp_file): + update_version_stamp(os.environ['SEAFILE_VERSION']) + # sysbol link unlink after docker finish. + latest_version_dir='/opt/seafile/seafile-server-latest' + current_version_dir='/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-' + read_version_stamp() + if not exists(latest_version_dir): + call('ln -sf ' + current_version_dir + ' ' + latest_version_dir) + loginfo('Skip running setup-seafile-mysql.py because there is existing seafile-data folder.') + return + + loginfo('Now running setup-seafile-mysql.py in auto mode.') + env = { + 'SERVER_NAME': 'seafile', + 'SERVER_IP': get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com'), + 'MYSQL_USER': 'seafile', + 'MYSQL_USER_PASSWD': str(uuid.uuid4()), + 'MYSQL_USER_HOST': '127.0.0.1', + # Default MariaDB root user has empty password and can only connect from localhost. 
+ 'MYSQL_ROOT_PASSWD': '', + } + + # Change the script to allow mysql root password to be empty + call('''sed -i -e 's/if not mysql_root_passwd/if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ/g' {}''' + .format(get_script('setup-seafile-mysql.py'))) + + setup_script = get_script('setup-seafile-mysql.sh') + call('{} auto -n seafile'.format(setup_script), env=env) + + domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com') + proto = 'https' if is_https() else 'http' + with open(join(topdir, 'conf', 'seahub_settings.py'), 'a+') as fp: + fp.write('\n') + fp.write("""CACHES = { + 'default': { + 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + }, + 'locmem': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + }, +} +COMPRESS_CACHE_BACKEND = 'locmem' + +OFFICE_CONVERTOR_ROOT = 'http://127.0.0.1:6000/'\n""") + fp.write("\nFILE_SERVER_ROOT = '{proto}://{domain}/seafhttp'\n".format(proto=proto, domain=domain)) + fp.write(""" +TIME_ZONE = 'Europe/Berlin' +SITE_BASE = 'http://127.0.0.1' +SITE_NAME = 'Seafile Server' +SITE_TITLE = 'Seafile Server' +SITE_ROOT = '/' +ENABLE_SIGNUP = False +ACTIVATE_AFTER_REGISTRATION = False +SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER = True +SEND_EMAIL_ON_RESETTING_USER_PASSWD = True +CLOUD_MODE = False +FILE_PREVIEW_MAX_SIZE = 30 * 1024 * 1024 +SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 +SESSION_SAVE_EVERY_REQUEST = False +SESSION_EXPIRE_AT_BROWSER_CLOSE = False\n""") + + # By default ccnet-server binds to the unix socket file + # "/opt/seafile/ccnet/ccnet.sock", but /opt/seafile/ccnet/ is a mounted + # volume from the docker host, and on windows and some linux environment + # it's not possible to create unix sockets in an external-mounted + # directories. So we change the unix socket file path to + # "/opt/seafile/ccnet.sock" to avoid this problem. 
+ with open(join(topdir, 'conf', 'ccnet.conf'), 'a+') as fp: + fp.write('\n') + fp.write('[Client]\n') + fp.write('UNIX_SOCKET = /opt/seafile/ccnet.sock\n') + fp.write('\n') + + # Disabled the Elasticsearch process on Seafile-container + # Connection to the Elasticsearch-container + with open(join(topdir, 'conf', 'seafevents.conf'), 'r') as fp: + seafevents_lines = fp.readlines() + # es + es_insert_index = seafevents_lines.index('[INDEX FILES]\n') + 1 + es_insert_lines = ['external_es_server = true\n', 'es_host = 127.0.0.1\n', 'es_port = 9200\n'] + for line in es_insert_lines: + seafevents_lines.insert(es_insert_index, line) + # office + office_insert_index = seafevents_lines.index('[OFFICE CONVERTER]\n') + 1 + office_insert_lines = ['host = 127.0.0.1\n', 'port = 6000\n'] + for line in office_insert_lines: + seafevents_lines.insert(office_insert_index, line) + + with open(join(topdir, 'conf', 'seafevents.conf'), 'w') as fp: + fp.writelines(seafevents_lines) + + files_to_copy = ['conf', 'ccnet', 'seafile-data', 'seahub-data', 'pro-data'] + for fn in files_to_copy: + src = join(topdir, fn) + dst = join(shared_seafiledir, fn) + if not exists(dst) and exists(src): + shutil.move(src, shared_seafiledir) + call('ln -sf ' + join(shared_seafiledir, fn) + ' ' + src) + + loginfo('Updating version stamp') + update_version_stamp(os.environ['SEAFILE_VERSION']) diff --git a/seafile/pro_seafile_7.1/scripts_7.1/create_data_links.sh b/seafile/pro_seafile_7.1/scripts_7.1/create_data_links.sh new file mode 100644 index 0000000..7604172 --- /dev/null +++ b/seafile/pro_seafile_7.1/scripts_7.1/create_data_links.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +set -e +set -o pipefail + +if [[ $SEAFILE_BOOTSRAP != "" ]]; then + exit 0 +fi + +if [[ $TIME_ZONE != "" ]]; then + time_zone=/usr/share/zoneinfo/$TIME_ZONE + if [[ ! -e $time_zone ]]; then + echo "invalid time zone" + exit 1 + else + ln -snf $time_zone /etc/localtime + echo "$TIME_ZONE" > /etc/timezone + fi +fi + +dirs=( + conf + ccnet + seafile-data + seahub-data + pro-data + seafile-license.txt +) + +for d in ${dirs[*]}; do + src=/shared/seafile/$d + if [[ -e $src ]]; then + rm -rf /opt/seafile/$d && ln -sf $src /opt/seafile + fi +done + +if [[ ! -e /shared/logs/seafile ]]; then + mkdir -p /shared/logs/seafile +fi +rm -rf /opt/seafile/logs && ln -sf /shared/logs/seafile/ /opt/seafile/logs + +current_version_dir=/opt/seafile/${SEAFILE_SERVER}-${SEAFILE_VERSION} +latest_version_dir=/opt/seafile/seafile-server-latest +seahub_data_dir=/shared/seafile/seahub-data + +if [[ ! -e $seahub_data_dir ]]; then + mkdir -p $seahub_data_dir +fi + +media_dirs=( + avatars + custom +) +for d in ${media_dirs[*]}; do + source_media_dir=${current_version_dir}/seahub/media/$d + if [ -e ${source_media_dir} ] && [ ! -e ${seahub_data_dir}/$d ]; then + mv $source_media_dir ${seahub_data_dir}/$d + fi + rm -rf $source_media_dir && ln -sf ${seahub_data_dir}/$d $source_media_dir +done + +rm -rf /var/lib/mysql +if [[ ! -e /shared/db ]];then + mkdir -p /shared/db +fi +ln -sf /shared/db /var/lib/mysql + +if [[ ! -e /shared/logs/var-log ]]; then + chmod 777 /var/log -R + mv /var/log /shared/logs/var-log +fi +rm -rf /var/log && ln -sf /shared/logs/var-log /var/log + +if [[ ! 
-e latest_version_dir ]]; then
+    ln -sf $current_version_dir $latest_version_dir
+fi
+
+# chmod u+x /scripts/*
+
+# echo $PYTHON
+# $PYTHON /scripts/init.py
diff --git a/seafile/scripts/enterpoint.sh b/seafile/pro_seafile_7.1/scripts_7.1/enterpoint.sh
similarity index 100%
rename from seafile/scripts/enterpoint.sh
rename to seafile/pro_seafile_7.1/scripts_7.1/enterpoint.sh
diff --git a/seafile/pro_seafile_7.1/scripts_7.1/gc.sh b/seafile/pro_seafile_7.1/scripts_7.1/gc.sh
new file mode 100644
index 0000000..4531933
--- /dev/null
+++ b/seafile/pro_seafile_7.1/scripts_7.1/gc.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+set -e
+
+# Before
+SEAFILE_DIR=/opt/seafile/seafile-server-latest
+
+if [[ $SEAFILE_SERVER != *"pro"* ]]; then
+    echo "Seafile CE: Stop Seafile to perform offline garbage collection."
+    $SEAFILE_DIR/seafile.sh stop
+
+    echo "Waiting for the server to shut down properly..."
+    sleep 5
+else
+    echo "Seafile Pro: Perform online garbage collection."
+fi
+
+# Do it
+(
+    set +e
+    $SEAFILE_DIR/seaf-gc.sh "$@" | tee -a /var/log/gc.log
+    # We want to preserve the exit code of seaf-gc.sh
+    exit "${PIPESTATUS[0]}"
+)
+
+gc_exit_code=$?
+
+# After
+
+if [[ $SEAFILE_SERVER != *"pro"* ]]; then
+    echo "Giving the server some time..."
+    sleep 3
+
+    $SEAFILE_DIR/seafile.sh start
+fi
+
+exit $gc_exit_code
diff --git a/seafile/scripts/init.py b/seafile/pro_seafile_7.1/scripts_7.1/init.py
similarity index 100%
rename from seafile/scripts/init.py
rename to seafile/pro_seafile_7.1/scripts_7.1/init.py
diff --git a/seafile/scripts/setup-seafile-mysql.py b/seafile/pro_seafile_7.1/scripts_7.1/setup-seafile-mysql.py
similarity index 100%
rename from seafile/scripts/setup-seafile-mysql.py
rename to seafile/pro_seafile_7.1/scripts_7.1/setup-seafile-mysql.py
diff --git a/seafile/pro_seafile_7.1/scripts_7.1/ssl.sh b/seafile/pro_seafile_7.1/scripts_7.1/ssl.sh
new file mode 100644
index 0000000..931219a
--- /dev/null
+++ b/seafile/pro_seafile_7.1/scripts_7.1/ssl.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -e
+
+ssldir=${1:?"error params"}
+domain=${2:?"error params"}
+
+letsencryptdir=$ssldir/letsencrypt
+letsencrypt_script=$letsencryptdir/acme_tiny.py
+
+ssl_account_key=${domain}.account.key
+ssl_csr=${domain}.csr
+ssl_key=${domain}.key
+ssl_crt=${domain}.crt
+
+mkdir -p /var/www/challenges && chmod -R 777 /var/www/challenges
+mkdir -p ssldir
+
+if ! [[ -d $letsencryptdir ]]; then
+    git clone git://github.com/diafygi/acme-tiny.git $letsencryptdir
+else
+    cd $letsencryptdir
+    git pull origin master:master
+fi
+
+cd $ssldir
+
+if [[ ! -e ${ssl_account_key} ]]; then
+    openssl genrsa 4096 > ${ssl_account_key}
+fi
+
+if [[ ! -e ${ssl_key} ]]; then
+    openssl genrsa 4096 > ${ssl_key}
+fi
+
+if [[ ! -e ${ssl_csr} ]]; then
+    openssl req -new -sha256 -key ${ssl_key} -subj "/CN=$domain" > $ssl_csr
+fi
+
+python $letsencrypt_script --account-key ${ssl_account_key} --csr $ssl_csr --acme-dir /var/www/challenges/ > ./signed.crt
+curl -sSL -o intermediate.pem https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem
+cat signed.crt intermediate.pem > ${ssl_crt}
+
+nginx -s reload
+
+echo "Nginx reloaded."
diff --git a/seafile/pro_seafile_7.1/scripts_7.1/start.py b/seafile/pro_seafile_7.1/scripts_7.1/start.py
new file mode 100644
index 0000000..453b632
--- /dev/null
+++ b/seafile/pro_seafile_7.1/scripts_7.1/start.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+#coding: UTF-8
+
+import os
+import sys
+import time
+import json
+import argparse
+from os.path import join, exists, dirname
+
+from upgrade import check_upgrade
+from utils import call, get_conf, get_script, get_command_output, get_install_dir
+
+installdir = get_install_dir()
+topdir = dirname(installdir)
+
+def watch_controller():
+    maxretry = 4
+    retry = 0
+    while retry < maxretry:
+        controller_pid = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
+        garbage_collector_pid = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
+        if not controller_pid and not garbage_collector_pid:
+            retry += 1
+        else:
+            retry = 0
+        time.sleep(5)
+    print('seafile controller exited unexpectedly.')
+    sys.exit(1)
+
+def main(args):
+    call('/scripts/create_data_links.sh')
+    # check_upgrade()
+    os.chdir(installdir)
+    call('service nginx start &')
+
+    admin_pw = {
+        'email': get_conf('SEAFILE_ADMIN_EMAIL', 'me@example.com'),
+        'password': get_conf('SEAFILE_ADMIN_PASSWORD', 'asecret'),
+    }
+    password_file = join(topdir, 'conf', 'admin.txt')
+    with open(password_file, 'w+') as fp:
+        json.dump(admin_pw, fp)
+
+
+    try:
+        call('{} start'.format(get_script('seafile.sh')))
+        call('{} start'.format(get_script('seahub.sh')))
+        if args.mode == 'backend':
+            call('{} start'.format(get_script('seafile-background-tasks.sh')))
+    finally:
+        if exists(password_file):
+            os.unlink(password_file)
+
+    print('seafile server is running now.')
+    try:
+        watch_controller()
+    except KeyboardInterrupt:
+        print('Stopping seafile server.')
+        sys.exit(0)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='Seafile cluster start script')
+    parser.add_argument('--mode')
+    main(parser.parse_args())
diff --git a/seafile/scripts/start.sh b/seafile/pro_seafile_7.1/scripts_7.1/start.sh
similarity index 100%
rename from seafile/scripts/start.sh
rename to seafile/pro_seafile_7.1/scripts_7.1/start.sh
diff --git a/seafile/pro_seafile_7.1/scripts_7.1/upgrade.py b/seafile/pro_seafile_7.1/scripts_7.1/upgrade.py
new file mode 100644
index 0000000..602692c
--- /dev/null
+++ b/seafile/pro_seafile_7.1/scripts_7.1/upgrade.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+#coding: UTF-8
+
+"""
+This script is used to run proper upgrade scripts automatically.
+"""
+
+import json
+import re
+import glob
+import os
+from os.path import abspath, basename, exists, dirname, join, isdir
+import shutil
+import sys
+import time
+
+from utils import (
+    call, get_install_dir, get_script, get_command_output, replace_file_pattern,
+    read_version_stamp, wait_for_mysql, update_version_stamp, loginfo
+)
+
+installdir = get_install_dir()
+topdir = dirname(installdir)
+
+def collect_upgrade_scripts(from_version, to_version):
+    """
+    Given the currently installed version, calculate which upgrade scripts we need
+    to run to upgrade it to the latest version.
+
+    For example, given current version 5.0.1 and target version 6.1.0, and these
+    upgrade scripts:
+
+        upgrade_4.4_5.0.sh
+        upgrade_5.0_5.1.sh
+        upgrade_5.1_6.0.sh
+        upgrade_6.0_6.1.sh
+
+    We need to run upgrade_5.0_5.1.sh, upgrade_5.1_6.0.sh, and upgrade_6.0_6.1.sh.
+ """ + from_major_ver = '.'.join(from_version.split('.')[:2]) + to_major_ver = '.'.join(to_version.split('.')[:2]) + + scripts = [] + for fn in sorted(glob.glob(join(installdir, 'upgrade', 'upgrade_*_*.sh'))): + va, vb = parse_upgrade_script_version(fn) + if va >= from_major_ver and vb <= to_major_ver: + scripts.append(fn) + return scripts + +def parse_upgrade_script_version(script): + script = basename(script) + m = re.match(r'upgrade_([0-9+.]+)_([0-9+.]+).sh', basename(script)) + return m.groups() + +def check_upgrade(): + last_version = read_version_stamp() + current_version = os.environ['SEAFILE_VERSION'] + if last_version == current_version: + return + + scripts_to_run = collect_upgrade_scripts(from_version=last_version, to_version=current_version) + for script in scripts_to_run: + loginfo('Running scripts {}'.format(script)) + # Here we use a trick: use a version stamp like 6.1.0 to prevent running + # all upgrade scripts before 6.1 again (because 6.1 < 6.1.0 in python) + new_version = parse_upgrade_script_version(script)[1] + '.0' + + replace_file_pattern(script, 'read dummy', '') + call(script) + + update_version_stamp(new_version) + + update_version_stamp(current_version) + +def main(): + wait_for_mysql() + + os.chdir(installdir) + check_upgrade() + +if __name__ == '__main__': + main() diff --git a/seafile/pro_seafile_7.1/scripts_7.1/utils/__init__.py b/seafile/pro_seafile_7.1/scripts_7.1/utils/__init__.py new file mode 100644 index 0000000..70a3796 --- /dev/null +++ b/seafile/pro_seafile_7.1/scripts_7.1/utils/__init__.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +#coding: UTF-8 + +from configparser import ConfigParser +from contextlib import contextmanager +import os +import datetime +from os.path import abspath, basename, exists, dirname, join, isdir, expanduser +import platform +import sys +import subprocess +import time +import logging +import logging.config +import click +import termcolor +import colorlog + +logger = logging.getLogger('.utils') + +DEBUG_ENABLED = os.environ.get('SEAFILE_DOCKER_VERBOSE', '').lower() in ('true', '1', 'yes') + +def eprint(*a, **kw): + kw['file'] = sys.stderr + print(*a, **kw) + +def identity(msg, *a, **kw): + return msg + +colored = identity if not os.isatty(sys.stdin.fileno()) else termcolor.colored +red = lambda s: colored(s, 'red') +green = lambda s: colored(s, 'green') + +def underlined(msg): + return '\x1b[4m{}\x1b[0m'.format(msg) + +def sudo(*a, **kw): + call('sudo ' + a[0], *a[1:], **kw) + +def _find_flag(args, *opts, **kw): + is_flag = kw.get('is_flag', False) + if is_flag: + return any([opt in args for opt in opts]) + else: + for opt in opts: + try: + return args[args.index(opt) + 1] + except ValueError: + pass + +def call(*a, **kw): + dry_run = kw.pop('dry_run', False) + quiet = kw.pop('quiet', DEBUG_ENABLED) + cwd = kw.get('cwd', os.getcwd()) + check_call = kw.pop('check_call', True) + reduct_args = kw.pop('reduct_args', []) + if not quiet: + toprint = a[0] + args = [x.strip('"') for x in a[0].split() if '=' not in x] + for arg in reduct_args: + value = _find_flag(args, arg) + toprint = toprint.replace(value, '{}**reducted**'.format(value[:3])) + logdbg('calling: ' + green(toprint)) + logdbg('cwd: ' + green(cwd)) + kw.setdefault('shell', True) + if not dry_run: + if check_call: + return subprocess.check_call(*a, **kw) + else: + return subprocess.Popen(*a, **kw).wait() + +@contextmanager +def cd(path): + path = expanduser(path) + olddir = os.getcwd() + os.chdir(path) + try: + yield + finally: + os.chdir(olddir) + +def must_makedir(p): 
+ p = expanduser(p) + if not exists(p): + logger.info('created folder %s', p) + os.makedirs(p) + else: + logger.debug('folder %s already exists', p) + +def setup_colorlog(): + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'standard': { + 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' + }, + 'colored': { + '()': 'colorlog.ColoredFormatter', + 'format': "%(log_color)s[%(asctime)s]%(reset)s %(blue)s%(message)s", + 'datefmt': '%m/%d/%Y %H:%M:%S', + }, + }, + 'handlers': { + 'default': { + 'level': 'INFO', + 'formatter': 'colored', + 'class': 'logging.StreamHandler', + }, + }, + 'loggers': { + '': { + 'handlers': ['default'], + 'level': 'INFO', + 'propagate': True + }, + 'django.request': { + 'handlers': ['default'], + 'level': 'WARN', + 'propagate': False + }, + } + }) + + logging.getLogger('requests.packages.urllib3.connectionpool').setLevel( + logging.WARNING) + + +def setup_logging(level=logging.INFO): + kw = { + 'format': '[%(asctime)s][%(module)s]: %(message)s', + 'datefmt': '%m/%d/%Y %H:%M:%S', + 'level': level, + 'stream': sys.stdout + } + + logging.basicConfig(**kw) + logging.getLogger('requests.packages.urllib3.connectionpool').setLevel( + logging.WARNING) + +def get_process_cmd(pid, env=False): + env = 'e' if env else '' + try: + return subprocess.check_output('ps {} -o command {}'.format(env, pid), + shell=True).strip().splitlines()[1] + # except Exception, e: + # print(e) + except: + return None + +def get_match_pids(pattern): + pgrep_output = subprocess.check_output( + 'pgrep -f "{}" || true'.format(pattern), + shell=True).strip() + return [int(pid) for pid in pgrep_output.splitlines()] + +def ask_for_confirm(msg): + confirm = click.prompt(msg, default='Y') + return confirm.lower() in ('y', 'yes') + +def confirm_command_to_run(cmd): + if ask_for_confirm('Run the command: {} ?'.format(green(cmd))): + call(cmd) + else: + sys.exit(1) + +def git_current_commit(): + return get_command_output('git rev-parse --short HEAD').strip() + +def get_command_output(cmd): + shell = not isinstance(cmd, list) + return subprocess.check_output(cmd, shell=shell) + +def ask_yes_or_no(msg, prompt='', default=None): + print('\n' + msg + '\n') + while True: + answer = input(prompt + ' [yes/no] ').lower() + if not answer: + continue + + if answer not in ('yes', 'no', 'y', 'n'): + continue + + if answer in ('yes', 'y'): + return True + else: + return False + +def git_branch_exists(branch): + return call('git rev-parse --short --verify {}'.format(branch)) == 0 + +def to_unicode(s): + if isinstance(s, str): + return s.decode('utf-8') + else: + return s + +def to_utf8(s): + if isinstance(s, str): + return s.encode('utf-8') + else: + return s + +def git_commit_time(refspec): + return int(get_command_output('git log -1 --format="%ct" {}'.format( + refspec)).strip()) + +def get_seafile_version(): + return os.environ['SEAFILE_VERSION'] + +def get_install_dir(): + return join('/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-{}'.format(get_seafile_version())) + +def get_script(script): + return join(get_install_dir(), script) + + +_config = None + +def get_conf(key, default=None): + key = key.upper() + return os.environ.get(key, default) + +def _add_default_context(context): + default_context = { + 'current_timestr': datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S'), + } + for k in default_context: + context.setdefault(k, default_context[k]) + +def render_template(template, target, context): + from jinja2 import Environment, 
FileSystemLoader + env = Environment(loader=FileSystemLoader(dirname(template))) + _add_default_context(context) + content = env.get_template(basename(template)).render(**context) + with open(target, 'w') as fp: + fp.write(content) + +def logdbg(msg): + if DEBUG_ENABLED: + msg = '[debug] ' + msg + loginfo(msg) + +def loginfo(msg): + msg = '[{}] {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), green(msg)) + eprint(msg) + +def cert_has_valid_days(cert, days): + assert exists(cert) + + secs = 86400 * int(days) + retcode = call('openssl x509 -checkend {} -noout -in {}'.format(secs, cert), check_call=False) + return retcode == 0 + +def get_version_stamp_file(): + return '/shared/seafile/seafile-data/current_version' + +def read_version_stamp(fn=get_version_stamp_file()): + assert exists(fn), 'version stamp file {} does not exist!'.format(fn) + with open(fn, 'r') as fp: + return fp.read().strip() + +def update_version_stamp(version, fn=get_version_stamp_file()): + with open(fn, 'w') as fp: + fp.write(version + '\n') + +def wait_for_mysql(): + while not exists('/var/run/mysqld/mysqld.sock'): + logdbg('waiting for mysql server to be ready') + time.sleep(2) + logdbg('mysql server is ready') + +def wait_for_nginx(): + while True: + logdbg('waiting for nginx server to be ready') + output = get_command_output('netstat -nltp') + if ':80 ' in output: + logdbg(output) + logdbg('nginx is ready') + return + time.sleep(2) + +def replace_file_pattern(fn, pattern, replacement): + with open(fn, 'r') as fp: + content = fp.read() + with open(fn, 'w') as fp: + fp.write(content.replace(pattern, replacement)) diff --git a/seafile/pro_seafile_7.1/services/nginx.conf b/seafile/pro_seafile_7.1/services/nginx.conf new file mode 100644 index 0000000..c2a6b12 --- /dev/null +++ b/seafile/pro_seafile_7.1/services/nginx.conf @@ -0,0 +1,33 @@ +daemon off; +user www-data; +worker_processes auto; + +events { + worker_connections 768; +} + +http { + include /etc/nginx/mime.types; + server_names_hash_bucket_size 256; + server_names_hash_max_size 1024; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log info; + + gzip on; + gzip_types text/plain text/css application/javascript application/json text/javascript; + + include /etc/nginx/conf.d/*.conf; + include /etc/nginx/sites-enabled/*; + + server { + listen 80; + location / { + return 444; + } + } +} diff --git a/seafile/pro_seafile_7.1/services/nginx.sh b/seafile/pro_seafile_7.1/services/nginx.sh new file mode 100644 index 0000000..21060ee --- /dev/null +++ b/seafile/pro_seafile_7.1/services/nginx.sh @@ -0,0 +1,3 @@ +#!/bin/bash +exec 2>&1 +exec /usr/sbin/nginx diff --git a/seafile/pro_seafile_7.1/templates/letsencrypt.cron.template b/seafile/pro_seafile_7.1/templates/letsencrypt.cron.template new file mode 100644 index 0000000..cd877b6 --- /dev/null +++ b/seafile/pro_seafile_7.1/templates/letsencrypt.cron.template @@ -0,0 +1,3 @@ +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +# min hour dayofmonth month dayofweek command +0 0 1 * * root /scripts/ssl.sh {{ ssl_dir }} {{ domain }} diff --git a/seafile/pro_seafile_7.1/templates/seafile.nginx.conf.template b/seafile/pro_seafile_7.1/templates/seafile.nginx.conf.template new file mode 100644 index 0000000..cbbbf76 --- /dev/null +++ b/seafile/pro_seafile_7.1/templates/seafile.nginx.conf.template @@ -0,0 +1,82 @@ +# -*- mode: nginx -*- +# Auto generated at {{ current_timestr }} +{% if 
https -%} +server { + listen 80; + server_name _ default_server; + rewrite ^ https://{{ domain }}$request_uri? permanent; +} +{% endif -%} + +server { +{% if https -%} + listen 443; + ssl on; + ssl_certificate /shared/ssl/{{ domain }}.crt; + ssl_certificate_key /shared/ssl/{{ domain }}.key; + + ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS; + + # TODO: More SSL security hardening: ssl_session_tickets & ssl_dhparam + # ssl_session_tickets on; + # ssl_session_ticket_key /etc/nginx/sessionticket.key; + # ssl_session_cache shared:SSL:10m; + # ssl_session_timeout 10m; +{% else -%} + listen 80; +{% endif -%} + + server_name {{ domain }}; + + client_max_body_size 10m; + + location / { + proxy_pass http://127.0.0.1:8000/; + proxy_read_timeout 310s; + proxy_set_header Host $host; + proxy_set_header Forwarded "for=$remote_addr;proto=$scheme"; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header Connection ""; + proxy_http_version 1.1; + } + + location /seafhttp { + rewrite ^/seafhttp(.*)$ $1 break; + proxy_pass http://127.0.0.1:8082; + client_max_body_size 0; + proxy_connect_timeout 36000s; + proxy_read_timeout 36000s; + proxy_request_buffering off; + } + + location /seafdav { + client_max_body_size 0; + fastcgi_pass 127.0.0.1:8080; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; + fastcgi_param PATH_INFO $fastcgi_script_name; + + fastcgi_param SERVER_PROTOCOL $server_protocol; + fastcgi_param QUERY_STRING $query_string; + fastcgi_param REQUEST_METHOD $request_method; + fastcgi_param CONTENT_TYPE $content_type; + fastcgi_param CONTENT_LENGTH $content_length; + fastcgi_param SERVER_ADDR $server_addr; + fastcgi_param SERVER_PORT $server_port; + fastcgi_param SERVER_NAME $server_name; + + access_log /var/log/nginx/seafdav.access.log; + error_log /var/log/nginx/seafdav.error.log; + } + + location /media { + root /opt/seafile/seafile-server-latest/seahub; + } + + # For letsencrypt + location /.well-known/acme-challenge/ { + alias /var/www/challenges/; + try_files $uri =404; + } +} diff --git a/seafile/scripts/auto_renew_crt.sh b/seafile/scripts/auto_renew_crt.sh new file mode 100644 index 0000000..67291aa --- /dev/null +++ b/seafile/scripts/auto_renew_crt.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -e + +ssldir=${1:?"error params"} +domain=${2:?"error params"} + +letsencryptdir=$ssldir/letsencrypt +letsencrypt_script=$letsencryptdir/acme_tiny.py + +ssl_account_key=${domain}.account.key +ssl_csr=${domain}.csr +ssl_key=${domain}.key +ssl_crt=${domain}.crt +renew_cert_script=/scripts/renew_cert.sh + +if [[ ! -x ${renew_cert_script} ]]; then + cat > ${renew_cert_script} << EOF +#!/bin/bash +python3 ${letsencrypt_script} --account-key ${ssldir}/${ssl_account_key} --csr ${ssldir}/${ssl_csr} --acme-dir /var/www/challenges/ > ${ssldir}/${ssl_crt} || exit +$(which nginx) -s reload +EOF + + chmod u+x ${renew_cert_script} + + if [[ ! -d "/var/www/challenges" ]]; then + mkdir -p /var/www/challenges + fi + + cat >> /etc/crontab << EOF +00 1 1 * * root /scripts/renew_cert.sh 2>> /var/log/acme_tiny.log +EOF + + echo 'Created a crontab to auto renew the cert for letsencrypt.' +else + echo 'Found existing the script for renew the cert.' + echo 'Skip create the crontab for letscncrypt since maybe we have created before.' 
+fi diff --git a/seafile/scripts/bootstrap.py b/seafile/scripts/bootstrap.py index b915f81..7bad674 100644 --- a/seafile/scripts/bootstrap.py +++ b/seafile/scripts/bootstrap.py @@ -50,6 +50,9 @@ def init_letsencrypt(): loginfo('Found existing cert file {}'.format(ssl_crt)) if cert_has_valid_days(ssl_crt, 30): loginfo('Skip letsencrypt verification since we have a valid certificate') + if exists(join(ssl_dir, 'letsencrypt')): + # Create a crontab to auto renew the cert for letsencrypt. + call('/scripts/auto_renew_crt.sh {0} {1}'.format(ssl_dir, domain)) return loginfo('Starting letsencrypt verification') @@ -58,8 +61,9 @@ def init_letsencrypt(): 'https': False, 'domain': domain, } - render_template('/templates/seafile.nginx.conf.template', - '/etc/nginx/sites-enabled/seafile.nginx.conf', context) + if not os.path.isfile('/shared/nginx/conf/seafile.nginx.conf'): + render_template('/templates/seafile.nginx.conf.template', + '/etc/nginx/sites-enabled/seafile.nginx.conf', context) call('nginx -s reload') time.sleep(2) @@ -70,6 +74,9 @@ def init_letsencrypt(): # time.sleep(1000) # sys.exit(1) + call('/scripts/auto_renew_crt.sh {0} {1}'.format(ssl_dir, domain)) + # Create a crontab to auto renew the cert for letsencrypt. + def generate_local_nginx_conf(): # Now create the final nginx configuratin @@ -78,12 +85,16 @@ def generate_local_nginx_conf(): 'https': is_https(), 'domain': domain, } - render_template( - '/templates/seafile.nginx.conf.template', - '/etc/nginx/sites-enabled/seafile.nginx.conf', - context - ) + if not os.path.isfile('/shared/nginx/conf/seafile.nginx.conf'): + render_template( + '/templates/seafile.nginx.conf.template', + '/etc/nginx/sites-enabled/seafile.nginx.conf', + context + ) + nginx_etc_file = '/etc/nginx/sites-enabled/seafile.nginx.conf' + nginx_shared_file = '/shared/nginx/conf/seafile.nginx.conf' + call('mv {0} {1} && ln -sf {1} {0}'.format(nginx_etc_file, nginx_shared_file)) def is_https(): return get_conf('SEAFILE_SERVER_LETSENCRYPT', 'false').lower() == 'true' @@ -113,14 +124,22 @@ def init_seafile_server(): 'SERVER_IP': get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com'), 'MYSQL_USER': 'seafile', 'MYSQL_USER_PASSWD': str(uuid.uuid4()), - 'MYSQL_USER_HOST': '127.0.0.1', + 'MYSQL_USER_HOST': '%.%.%.%', + 'MYSQL_HOST': get_conf('DB_HOST','127.0.0.1'), # Default MariaDB root user has empty password and can only connect from localhost. 
- 'MYSQL_ROOT_PASSWD': '', + 'MYSQL_ROOT_PASSWD': get_conf('DB_ROOT_PASSWD', ''), } # Change the script to allow mysql root password to be empty - call('''sed -i -e 's/if not mysql_root_passwd/if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ/g' {}''' - .format(get_script('setup-seafile-mysql.py'))) + # call('''sed -i -e 's/if not mysql_root_passwd/if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ/g' {}''' + # .format(get_script('setup-seafile-mysql.py'))) + + # Change the script to disable check MYSQL_USER_HOST + call('''sed -i -e '/def validate_mysql_user_host(self, host)/a \ \ \ \ \ \ \ \ return host' {}''' + .format(get_script('setup-seafile-mysql.py'))) + + call('''sed -i -e '/def validate_mysql_host(self, host)/a \ \ \ \ \ \ \ \ return host' {}''' + .format(get_script('setup-seafile-mysql.py'))) setup_script = get_script('setup-seafile-mysql.sh') call('{} auto -n seafile'.format(setup_script), env=env) @@ -132,31 +151,18 @@ def init_seafile_server(): fp.write("""CACHES = { 'default': { 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache', - 'LOCATION': '127.0.0.1:11211', + 'LOCATION': 'memcached:11211', }, 'locmem': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, } -COMPRESS_CACHE_BACKEND = 'locmem' - -OFFICE_CONVERTOR_ROOT = 'http://127.0.0.1:6000/'\n""") - fp.write("\nFILE_SERVER_ROOT = '{proto}://{domain}/seafhttp'\n".format(proto=proto, domain=domain)) - fp.write(""" -TIME_ZONE = 'Europe/Berlin' -SITE_BASE = 'http://127.0.0.1' -SITE_NAME = 'Seafile Server' -SITE_TITLE = 'Seafile Server' -SITE_ROOT = '/' -ENABLE_SIGNUP = False -ACTIVATE_AFTER_REGISTRATION = False -SEND_EMAIL_ON_ADDING_SYSTEM_MEMBER = True -SEND_EMAIL_ON_RESETTING_USER_PASSWD = True -CLOUD_MODE = False -FILE_PREVIEW_MAX_SIZE = 30 * 1024 * 1024 -SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 -SESSION_SAVE_EVERY_REQUEST = False -SESSION_EXPIRE_AT_BROWSER_CLOSE = False\n""") +COMPRESS_CACHE_BACKEND = 'locmem'""") + fp.write('\n') + fp.write("TIME_ZONE = '{time_zone}'".format(time_zone=os.getenv('TIME_ZONE',default='Etc/UTC'))) + fp.write('\n') + fp.write('FILE_SERVER_ROOT = "{proto}://{domain}/seafhttp"'.format(proto=proto, domain=domain)) + fp.write('\n') # By default ccnet-server binds to the unix socket file # "/opt/seafile/ccnet/ccnet.sock", but /opt/seafile/ccnet/ is a mounted @@ -172,22 +178,50 @@ SESSION_EXPIRE_AT_BROWSER_CLOSE = False\n""") # Disabled the Elasticsearch process on Seafile-container # Connection to the Elasticsearch-container - with open(join(topdir, 'conf', 'seafevents.conf'), 'r') as fp: - seafevents_lines = fp.readlines() - # es - es_insert_index = seafevents_lines.index('[INDEX FILES]\n') + 1 - es_insert_lines = ['external_es_server = true\n', 'es_host = 127.0.0.1\n', 'es_port = 9200\n'] - for line in es_insert_lines: - seafevents_lines.insert(es_insert_index, line) + if os.path.exists(join(topdir, 'conf', 'seafevents.conf')): + with open(join(topdir, 'conf', 'seafevents.conf'), 'r') as fp: + fp_lines = fp.readlines() + if '[INDEX FILES]\n' in fp_lines: + insert_index = fp_lines.index('[INDEX FILES]\n') + 1 + insert_lines = ['es_port = 9200\n', 'es_host = elasticsearch\n', 'external_es_server = true\n'] + for line in insert_lines: + fp_lines.insert(insert_index, line) + + # office + if '[OFFICE CONVERTER]\n' in fp_lines: + insert_index = fp_lines.index('[OFFICE CONVERTER]\n') + 1 + insert_lines = ['host = 127.0.0.1\n', 'port = 6000\n'] + for line in insert_lines: + fp_lines.insert(insert_index, line) + + with open(join(topdir, 'conf', 
'seafevents.conf'), 'w') as fp: + fp.writelines(fp_lines) + # office - office_insert_index = seafevents_lines.index('[OFFICE CONVERTER]\n') + 1 - office_insert_lines = ['host = 127.0.0.1\n', 'port = 6000\n'] - for line in office_insert_lines: - seafevents_lines.insert(office_insert_index, line) + with open(join(topdir, 'conf', 'seahub_settings.py'), 'r') as fp: + fp_lines = fp.readlines() + if "OFFICE_CONVERTOR_ROOT = 'http://127.0.0.1:6000/'\n" not in fp_lines: + fp_lines.append("OFFICE_CONVERTOR_ROOT = 'http://127.0.0.1:6000/'\n") - with open(join(topdir, 'conf', 'seafevents.conf'), 'w') as fp: - fp.writelines(seafevents_lines) + with open(join(topdir, 'conf', 'seahub_settings.py'), 'w') as fp: + fp.writelines(fp_lines) + # Modify seafdav config + if os.path.exists(join(topdir, 'conf', 'seafdav.conf')): + with open(join(topdir, 'conf', 'seafdav.conf'), 'r') as fp: + fp_lines = fp.readlines() + if 'share_name = /\n' in fp_lines: + replace_index = fp_lines.index('share_name = /\n') + replace_line = 'share_name = /seafdav\n' + fp_lines[replace_index] = replace_line + + with open(join(topdir, 'conf', 'seafdav.conf'), 'w') as fp: + fp.writelines(fp_lines) + + # After the setup script creates all the files inside the + # container, we need to move them to the shared volume + # + # e.g move "/opt/seafile/seafile-data" to "/shared/seafile/seafile-data" files_to_copy = ['conf', 'ccnet', 'seafile-data', 'seahub-data', 'pro-data'] for fn in files_to_copy: src = join(topdir, fn) diff --git a/seafile/scripts/create_data_links.sh b/seafile/scripts/create_data_links.sh index 7604172..d07f532 100644 --- a/seafile/scripts/create_data_links.sh +++ b/seafile/scripts/create_data_links.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -e +set -e set -o pipefail if [[ $SEAFILE_BOOTSRAP != "" ]]; then @@ -34,48 +34,21 @@ for d in ${dirs[*]}; do fi done -if [[ ! -e /shared/logs/seafile ]]; then - mkdir -p /shared/logs/seafile +if [[ -e /shared/logs/seafile ]]; then + mv /shared/logs/seafile /shared/seafile/logs + rm -rf /opt/seafile/logs && ln -sf /shared/seafile/logs /opt/seafile/ +else + mkdir -p /shared/seafile/logs && ln -sf /shared/seafile/logs /opt/seafile/ fi -rm -rf /opt/seafile/logs && ln -sf /shared/logs/seafile/ /opt/seafile/logs - -current_version_dir=/opt/seafile/${SEAFILE_SERVER}-${SEAFILE_VERSION} -latest_version_dir=/opt/seafile/seafile-server-latest -seahub_data_dir=/shared/seafile/seahub-data - -if [[ ! -e $seahub_data_dir ]]; then - mkdir -p $seahub_data_dir -fi - -media_dirs=( - avatars - custom -) -for d in ${media_dirs[*]}; do - source_media_dir=${current_version_dir}/seahub/media/$d - if [ -e ${source_media_dir} ] && [ ! -e ${seahub_data_dir}/$d ]; then - mv $source_media_dir ${seahub_data_dir}/$d - fi - rm -rf $source_media_dir && ln -sf ${seahub_data_dir}/$d $source_media_dir -done - -rm -rf /var/lib/mysql -if [[ ! -e /shared/db ]];then - mkdir -p /shared/db -fi -ln -sf /shared/db /var/lib/mysql if [[ ! -e /shared/logs/var-log ]]; then - chmod 777 /var/log -R - mv /var/log /shared/logs/var-log + mkdir -p /shared/logs/ && mv /var/log /shared/logs/var-log fi rm -rf /var/log && ln -sf /shared/logs/var-log /var/log -if [[ ! 
-e latest_version_dir ]]; then - ln -sf $current_version_dir $latest_version_dir +mkdir -p /shared/nginx/conf/ + +if [[ -e /shared/nginx/conf/seafile.nginx.conf ]]; then + rm -rf /etc/nginx/sites-enabled/seafile.nginx.conf && \ + ln -sf /shared/nginx/conf/seafile.nginx.conf /etc/nginx/sites-enabled fi - -# chmod u+x /scripts/* - -# echo $PYTHON -# $PYTHON /scripts/init.py diff --git a/seafile/scripts/ssl.sh b/seafile/scripts/ssl.sh index 931219a..e9ec8d4 100644 --- a/seafile/scripts/ssl.sh +++ b/seafile/scripts/ssl.sh @@ -14,7 +14,7 @@ ssl_key=${domain}.key ssl_crt=${domain}.crt mkdir -p /var/www/challenges && chmod -R 777 /var/www/challenges -mkdir -p ssldir +mkdir -p $ssldir if ! [[ -d $letsencryptdir ]]; then git clone git://github.com/diafygi/acme-tiny.git $letsencryptdir @@ -37,7 +37,7 @@ if [[ ! -e ${ssl_csr} ]]; then openssl req -new -sha256 -key ${ssl_key} -subj "/CN=$domain" > $ssl_csr fi -python $letsencrypt_script --account-key ${ssl_account_key} --csr $ssl_csr --acme-dir /var/www/challenges/ > ./signed.crt +python3 $letsencrypt_script --account-key ${ssl_account_key} --csr $ssl_csr --acme-dir /var/www/challenges/ > ./signed.crt curl -sSL -o intermediate.pem https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem cat signed.crt intermediate.pem > ${ssl_crt} diff --git a/seafile/scripts/start.py b/seafile/scripts/start.py index 453b632..07bd8f4 100644 --- a/seafile/scripts/start.py +++ b/seafile/scripts/start.py @@ -1,16 +1,29 @@ #!/usr/bin/env python3 #coding: UTF-8 +""" +Starts the seafile/seahub server and watches the controller process. It is +the entrypoint command of the docker container. +""" + +import json import os +from os.path import abspath, basename, exists, dirname, join, isdir +import shutil import sys import time -import json -import argparse -from os.path import join, exists, dirname +from utils import ( + call, get_conf, get_install_dir, get_script, get_command_output, + render_template, wait_for_mysql, setup_logging +) from upgrade import check_upgrade -from utils import call, get_conf, get_script, get_command_output, get_install_dir +from bootstrap import init_seafile_server, is_https, init_letsencrypt, generate_local_nginx_conf + +shared_seafiledir = '/shared/seafile' +ssl_dir = '/shared/ssl' +generated_dir = '/bootstrap/generated' installdir = get_install_dir() topdir = dirname(installdir) @@ -28,26 +41,35 @@ def watch_controller(): print('seafile controller exited unexpectedly.') sys.exit(1) -def main(args): - call('/scripts/create_data_links.sh') - # check_upgrade() +def main(): + if not exists(shared_seafiledir): + os.mkdir(shared_seafiledir) + if not exists(generated_dir): + os.makedirs(generated_dir) + + if is_https(): + init_letsencrypt() + generate_local_nginx_conf() + call('nginx -s reload') + + wait_for_mysql() + init_seafile_server() + + check_upgrade() os.chdir(installdir) - call('service nginx start &') admin_pw = { 'email': get_conf('SEAFILE_ADMIN_EMAIL', 'me@example.com'), 'password': get_conf('SEAFILE_ADMIN_PASSWORD', 'asecret'), } password_file = join(topdir, 'conf', 'admin.txt') - with open(password_file, 'w+') as fp: + with open(password_file, 'w') as fp: json.dump(admin_pw, fp) try: call('{} start'.format(get_script('seafile.sh'))) call('{} start'.format(get_script('seahub.sh'))) - if args.mode == 'backend': - call('{} start'.format(get_script('seafile-background-tasks.sh'))) finally: if exists(password_file): os.unlink(password_file) @@ -59,7 +81,6 @@ def main(args): print('Stopping seafile server.') sys.exit(0) -if __name__ == 
"__main__": - parser = argparse.ArgumentParser(description='Seafile cluster start script') - parser.add_argument('--mode') - main(parser.parse_args()) +if __name__ == '__main__': + setup_logging() + main() diff --git a/seafile/scripts/upgrade.py b/seafile/scripts/upgrade.py index 602692c..d19cca0 100644 --- a/seafile/scripts/upgrade.py +++ b/seafile/scripts/upgrade.py @@ -8,11 +8,13 @@ This script is used to run proper upgrade scripts automatically. import json import re import glob +import logging import os -from os.path import abspath, basename, exists, dirname, join, isdir +from os.path import abspath, basename, exists, dirname, join, isdir, islink import shutil import sys import time +import configparser from utils import ( call, get_install_dir, get_script, get_command_output, replace_file_pattern, @@ -21,6 +23,7 @@ from utils import ( installdir = get_install_dir() topdir = dirname(installdir) +logger = logging.getLogger(__name__) def collect_upgrade_scripts(from_version, to_version): """ @@ -52,23 +55,131 @@ def parse_upgrade_script_version(script): m = re.match(r'upgrade_([0-9+.]+)_([0-9+.]+).sh', basename(script)) return m.groups() +def run_script_and_update_version_stamp(script, new_version): + logging.info('Running script %s', script) + replace_file_pattern(script, 'read dummy', '') + call(script) + update_version_stamp(new_version) + +def is_minor_upgrade(v1, v2): + get_major_version = lambda x: x.split('.')[:2] + return v1 != v2 and get_major_version(v1) == get_major_version(v2) + +def fix_media_symlinks(current_version): + """ + If the container was recreated and it's not a minor/major upgrade, + we need to fix the media/avatars and media/custom symlink. + """ + media_dir = join( + installdir, + 'seafile-server-{}/seahub/media'.format(current_version) + ) + avatars_dir = join(media_dir, 'avatars') + if not islink(avatars_dir): + logger.info('The container was recreated, running minor-upgrade.sh to fix the media symlinks') + run_minor_upgrade(current_version) + +def run_minor_upgrade(current_version): + minor_upgrade_script = join(installdir, 'upgrade', 'minor-upgrade.sh') + run_script_and_update_version_stamp(minor_upgrade_script, current_version) + +def fix_custom_dir(): + real_custom_dir = '/shared/seafile/seahub-data/custom' + if not exists(real_custom_dir): + os.mkdir(real_custom_dir) + +def fix_ccent_conf(): + ccnet_conf_path = '/shared/seafile/conf/ccnet.conf' + if exists(ccnet_conf_path): + cp = configparser.ConfigParser({}) + try: + cp.read(ccnet_conf_path) + except configparser.DuplicateSectionError as e: + with open(ccnet_conf_path, 'r+') as fp: + content_list = fp.readlines() + aim = '[Client]\n' + count = content_list.count(aim) + if count > 1: + new_content_list = list() + client_port_index = -1 + for index, text in enumerate(content_list): + if text == aim and 'PORT = ' in content_list[index + 1]: + client_port_index = index + 1 + continue + if index == client_port_index: + client_port_index = -1 + continue + new_content_list.append(text) + + new_content = ''.join(new_content_list) + fp.seek(0) + fp.truncate() + fp.write(new_content) + print('\n------------------------------') + print('Fix ccnet conf success') + print('------------------------------\n') + +def fix_seafevents_conf(): + seafevents_conf_path = '/shared/seafile/conf/seafevents.conf' + seahub_conf_path = '/shared/seafile/conf/seahub_settings.py' + pro_data_dir = '/shared/seafile/pro-data/' + if exists(seafevents_conf_path): + os.makedirs(pro_data_dir, exist_ok=True) + + with 
open(seafevents_conf_path, 'r') as fp: + fp_lines = fp.readlines() + if 'port = 6000\n' in fp_lines: + return + + if '[INDEX FILES]\n' in fp_lines and 'external_es_server = true\n' not in fp_lines: + insert_index = fp_lines.index('[INDEX FILES]\n') + 1 + insert_lines = ['es_port = 9200\n', 'es_host = elasticsearch\n', 'external_es_server = true\n'] + for line in insert_lines: + fp_lines.insert(insert_index, line) + + if '[OFFICE CONVERTER]\n' in fp_lines and 'port = 6000\n' not in fp_lines: + insert_index = fp_lines.index('[OFFICE CONVERTER]\n') + 1 + insert_lines = ['host = 127.0.0.1\n', 'port = 6000\n'] + for line in insert_lines: + fp_lines.insert(insert_index, line) + + with open(seafevents_conf_path, 'w') as fp: + fp.writelines(fp_lines) + + with open(seahub_conf_path, 'r') as fp: + fp_lines = fp.readlines() + if "OFFICE_CONVERTOR_ROOT = 'http://127.0.0.1:6000/'\n" not in fp_lines: + fp_lines.append("OFFICE_CONVERTOR_ROOT = 'http://127.0.0.1:6000/'\n") + + with open(seahub_conf_path, 'w') as fp: + fp.writelines(fp_lines) + print('\n------------------------------') + print('Fix seafevents conf success') + print('------------------------------\n') + def check_upgrade(): + fix_custom_dir() + fix_ccent_conf() + fix_seafevents_conf() + last_version = read_version_stamp() current_version = os.environ['SEAFILE_VERSION'] + if last_version == current_version: + fix_media_symlinks(current_version) + return + elif is_minor_upgrade(last_version, current_version): + run_minor_upgrade(current_version) return + # Now we do the major upgrade scripts_to_run = collect_upgrade_scripts(from_version=last_version, to_version=current_version) for script in scripts_to_run: loginfo('Running scripts {}'.format(script)) # Here we use a trick: use a version stamp like 6.1.0 to prevent running # all upgrade scripts before 6.1 again (because 6.1 < 6.1.0 in python) new_version = parse_upgrade_script_version(script)[1] + '.0' - - replace_file_pattern(script, 'read dummy', '') - call(script) - - update_version_stamp(new_version) + run_script_and_update_version_stamp(script, new_version) update_version_stamp(current_version) diff --git a/seafile/scripts/utils/__init__.py b/seafile/scripts/utils/__init__.py index 70a3796..5532c49 100644 --- a/seafile/scripts/utils/__init__.py +++ b/seafile/scripts/utils/__init__.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -#coding: UTF-8 +# coding: UTF-8 + from configparser import ConfigParser from contextlib import contextmanager @@ -15,6 +16,7 @@ import logging.config import click import termcolor import colorlog +import pymysql logger = logging.getLogger('.utils') @@ -142,7 +144,7 @@ def get_process_cmd(pid, env=False): env = 'e' if env else '' try: return subprocess.check_output('ps {} -o command {}'.format(env, pid), - shell=True).strip().splitlines()[1] + shell=True).decode('utf8').strip().splitlines()[1] # except Exception, e: # print(e) except: @@ -151,7 +153,7 @@ def get_process_cmd(pid, env=False): def get_match_pids(pattern): pgrep_output = subprocess.check_output( 'pgrep -f "{}" || true'.format(pattern), - shell=True).strip() + shell=True).decode('utf8').strip() return [int(pid) for pid in pgrep_output.splitlines()] def ask_for_confirm(msg): @@ -169,7 +171,7 @@ def git_current_commit(): def get_command_output(cmd): shell = not isinstance(cmd, list) - return subprocess.check_output(cmd, shell=shell) + return subprocess.check_output(cmd, shell=shell).decode('utf8') def ask_yes_or_no(msg, prompt='', default=None): print('\n' + msg + '\n') @@ -196,7 +198,7 @@ def to_unicode(s): 
return s def to_utf8(s): - if isinstance(s, str): + if isinstance(s, unicode): return s.encode('utf-8') else: return s @@ -265,10 +267,19 @@ def update_version_stamp(version, fn=get_version_stamp_file()): fp.write(version + '\n') def wait_for_mysql(): - while not exists('/var/run/mysqld/mysqld.sock'): - logdbg('waiting for mysql server to be ready') - time.sleep(2) - logdbg('mysql server is ready') + db_host = get_conf('DB_HOST', '127.0.0.1') + db_user = 'root' + db_passwd = get_conf('DB_ROOT_PASSWD', '') + + while True: + try: + pymysql.connect(host=db_host, port=3306, user=db_user, passwd=db_passwd) + except Exception as e: + print ('waiting for mysql server to be ready: %s', e) + time.sleep(2) + continue + logdbg('mysql server is ready') + return def wait_for_nginx(): while True: