mirror of
https://github.com/ggogel/seafile-containerized.git
synced 2024-11-16 17:05:32 +00:00
commit
b261811516
56
cluster/image/Makefile
Normal file
56
cluster/image/Makefile
Normal file
|
@ -0,0 +1,56 @@
|
|||
# Build/publish helpers for the Seafile cluster docker images.
# Images are squashed with docker-squash and re-tagged over the original name.
server_version=6.2.13

base_image=seafileltd/base:16.04
base_image_squashed=seafileltd/base:16.04-squashed
pro_base_image=seafileltd/pro-base:16.04
pro_base_image_squashed=seafileltd/pro-base:16.04-squashed
server_image=seafileltd/seafile:$(server_version)
server_image_squashed=seafileltd/seafile:$(server_version)-squashed
pro_server_image=seafileltd/seafile-pro:$(server_version)
pro_server_image_squashed=seafileltd/seafile-pro:$(server_version)-squashed
latest_pro_server_image=seafileltd/seafile-pro:latest
latest_server_image=seafileltd/seafile:latest

all:
	@echo
	@echo Please use '"make base"', '"make pro-base"', '"make pro-server"' or '"make push"'.
	@echo

base:
	docker pull phusion/baseimage:0.9.19
	docker-squash --tag phusion/baseimage:latest phusion/baseimage:0.9.19
	docker tag phusion/baseimage:latest phusion/baseimage:0.9.19
	cd base && docker build -t $(base_image) .
	docker-squash --tag $(base_image_squashed) $(base_image)
	docker tag $(base_image_squashed) $(base_image)
	# `-` prefix: `docker rmi` with an empty argument list exits non-zero
	# when there are no dangling images; that must not fail the build.
	-docker rmi `docker images --filter "dangling=true" -q --no-trunc`

pro-base:
	cd pro_base && docker build -t $(pro_base_image) .
	docker-squash --tag $(pro_base_image_squashed) $(pro_base_image)
	docker tag $(pro_base_image_squashed) $(pro_base_image)
	-docker rmi `docker images --filter "dangling=true" -q --no-trunc`

pro-server:
	cd pro_seafile && cp -rf ../../../templates ./ && cp -rf ../../scripts ./ && docker build -t $(pro_server_image) .
	docker-squash --tag $(pro_server_image_squashed) $(pro_server_image) --from-layer=$(pro_base_image)
	docker tag $(pro_server_image_squashed) $(pro_server_image)
	docker tag $(pro_server_image) $(latest_pro_server_image)
	-docker rmi `docker images --filter "dangling=true" -q --no-trunc`

push-base:
	docker push $(base_image)

# $(host) is supplied on the command line, e.g. `make push-pro-base host=registry.example.com`
push-pro-base:
	docker tag $(pro_base_image) ${host}/$(pro_base_image)
	docker push ${host}/$(pro_base_image)

push-pro-server:
	docker tag $(pro_server_image) ${host}/$(pro_server_image)
	docker tag $(pro_server_image) ${host}/$(latest_pro_server_image)
	docker push ${host}/$(pro_server_image)
	docker push ${host}/$(latest_pro_server_image)

# Fix: `push` previously depended on the non-existent `push-server` target,
# so `make push` always failed with "No rule to make target".
push: push-base push-pro-server

# Fix: .PHONY listed non-existent targets (server, push-server) and missed
# the real command targets defined above.
.PHONY: all base pro-base pro-server push push-base push-pro-base push-pro-server
|
50
cluster/image/base/Dockerfile
Normal file
50
cluster/image/base/Dockerfile
Normal file
|
@ -0,0 +1,50 @@
|
|||
# Latest phusion baseimage as of 20180412, based on Ubuntu 16.04
# See https://hub.docker.com/r/phusion/baseimage/tags/
FROM phusion/baseimage:0.10.1

# NOTE(review): UPDATED_AT is presumably a cache-busting stamp — bump it to
# force the apt layers below to rebuild; confirm.
ENV UPDATED_AT=20180412 \
    DEBIAN_FRONTEND=noninteractive

# phusion's my_init is PID 1; default to a login shell under it.
CMD ["/sbin/my_init", "--", "bash", "-l"]

RUN apt-get update -qq && apt-get -qq -y install memcached nginx tzdata

# Utility tools
RUN apt-get install -qq -y vim htop net-tools psmisc git wget curl

# Guideline for installing python libs: if a lib has a C component (e.g.
# python-imaging depends on libjpeg/libpng), we install it using apt-get.
# Otherwise we install it with pip.
RUN apt-get install -y python2.7-dev python-imaging python-ldap python-mysqldb
RUN curl -sSL -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
    python /tmp/get-pip.py && \
    rm -rf /tmp/get-pip.py && \
    pip install -U wheel

ADD requirements.txt /tmp/requirements.txt
RUN pip install -r /tmp/requirements.txt

COPY services /services

# Register nginx as a runit service and make our nginx.conf the only config:
# wipe the distro-shipped site/conf.d files first.
RUN mkdir -p /etc/service/nginx && \
    rm -f /etc/nginx/sites-enabled/* /etc/nginx/conf.d/* && \
    mv /services/nginx.conf /etc/nginx/nginx.conf && \
    mv /services/nginx.sh /etc/service/nginx/run

# SSH host keys are not needed inside the container.
RUN mkdir -p /etc/my_init.d && rm -f /etc/my_init.d/00_regen_ssh_host_keys.sh

# Clean up for docker squash
# See https://github.com/goldmann/docker-squash
RUN rm -rf \
    /root/.cache \
    /root/.npm \
    /root/.pip \
    /usr/local/share/doc \
    /usr/share/doc \
    /usr/share/man \
    /usr/share/vim/vim74/doc \
    /usr/share/vim/vim74/lang \
    /usr/share/vim/vim74/spell/en* \
    /usr/share/vim/vim74/tutor \
    /var/lib/apt/lists/* \
    /tmp/*
|
47
cluster/image/base/my_init.d/99_mysql_setup.sh
Executable file
47
cluster/image/base/my_init.d/99_mysql_setup.sh
Executable file
|
@ -0,0 +1,47 @@
|
|||
#!/bin/bash

# Init mysql data dir.
# Borrowed from https://github.com/fideloper/docker-mysql/blob/master/etc/my_init.d/99_mysql_setup.sh
#
# Runs at container start via my_init.d. If /var/lib/mysql has never been
# initialized (no `mysql` system database), bootstrap the system tables with
# a temporary mysqld instance, then shut it down again so the supervised
# mysqld service can take over.

if [[ ! -d /var/lib/mysql/mysql ]]; then
    echo 'Rebuilding mysql data dir'

    chown -R mysql.mysql /var/lib/mysql

    mysql_install_db >/var/log/mysql-bootstrap.log 2>&1
    # TODO: print the log if mysql_install_db fails

    # Remove any stale pid/socket files from a previous run.
    rm -rf /var/run/mysqld/*

    echo 'Starting mysqld'
    mysqld_safe >>/var/log/mysql-bootstrap.log 2>&1 &

    echo 'Waiting for mysqld to come online'
    # The sleep 1 is there to make sure that inotifywait starts up before the socket is created
    while [[ ! -S /var/run/mysqld/mysqld.sock ]]; do
        sleep 1
    done

    echo 'Fixing root password'
    # Set an empty root password; root can only connect from localhost.
    /usr/bin/mysqladmin -u root password ''

    # if [ -d /var/lib/mysql/setup ]; then
    #     echo 'Found /var/lib/mysql/setup - scanning for SQL scripts'
    #     for sql in $(ls /var/lib/mysql/setup/*.sql 2>/dev/null | sort); do
    #         echo 'Running script:' $sql
    #         mysql -uroot -proot -e "\. $sql"
    #         mv $sql $sql.processed
    #     done
    # else
    #     echo 'No setup directory with extra sql scripts to run'
    # fi

    echo 'Shutting down mysqld'
    mysqladmin -uroot shutdown

    # Wait (up to ~10s) for the socket to disappear so this temporary
    # instance does not race with the supervised mysqld service.
    retry=0 maxretry=10
    while [[ -e /var/run/mysqld/mysqld.sock && $retry -le $maxretry ]]; do
        retry=$((retry+1))
        sleep 1
    done
fi
|
12
cluster/image/base/requirements.in
Normal file
12
cluster/image/base/requirements.in
Normal file
|
@ -0,0 +1,12 @@
|
|||
# -*- mode: conf -*-
|
||||
|
||||
# Required by seafile/seahub
|
||||
python-memcached==1.58
|
||||
urllib3==1.19
|
||||
|
||||
# Utility libraries
|
||||
click==6.6
|
||||
termcolor==1.1.0
|
||||
prettytable==0.7.2
|
||||
colorlog==2.7.0
|
||||
Jinja2==2.8
|
15
cluster/image/base/requirements.txt
Normal file
15
cluster/image/base/requirements.txt
Normal file
|
@ -0,0 +1,15 @@
|
|||
#
|
||||
# This file is autogenerated by pip-compile
|
||||
# To update, run:
|
||||
#
|
||||
# pip-compile --output-file requirements.txt requirements.in
|
||||
#
|
||||
click==6.6
|
||||
colorlog==2.7.0
|
||||
Jinja2==2.8
|
||||
MarkupSafe==0.23 # via jinja2
|
||||
prettytable==0.7.2
|
||||
python-memcached==1.58
|
||||
six==1.10.0 # via python-memcached
|
||||
termcolor==1.1.0
|
||||
urllib3==1.19
|
4
cluster/image/base/services/memcached.sh
Executable file
4
cluster/image/base/services/memcached.sh
Executable file
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash
# runit service script: run memcached in the foreground, logging to a file.
# `/sbin/setuser memcache` runs the given command as the user `memcache`.
# If you omit that part, the command will be run as root.
exec /sbin/setuser memcache /usr/bin/memcached >>/var/log/memcached.log 2>&1
|
18
cluster/image/base/services/mysql.sh
Executable file
18
cluster/image/base/services/mysql.sh
Executable file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
# runit service script: run mysqld (MariaDB) in the foreground.

set -e

# Shut mysqld down cleanly whenever this service script exits for any reason.
shutdown_mysql() {
    if [[ -S /var/run/mysqld/mysqld.sock ]]; then
        mysqladmin -u root shutdown || true
    fi
}

trap shutdown_mysql EXIT

mkdir -p /var/run/mysqld
chown mysql:mysql /var/run/mysqld

# NOTE(review): presumably removes a stale Aria control file left by an
# unclean container stop, which would otherwise block startup — confirm.
rm -f /var/lib/mysql/aria_log_control

# Run in the foreground (no daemonize) so runit can supervise the process;
# all output goes to /var/log/mysql.log.
/sbin/setuser mysql /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --plugin-dir=/usr/lib/mysql/plugin --user=mysql --skip-log-error --pid-file=/var/run/mysqld/mysqld.pid --socket=/var/run/mysqld/mysqld.sock --port=3306 >/var/log/mysql.log 2>&1
|
33
cluster/image/base/services/nginx.conf
Normal file
33
cluster/image/base/services/nginx.conf
Normal file
|
@ -0,0 +1,33 @@
|
|||
# Run in the foreground so runit (see services/nginx.sh) can supervise nginx.
daemon off;
user www-data;
worker_processes auto;

events {
    worker_connections 768;
}

http {
    include /etc/nginx/mime.types;
    server_names_hash_bucket_size 256;
    server_names_hash_max_size 1024;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log info;

    gzip on;
    gzip_types text/plain text/css application/javascript application/json text/javascript;

    # Site configs (e.g. the generated seafile.nginx.conf) are dropped into
    # these directories by the bootstrap scripts.
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;

    # Default catch-all: close the connection (nginx's non-standard 444)
    # for requests that match no configured server block.
    server {
        listen 80;
        location / {
            return 444;
        }
    }
}
|
3
cluster/image/base/services/nginx.sh
Executable file
3
cluster/image/base/services/nginx.sh
Executable file
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
# runit service script for nginx ("daemon off;" is set in nginx.conf).
# Redirect stderr to stdout so the supervisor captures all output.
exec 2>&1
exec /usr/sbin/nginx
|
15
cluster/image/pro_base/Dockerfile
Normal file
15
cluster/image/pro_base/Dockerfile
Normal file
|
@ -0,0 +1,15 @@
|
|||
FROM seafileltd/base:16.04

# syslog-ng and syslog-forwarder would mess up the container stdout, not good
# when debugging/upgrading.
RUN apt update

# NOTE(review): office/PDF tooling, JRE and CJK fonts — presumably for the
# pro server's document preview/indexing features; confirm.
RUN apt-get install -y openjdk-8-jre libmemcached-dev zlib1g-dev pwgen curl openssl poppler-utils libpython2.7 libreoffice \
    libreoffice-script-provider-python ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy python-requests

RUN apt-get install -y tzdata python-pip python-setuptools python-urllib3 python-ldap python-ceph

# Python deps: memcached client (pylibmc/django-pylibmc), S3 (boto),
# Aliyun OSS (oss2) and Twilio SMS support.
RUN pip install pylibmc django-pylibmc boto twilio oss2


RUN apt clean
|
17
cluster/image/pro_seafile/Dockerfile
Normal file
17
cluster/image/pro_seafile/Dockerfile
Normal file
|
@ -0,0 +1,17 @@
|
|||
FROM seafileltd/pro-base:16.04
WORKDIR /opt/seafile

# SEAFILE_SERVER is read by the runtime scripts to locate the unpacked
# seafile-pro-server-<version> directory.
ENV SEAFILE_VERSION=6.2.13 SEAFILE_SERVER=seafile-pro-server

RUN mkdir -p /etc/my_init.d

RUN mkdir -p /opt/seafile/

# Download and unpack the pro server tarball straight into /opt/seafile.
RUN curl -sSL -G -d "p=/seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz&dl=1" https://download.seafile.top/d/8c29766a64d24122936f/files/ \
    | tar xzf - -C /opt/seafile/

# my_init.d scripts run at container start, in filename order.
ADD scripts/create_data_links.sh /etc/my_init.d/01_create_data_links.sh

COPY scripts /scripts
COPY templates /templates
RUN chmod u+x /scripts/*
|
156
cluster/scripts/bootstrap.py
Executable file
156
cluster/scripts/bootstrap.py
Executable file
|
@ -0,0 +1,156 @@
|
|||
#!/usr/bin/env python
|
||||
#coding: UTF-8
|
||||
|
||||
"""
|
||||
Bootstraping seafile server, letsencrypt (verification & cron job).
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
from os.path import abspath, basename, exists, dirname, join, isdir
|
||||
import shutil
|
||||
import sys
|
||||
import uuid
|
||||
import time
|
||||
|
||||
from utils import (
|
||||
call, get_conf, get_install_dir, loginfo,
|
||||
get_script, render_template, get_seafile_version, eprint,
|
||||
cert_has_valid_days, get_version_stamp_file, update_version_stamp,
|
||||
wait_for_mysql, wait_for_nginx, read_version_stamp
|
||||
)
|
||||
|
||||
seafile_version = get_seafile_version()
installdir = get_install_dir()
# NOTE(review): presumably installdir is the versioned server directory
# under /opt/seafile, making topdir /opt/seafile itself — confirm in utils.
topdir = dirname(installdir)
# State that must survive container re-creation lives on the /shared volume.
shared_seafiledir = '/shared/seafile'
ssl_dir = '/shared/ssl'
generated_dir = '/bootstrap/generated'
|
||||
|
||||
def init_letsencrypt():
    """Prepare a letsencrypt certificate for the configured hostname.

    Renders the renewal cron entry, then runs the ACME verification via
    /scripts/ssl.sh unless a certificate valid for >= 30 more days is
    already present under /shared/ssl.
    """
    loginfo('Preparing for letsencrypt ...')
    wait_for_nginx()

    if not exists(ssl_dir):
        os.mkdir(ssl_dir)

    domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com')
    context = {
        'ssl_dir': ssl_dir,
        'domain': domain,
    }
    # Cron entry for periodic certificate renewal.
    render_template(
        '/templates/letsencrypt.cron.template',
        join(generated_dir, 'letsencrypt.cron'),
        context
    )

    ssl_crt = '/shared/ssl/{}.crt'.format(domain)
    if exists(ssl_crt):
        loginfo('Found existing cert file {}'.format(ssl_crt))
        if cert_has_valid_days(ssl_crt, 30):
            loginfo('Skip letsencrypt verification since we have a valid certificate')
            return

    loginfo('Starting letsencrypt verification')
    # Render a temporary plain-http nginx conf so letsencrypt can reach the
    # http-01 challenge files over port 80.
    context = {
        'https': False,
        'domain': domain,
    }
    render_template('/templates/seafile.nginx.conf.template',
                    '/etc/nginx/sites-enabled/seafile.nginx.conf', context)

    call('nginx -s reload')
    # Give nginx a moment to finish reloading before the ACME round-trip.
    time.sleep(2)

    call('/scripts/ssl.sh {0} {1}'.format(ssl_dir, domain))
    # if call('/scripts/ssl.sh {0} {1}'.format(ssl_dir, domain), check_call=False) != 0:
    #     eprint('Now waiting 1000s for postmortem')
    #     time.sleep(1000)
    #     sys.exit(1)
|
||||
|
||||
|
||||
def generate_local_nginx_conf():
    """Render the final nginx site configuration (http or https depending
    on SEAFILE_SERVER_LETSENCRYPT)."""
    domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com')
    context = {
        'https': is_https(),
        'domain': domain,
    }
    render_template(
        '/templates/seafile.nginx.conf.template',
        '/etc/nginx/sites-enabled/seafile.nginx.conf',
        context
    )
|
||||
|
||||
|
||||
def is_https():
    """Return True when SEAFILE_SERVER_LETSENCRYPT is set to "true"
    (case-insensitive); defaults to False."""
    flag = get_conf('SEAFILE_SERVER_LETSENCRYPT', 'false')
    return flag.lower() == 'true'
|
||||
|
||||
def parse_args():
    """Parse command-line options for the bootstrap script.

    Currently only the --parse-ports flag is recognized.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--parse-ports', action='store_true')
    return parser.parse_args()
|
||||
|
||||
def init_seafile_server():
    """First-time setup of the seafile server via setup-seafile-mysql.sh.

    Fast path: when /shared/seafile/seafile-data already exists (a
    re-created container reusing an existing data volume), only restore
    the version stamp and the seafile-server-latest symlink, then return.
    """
    version_stamp_file = get_version_stamp_file()
    if exists(join(shared_seafiledir, 'seafile-data')):
        if not exists(version_stamp_file):
            update_version_stamp(os.environ['SEAFILE_VERSION'])
        # The seafile-server-latest symlink lives outside the shared volume
        # and disappears on container re-creation; restore it here.
        # (Original comment was garbled: "sysbol link unlink after docker finish.")
        latest_version_dir='/opt/seafile/seafile-server-latest'
        current_version_dir='/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-' + read_version_stamp()
        if not exists(latest_version_dir):
            call('ln -sf ' + current_version_dir + ' ' + latest_version_dir)
        loginfo('Skip running setup-seafile-mysql.py because there is existing seafile-data folder.')
        return

    loginfo('Now running setup-seafile-mysql.py in auto mode.')
    env = {
        'SERVER_NAME': 'seafile',
        'SERVER_IP': get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com'),
        'MYSQL_USER': 'seafile',
        # Random one-off password for the seafile db user.
        'MYSQL_USER_PASSWD': str(uuid.uuid4()),
        'MYSQL_USER_HOST': '127.0.0.1',
        # Default MariaDB root user has empty password and can only connect from localhost.
        'MYSQL_ROOT_PASSWD': '',
    }

    # Change the script to allow mysql root password to be empty
    call('''sed -i -e 's/if not mysql_root_passwd/if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ/g' {}'''
         .format(get_script('setup-seafile-mysql.py')))

    setup_script = get_script('setup-seafile-mysql.sh')
    call('{} auto -n seafile'.format(setup_script), env=env)

    domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com')
    proto = 'https' if is_https() else 'http'
    # Point seahub's file server URL at the public hostname.
    with open(join(topdir, 'conf', 'seahub_settings.py'), 'a+') as fp:
        fp.write('\n')
        fp.write('FILE_SERVER_ROOT = "{proto}://{domain}/seafhttp"'.format(proto=proto, domain=domain))
        fp.write('\n')

    # By default ccnet-server binds to the unix socket file
    # "/opt/seafile/ccnet/ccnet.sock", but /opt/seafile/ccnet/ is a mounted
    # volume from the docker host, and on windows and some linux environment
    # it's not possible to create unix sockets in an external-mounted
    # directories. So we change the unix socket file path to
    # "/opt/seafile/ccnet.sock" to avoid this problem.
    with open(join(topdir, 'conf', 'ccnet.conf'), 'a+') as fp:
        fp.write('\n')
        fp.write('[Client]\n')
        fp.write('UNIX_SOCKET = /opt/seafile/ccnet.sock\n')
        fp.write('\n')

    # Move freshly generated state to the shared volume and leave symlinks
    # behind so the server still finds it at the original paths.
    files_to_copy = ['conf', 'ccnet', 'seafile-data', 'seahub-data', 'pro-data']
    for fn in files_to_copy:
        src = join(topdir, fn)
        dst = join(shared_seafiledir, fn)
        if not exists(dst) and exists(src):
            shutil.move(src, shared_seafiledir)
            call('ln -sf ' + join(shared_seafiledir, fn) + ' ' + src)

    loginfo('Updating version stamp')
    update_version_stamp(os.environ['SEAFILE_VERSION'])
|
81
cluster/scripts/create_data_links.sh
Executable file
81
cluster/scripts/create_data_links.sh
Executable file
|
@ -0,0 +1,81 @@
|
|||
#!/bin/bash
# Replace in-container data directories with symlinks into the /shared
# volume so seafile state survives container re-creation, then hand off
# to /scripts/init.py.

set -e
set -o pipefail

# During bootstrap the links are handled elsewhere.
# NOTE(review): the env var is spelled SEAFILE_BOOTSRAP (sic); callers set
# the same misspelled name, so it must not be "fixed" here unilaterally.
if [[ $SEAFILE_BOOTSRAP != "" ]]; then
    exit 0
fi

if [[ $TIME_ZONE != "" ]]; then
    time_zone=/usr/share/zoneinfo/$TIME_ZONE
    if [[ ! -e $time_zone ]]; then
        echo "invalid time zone"
        exit 1
    else
        ln -snf $time_zone /etc/localtime
        echo "$TIME_ZONE" > /etc/timezone
    fi
fi

# Seafile state directories that live on the shared volume.
dirs=(
    conf
    ccnet
    seafile-data
    seahub-data
    pro-data
    seafile-license.txt
)

for d in ${dirs[*]}; do
    src=/shared/seafile/$d
    if [[ -e $src ]]; then
        rm -rf /opt/seafile/$d && ln -sf $src /opt/seafile
    fi
done

if [[ ! -e /shared/logs/seafile ]]; then
    mkdir -p /shared/logs/seafile
fi
rm -rf /opt/seafile/logs && ln -sf /shared/logs/seafile/ /opt/seafile/logs

current_version_dir=/opt/seafile/${SEAFILE_SERVER}-${SEAFILE_VERSION}
latest_version_dir=/opt/seafile/seafile-server-latest
seahub_data_dir=/shared/seafile/seahub-data

if [[ ! -e $seahub_data_dir ]]; then
    mkdir -p $seahub_data_dir
fi

# Move seahub media (avatars, custom) to the shared volume and link back.
media_dirs=(
    avatars
    custom
)
for d in ${media_dirs[*]}; do
    source_media_dir=${current_version_dir}/seahub/media/$d
    if [ -e ${source_media_dir} ] && [ ! -e ${seahub_data_dir}/$d ]; then
        mv $source_media_dir ${seahub_data_dir}/$d
    fi
    rm -rf $source_media_dir && ln -sf ${seahub_data_dir}/$d $source_media_dir
done

# MySQL data is persisted under /shared/db.
rm -rf /var/lib/mysql
if [[ ! -e /shared/db ]];then
    mkdir -p /shared/db
fi
ln -sf /shared/db /var/lib/mysql

if [[ ! -e /shared/logs/var-log ]]; then
    chmod 777 /var/log -R
    mv /var/log /shared/logs/var-log
fi
rm -rf /var/log && ln -sf /shared/logs/var-log /var/log

# Fix: the original tested the literal string "latest_version_dir"
# (missing "$"), which never exists, so this branch always ran.
if [[ ! -e $latest_version_dir ]]; then
    ln -sf $current_version_dir $latest_version_dir
fi

chmod u+x /scripts/*

echo $PYTHON
$PYTHON /scripts/init.py
|
37
cluster/scripts/gc.sh
Executable file
37
cluster/scripts/gc.sh
Executable file
|
@ -0,0 +1,37 @@
|
|||
#!/bin/bash
# Run seafile garbage collection. CE must be stopped for offline GC and is
# restarted afterwards; Pro supports online GC.

set -e

# Before
SEAFILE_DIR=/opt/seafile/seafile-server-latest

if [[ $SEAFILE_SERVER != *"pro"* ]]; then
    echo "Seafile CE: Stop Seafile to perform offline garbage collection."
    $SEAFILE_DIR/seafile.sh stop

    echo "Waiting for the server to shut down properly..."
    sleep 5
else
    echo "Seafile Pro: Perform online garbage collection."
fi

# Do it
# Fix: under `set -e` a failing subshell aborted the whole script before
# gc_exit_code was captured, so the CE server was never restarted after a
# failed GC. Capture the status with `||` instead.
gc_exit_code=0
(
    set +e
    $SEAFILE_DIR/seaf-gc.sh | tee -a /var/log/gc.log
    # We want to preserve the exit code of seaf-gc.sh, not of tee
    exit "${PIPESTATUS[0]}"
) || gc_exit_code=$?

# After

if [[ $SEAFILE_SERVER != *"pro"* ]]; then
    echo "Giving the server some time..."
    sleep 3

    $SEAFILE_DIR/seafile.sh start
fi

exit $gc_exit_code
|
46
cluster/scripts/init.py
Executable file
46
cluster/scripts/init.py
Executable file
|
@ -0,0 +1,46 @@
|
|||
#!/usr/bin/env python
|
||||
#coding: UTF-8
|
||||
|
||||
"""
|
||||
Starts the seafile/seahub server and watches the controller process. It is
|
||||
the entrypoint command of the docker container.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from os.path import abspath, basename, exists, dirname, join, isdir
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
|
||||
from utils import (
|
||||
call, get_conf, get_install_dir, get_script, get_command_output,
|
||||
render_template, wait_for_mysql
|
||||
)
|
||||
from upgrade import check_upgrade
|
||||
from bootstrap import init_seafile_server, is_https, init_letsencrypt, generate_local_nginx_conf
|
||||
|
||||
|
||||
shared_seafiledir = '/shared/seafile'
|
||||
ssl_dir = '/shared/ssl'
|
||||
generated_dir = '/bootstrap/generated'
|
||||
installdir = get_install_dir()
|
||||
topdir = dirname(installdir)
|
||||
|
||||
|
||||
def main():
    """Container bootstrap: prepare dirs, TLS and nginx conf, then run
    first-time seafile setup when no configuration exists yet."""
    # NOTE(review): overwrites the bundled setup script with /scripts' copy —
    # presumably to apply local patches; confirm against the image build.
    call('cp -rf /scripts/setup-seafile-mysql.py ' + join(installdir, 'setup-seafile-mysql.py'))
    if not exists(shared_seafiledir):
        os.mkdir(shared_seafiledir)
    if not exists(generated_dir):
        os.makedirs(generated_dir)

    if is_https():
        init_letsencrypt()
    generate_local_nginx_conf()

    # Only run the interactive-setup automation on a fresh volume.
    if not exists(join(shared_seafiledir, 'conf')):
        init_seafile_server()

if __name__ == '__main__':
    main()
|
1497
cluster/scripts/setup-seafile-mysql.py
Executable file
1497
cluster/scripts/setup-seafile-mysql.py
Executable file
File diff suppressed because it is too large
Load diff
46
cluster/scripts/ssl.sh
Executable file
46
cluster/scripts/ssl.sh
Executable file
|
@ -0,0 +1,46 @@
|
|||
#!/bin/bash
# Obtain/renew a Let's Encrypt certificate for $domain using acme-tiny,
# storing keys and certs under $ssldir, then reload nginx.
# Usage: ssl.sh <ssldir> <domain>

set -e

ssldir=${1:?"error params"}
domain=${2:?"error params"}

letsencryptdir=$ssldir/letsencrypt
letsencrypt_script=$letsencryptdir/acme_tiny.py

ssl_account_key=${domain}.account.key
ssl_csr=${domain}.csr
ssl_key=${domain}.key
ssl_crt=${domain}.crt

# nginx serves /var/www/challenges for the ACME http-01 challenge.
mkdir -p /var/www/challenges && chmod -R 777 /var/www/challenges
# Fix: was `mkdir -p ssldir` (missing "$"), which created a literal
# "ssldir" directory in the cwd instead of the target directory.
mkdir -p $ssldir

if ! [[ -d $letsencryptdir ]]; then
    # Fix: GitHub no longer serves the unauthenticated git:// protocol;
    # clone over https instead.
    git clone https://github.com/diafygi/acme-tiny.git $letsencryptdir
else
    cd $letsencryptdir
    git pull origin master:master
fi

cd $ssldir

# Generate the account key, domain key and CSR only once; reuse on renewal.
if [[ ! -e ${ssl_account_key} ]]; then
    openssl genrsa 4096 > ${ssl_account_key}
fi

if [[ ! -e ${ssl_key} ]]; then
    openssl genrsa 4096 > ${ssl_key}
fi

if [[ ! -e ${ssl_csr} ]]; then
    openssl req -new -sha256 -key ${ssl_key} -subj "/CN=$domain" > $ssl_csr
fi

# Sign the CSR via Let's Encrypt and assemble the full certificate chain.
python $letsencrypt_script --account-key ${ssl_account_key} --csr $ssl_csr --acme-dir /var/www/challenges/ > ./signed.crt
curl -sSL -o intermediate.pem https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem
cat signed.crt intermediate.pem > ${ssl_crt}

nginx -s reload

echo "Nginx reloaded."
|
61
cluster/scripts/start.py
Executable file
61
cluster/scripts/start.py
Executable file
|
@ -0,0 +1,61 @@
|
|||
import os
|
||||
import time
|
||||
import json
|
||||
import argparse
|
||||
from os.path import join, exists, dirname
|
||||
|
||||
from upgrade import check_upgrade
|
||||
from utils import call, get_conf, get_script, get_command_output, get_install_dir
|
||||
|
||||
installdir = get_install_dir()
|
||||
topdir = dirname(installdir)
|
||||
|
||||
def watch_controller():
    """Poll for the seafile-controller (or a running gc.sh) process and
    exit the container with status 1 once neither has been seen for 4
    consecutive 5-second polls.
    """
    import sys  # fix: `sys` is used below but never imported in this file

    maxretry = 4
    retry = 0
    while retry < maxretry:
        controller_pid = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
        garbage_collector_pid = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
        if not controller_pid and not garbage_collector_pid:
            retry += 1
        else:
            # A relevant process is alive: reset the failure counter.
            retry = 0
        time.sleep(5)
    print('seafile controller exited unexpectedly.')
    sys.exit(1)
|
||||
|
||||
def main(args):
    """Container entrypoint: create data links, upgrade if needed, then
    start nginx, seafile and seahub and watch the controller process.

    When args.mode == 'backend', the background-tasks service is started
    on this node as well.
    """
    import sys  # fix: `sys` is used below but never imported in this file

    call('/scripts/create_data_links.sh')
    check_upgrade()
    os.chdir(installdir)
    call('service nginx start &')

    # Seed the initial admin credentials in conf/admin.txt.
    # NOTE(review): presumably consumed by the server's first-start setup —
    # confirm. The file is removed afterwards either way so the credentials
    # do not linger on disk.
    admin_pw = {
        'email': get_conf('SEAFILE_ADMIN_EMAIL', 'me@example.com'),
        'password': get_conf('SEAFILE_ADMIN_PASSWORD', 'asecret'),
    }
    password_file = join(topdir, 'conf', 'admin.txt')
    with open(password_file, 'w+') as fp:
        json.dump(admin_pw, fp)

    try:
        call('{} start'.format(get_script('seafile.sh')))
        call('{} start'.format(get_script('seahub.sh')))
        if args.mode == 'backend':
            call('{} start'.format(get_script('seafile-background-tasks.sh')))
    finally:
        if exists(password_file):
            os.unlink(password_file)

    print('seafile server is running now.')
    try:
        watch_controller()
    except KeyboardInterrupt:
        print('Stopping seafile server.')
        sys.exit(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description='Seafile cluster start script')
|
||||
parser.add_argument('--mode')
|
||||
main(parser.parse_args())
|
18
cluster/scripts/start.sh
Executable file
18
cluster/scripts/start.sh
Executable file
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
|
||||
|
||||
function start-front-end() {
|
||||
python /scripts/start.py
|
||||
}
|
||||
|
||||
function start-back-end() {
|
||||
python /scripts/start.py --mode backend
|
||||
}
|
||||
|
||||
case $1 in
|
||||
"front-end" )
|
||||
start-front-end
|
||||
;;
|
||||
"back-end" )
|
||||
start-back-end
|
||||
;;
|
||||
esac
|
82
cluster/scripts/upgrade.py
Executable file
82
cluster/scripts/upgrade.py
Executable file
|
@ -0,0 +1,82 @@
|
|||
#!/usr/bin/env python
|
||||
#coding: UTF-8
|
||||
|
||||
"""
|
||||
This script is used to run proper upgrade scripts automatically.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import glob
|
||||
import os
|
||||
from os.path import abspath, basename, exists, dirname, join, isdir
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
|
||||
from utils import (
|
||||
call, get_install_dir, get_script, get_command_output, replace_file_pattern,
|
||||
read_version_stamp, wait_for_mysql, update_version_stamp, loginfo
|
||||
)
|
||||
|
||||
installdir = get_install_dir()
|
||||
topdir = dirname(installdir)
|
||||
|
||||
def collect_upgrade_scripts(from_version, to_version):
    """
    Given the current installed version, calculate which upgrade scripts we need
    to run to upgrade it to the latest version.

    For example, given current version 5.0.1 and target version 6.1.0, and these
    upgrade scripts:

        upgrade_4.4_5.0.sh
        upgrade_5.0_5.1.sh
        upgrade_5.1_6.0.sh
        upgrade_6.0_6.1.sh

    We need to run upgrade_5.0_5.1.sh, upgrade_5.1_6.0.sh, and upgrade_6.0_6.1.sh.
    """
    # Compare only the major.minor component of each version.
    from_major_ver = '.'.join(from_version.split('.')[:2])
    to_major_ver = '.'.join(to_version.split('.')[:2])

    scripts = []
    for fn in sorted(glob.glob(join(installdir, 'upgrade', 'upgrade_*_*.sh'))):
        va, vb = parse_upgrade_script_version(fn)
        # NOTE(review): versions are compared as strings, which misorders
        # two-digit components (e.g. '10.0' < '9.0'); fine for 6.x-era
        # versions but worth revisiting.
        if va >= from_major_ver and vb <= to_major_ver:
            scripts.append(fn)
    return scripts
|
||||
|
||||
def parse_upgrade_script_version(script):
    """Extract (from_version, to_version) from an upgrade script filename,
    e.g. 'upgrade_5.0_5.1.sh' -> ('5.0', '5.1').

    Raises AttributeError (NoneType has no .groups) when the filename does
    not match — same failure mode as the original.
    """
    # Fix: the original character class `[0-9+.]` also matched a literal
    # '+' (typo for `[0-9.]+`), and the unescaped '.' before "sh" matched
    # any character; tightened both. Also dropped the redundant double
    # basename() call.
    m = re.match(r'upgrade_([0-9.]+)_([0-9.]+)\.sh', basename(script))
    return m.groups()
|
||||
|
||||
def check_upgrade():
    """Run any pending upgrade scripts between the stamped version and the
    version baked into this image (SEAFILE_VERSION)."""
    last_version = read_version_stamp()
    current_version = os.environ['SEAFILE_VERSION']
    if last_version == current_version:
        return

    scripts_to_run = collect_upgrade_scripts(from_version=last_version, to_version=current_version)
    for script in scripts_to_run:
        loginfo('Running scripts {}'.format(script))
        # Here we use a trick: use a version stamp like 6.1.0 to prevent running
        # all upgrade scripts before 6.1 again (because 6.1 < 6.1.0 in python)
        new_version = parse_upgrade_script_version(script)[1] + '.0'

        # The stock upgrade scripts pause with `read dummy`; strip that so
        # they can run unattended.
        replace_file_pattern(script, 'read dummy', '')
        call(script)

        # Stamp after each script so a crash mid-sequence resumes correctly.
        update_version_stamp(new_version)

    update_version_stamp(current_version)
|
||||
|
||||
def main():
    # Standalone entry point: wait for the database, then upgrade in place.
    wait_for_mysql()

    os.chdir(installdir)
    check_upgrade()

if __name__ == '__main__':
    main()
|
287
cluster/scripts/utils/__init__.py
Normal file
287
cluster/scripts/utils/__init__.py
Normal file
|
@ -0,0 +1,287 @@
|
|||
# coding: UTF-8
|
||||
|
||||
from __future__ import print_function
|
||||
from ConfigParser import ConfigParser
|
||||
from contextlib import contextmanager
|
||||
import os
|
||||
import datetime
|
||||
from os.path import abspath, basename, exists, dirname, join, isdir, expanduser
|
||||
import platform
|
||||
import sys
|
||||
import subprocess
|
||||
import time
|
||||
import logging
|
||||
import logging.config
|
||||
import click
|
||||
import termcolor
|
||||
import colorlog
|
||||
|
||||
logger = logging.getLogger('.utils')
|
||||
|
||||
DEBUG_ENABLED = os.environ.get('SEAFILE_DOCKER_VERBOSE', '').lower() in ('true', '1', 'yes')
|
||||
|
||||
def eprint(*args, **kwargs):
    """print() that writes to stderr (any caller-supplied `file` kwarg is
    overridden)."""
    kwargs['file'] = sys.stderr
    print(*args, **kwargs)
|
||||
|
||||
def identity(msg, *a, **kw):
    # No-op stand-in for termcolor.colored when not attached to a terminal.
    return msg

# NOTE(review): isatty is checked on *stdin*, not stdout — looks like it
# should be sys.stdout.fileno(); confirm intent before changing.
colored = identity if not os.isatty(sys.stdin.fileno()) else termcolor.colored
red = lambda s: colored(s, 'red')
green = lambda s: colored(s, 'green')
|
||||
|
||||
def underlined(msg):
    """Wrap *msg* in ANSI underline escape codes."""
    return '\x1b[4m%s\x1b[0m' % (msg,)
|
||||
|
||||
def sudo(*a, **kw):
    # Prefix the command string (first positional arg) with `sudo` and
    # delegate to call() with the remaining args unchanged.
    call('sudo ' + a[0], *a[1:], **kw)
|
||||
|
||||
def _find_flag(args, *opts, **kw):
|
||||
is_flag = kw.get('is_flag', False)
|
||||
if is_flag:
|
||||
return any([opt in args for opt in opts])
|
||||
else:
|
||||
for opt in opts:
|
||||
try:
|
||||
return args[args.index(opt) + 1]
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
def call(*a, **kw):
    """Run a shell command string (wrapper around subprocess).

    Extra keyword args (popped before reaching subprocess):
      dry_run:     log only, do not execute.
      quiet:       suppress the debug log of the command
                   (defaults to DEBUG_ENABLED).
      check_call:  True -> subprocess.check_call (raises on failure);
                   False -> Popen(...).wait() (returns the exit code).
      reduct_args: option names whose values are redacted in the log
                   (e.g. password flags).
    """
    dry_run = kw.pop('dry_run', False)
    quiet = kw.pop('quiet', DEBUG_ENABLED)
    cwd = kw.get('cwd', os.getcwd())
    check_call = kw.pop('check_call', True)
    reduct_args = kw.pop('reduct_args', [])
    if not quiet:
        toprint = a[0]
        # Rough tokenization of the command to locate option values to redact.
        args = [x.strip('"') for x in a[0].split() if '=' not in x]
        for arg in reduct_args:
            value = _find_flag(args, arg)
            toprint = toprint.replace(value, '{}**reducted**'.format(value[:3]))
        # NOTE(review): logdbg is not defined in this chunk — presumably a
        # helper defined elsewhere in this module; confirm.
        logdbg('calling: ' + green(toprint))
        logdbg('cwd: ' + green(cwd))
    kw.setdefault('shell', True)
    if not dry_run:
        if check_call:
            return subprocess.check_call(*a, **kw)
        else:
            return subprocess.Popen(*a, **kw).wait()
|
||||
|
||||
@contextmanager
def cd(path):
    """Context manager: temporarily chdir into *path* (user-expanded) and
    restore the previous working directory on exit, even on error."""
    previous = os.getcwd()
    os.chdir(expanduser(path))
    try:
        yield
    finally:
        os.chdir(previous)
|
||||
|
||||
def must_makedir(p):
    """Create directory *p* (user-expanded, with parents) unless it
    already exists; log which of the two happened."""
    target = expanduser(p)
    if exists(target):
        logger.debug('folder %s already exists', target)
    else:
        logger.info('created folder %s', target)
        os.makedirs(target)
|
||||
|
||||
def setup_colorlog():
    """Install a dictConfig-based logging setup: colored INFO-level console
    output on the root logger, WARN-only for django.request, and quieted
    urllib3 connection-pool logging. Requires the third-party `colorlog`
    package at runtime (referenced by name in the config below)."""
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            # Plain formatter — currently not referenced by any handler.
            'standard': {
                'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
            },
            # Colored console formatter provided by the colorlog package.
            'colored': {
                '()': 'colorlog.ColoredFormatter',
                'format': "%(log_color)s[%(asctime)s]%(reset)s %(blue)s%(message)s",
                'datefmt': '%m/%d/%Y %H:%M:%S',
            },
        },
        'handlers': {
            'default': {
                'level': 'INFO',
                'formatter': 'colored',
                'class': 'logging.StreamHandler',
            },
        },
        'loggers': {
            # Root logger: everything at INFO and above, colored.
            '': {
                'handlers': ['default'],
                'level': 'INFO',
                'propagate': True
            },
            # Keep noisy django request logging down to warnings.
            'django.request': {
                'handlers': ['default'],
                'level': 'WARN',
                'propagate': False
            },
        }
    })

    # Silence per-connection noise from requests' vendored urllib3.
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
        logging.WARNING)
def setup_logging(level=logging.INFO):
    """Configure root logging to stdout (timestamped, module-tagged) and
    quiet urllib3's connection-pool logging down to warnings."""
    logging.basicConfig(
        format='[%(asctime)s][%(module)s]: %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=level,
        stream=sys.stdout,
    )
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
        logging.WARNING)
def get_process_cmd(pid, env=False):
    """Return the command line of process `pid` as reported by ps(1), or None.

    When env is True, `ps e` is used so the environment is appended to the
    command line. The result is the second line of ps output (the first line
    is the column header). Returns None when ps fails (e.g. no such pid) or
    prints no process line.
    """
    flag = 'e' if env else ''
    try:
        output = subprocess.check_output(
            'ps {} -o command {}'.format(flag, pid), shell=True)
        return output.strip().splitlines()[1]
    except (subprocess.CalledProcessError, IndexError):
        # The original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only the expected failure modes are caught now.
        return None
def get_match_pids(pattern):
    """Return the pids (as ints) of processes whose full command line matches
    pattern, using pgrep -f; an empty list when nothing matches."""
    output = subprocess.check_output(
        'pgrep -f "{}" || true'.format(pattern), shell=True).strip()
    return [int(line) for line in output.splitlines()]
def ask_for_confirm(msg):
    """Prompt the user via click (default 'Y'); True iff the answer is y/yes,
    case-insensitive."""
    reply = click.prompt(msg, default='Y')
    return reply.lower() in ('y', 'yes')
def confirm_command_to_run(cmd):
    """Show cmd to the user; run it if confirmed, otherwise exit with status 1."""
    if not ask_for_confirm('Run the command: {} ?'.format(green(cmd))):
        sys.exit(1)
    call(cmd)
def git_current_commit():
    """Short hash of HEAD in the current directory's git repository."""
    output = get_command_output('git rev-parse --short HEAD')
    return output.strip()
def get_command_output(cmd):
    """Run cmd and return its stdout. A string is run through the shell;
    a list is executed directly (no shell)."""
    return subprocess.check_output(cmd, shell=not isinstance(cmd, list))
def ask_yes_or_no(msg, prompt='', default=None):
    """Interactively ask until the user answers yes/y or no/n; return bool.

    `default` is accepted but never used — an empty or invalid answer just
    re-prompts. NOTE(review): uses raw_input, so this is Python 2 only.
    """
    print('\n' + msg + '\n')
    while True:
        answer = raw_input(prompt + ' [yes/no] ').lower()
        if not answer:
            continue

        if answer not in ('yes', 'no', 'y', 'n'):
            continue

        if answer in ('yes', 'y'):
            return True
        else:
            return False
def git_branch_exists(branch):
    """True if branch resolves to a commit in the current git repository.

    NOTE(review): call() defaults to check_call=True, which raises on a
    non-existent branch rather than returning a non-zero status — confirm
    callers expect an exception instead of False in that case.
    """
    cmd = 'git rev-parse --short --verify {}'.format(branch)
    return call(cmd) == 0
def to_unicode(s):
    """Python 2 helper: decode a byte string to unicode via utf-8.

    Non-str values (already-unicode text, None, numbers) pass through
    unchanged. NOTE(review): under Python 3, str has no decode(); this
    function assumes a Python 2 runtime.
    """
    if isinstance(s, str):
        return s.decode('utf-8')
    else:
        return s
def to_utf8(s):
    """Python 2 helper: encode a unicode string to utf-8 bytes.

    Non-unicode values pass through unchanged. NOTE(review): `unicode` is a
    Python 2 builtin; this raises NameError under Python 3.
    """
    if isinstance(s, unicode):
        return s.encode('utf-8')
    else:
        return s
def git_commit_time(refspec):
    """Unix commit timestamp (int) of refspec, via `git log -1 --format=%ct`."""
    output = get_command_output('git log -1 --format="%ct" {}'.format(refspec))
    return int(output.strip())
def get_seafile_version():
    """Seafile server version from the SEAFILE_VERSION env var (KeyError if unset)."""
    version = os.environ['SEAFILE_VERSION']
    return version
def get_install_dir():
    """Path of the versioned server directory under /opt/seafile, e.g.
    /opt/seafile/seafile-server-6.2.13 (flavor from SEAFILE_SERVER)."""
    server_flavor = get_conf('SEAFILE_SERVER', 'seafile-server')
    return join('/opt/seafile/' + server_flavor + '-{}'.format(get_seafile_version()))
def get_script(script):
    """Absolute path of a script inside the versioned install directory."""
    install_dir = get_install_dir()
    return join(install_dir, script)
# Placeholder for a cached parsed config object; currently unused — all
# configuration is read from the environment via get_conf() below.
# TODO(review): confirm this can be removed.
_config = None
def get_conf(key, default=None):
    """Read a configuration value from the environment; the key is upper-cased
    before lookup, so get_conf('seafile_server') reads SEAFILE_SERVER."""
    return os.environ.get(key.upper(), default)
def _add_default_context(context):
|
||||
default_context = {
|
||||
'current_timestr': datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S'),
|
||||
}
|
||||
for k in default_context:
|
||||
context.setdefault(k, default_context[k])
|
||||
|
||||
def render_template(template, target, context):
    """Render a jinja2 template file to `target`, after injecting default
    context keys (e.g. current_timestr) into `context`."""
    from jinja2 import Environment, FileSystemLoader
    _add_default_context(context)
    env = Environment(loader=FileSystemLoader(dirname(template)))
    rendered = env.get_template(basename(template)).render(**context)
    with open(target, 'w') as fp:
        fp.write(rendered)
def logdbg(msg):
    """Log msg (via loginfo, tagged [debug]) only when verbose mode is enabled."""
    if not DEBUG_ENABLED:
        return
    loginfo('[debug] ' + msg)
def loginfo(msg):
    """Print a timestamped, green-colored message to stderr."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    eprint('[{}] {}'.format(stamp, green(msg)))
def cert_has_valid_days(cert, days):
    """True if the x509 certificate at path `cert` remains valid for at least
    `days` more days, as reported by `openssl x509 -checkend`."""
    assert exists(cert)

    seconds = int(days) * 86400
    cmd = 'openssl x509 -checkend {} -noout -in {}'.format(seconds, cert)
    return call(cmd, check_call=False) == 0
def get_version_stamp_file():
    """Path of the file recording the currently-deployed Seafile version."""
    return '/shared/seafile/seafile-data/current_version'
def read_version_stamp(fn=get_version_stamp_file()):
    """Return the recorded version string from the stamp file, whitespace
    stripped. Note the default fn is computed once, at definition time."""
    assert exists(fn), 'version stamp file {} does not exist!'.format(fn)
    with open(fn, 'r') as fp:
        content = fp.read()
    return content.strip()
def update_version_stamp(version, fn=get_version_stamp_file()):
    """Overwrite the stamp file with `version` followed by a newline."""
    content = version + '\n'
    with open(fn, 'w') as fp:
        fp.write(content)
def wait_for_mysql():
    """Block until the local mysqld unix socket appears, polling every 2s."""
    socket_path = '/var/run/mysqld/mysqld.sock'
    while not exists(socket_path):
        logdbg('waiting for mysql server to be ready')
        time.sleep(2)
    logdbg('mysql server is ready')
def wait_for_nginx():
    # Poll `netstat -nltp` every 2s until something is listening on port 80.
    # NOTE(review): under Python 3 get_command_output() returns bytes, so
    # `':80 ' in output` would raise TypeError — this code assumes Python 2.
    while True:
        logdbg('waiting for nginx server to be ready')
        output = get_command_output('netstat -nltp')
        if ':80 ' in output:
            logdbg(output)
            logdbg('nginx is ready')
            return
        time.sleep(2)
def replace_file_pattern(fn, pattern, replacement):
    """Replace every occurrence of `pattern` in file `fn` with `replacement`,
    rewriting the file in place."""
    with open(fn, 'r') as fp:
        original = fp.read()
    updated = original.replace(pattern, replacement)
    with open(fn, 'w') as fp:
        fp.write(updated)
templates/letsencrypt.cron.template — new file, 3 lines (@@ -0,0 +1,3 @@):
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# System-crontab format (includes a user field, here `root`):
# renew the letsencrypt certificate at 00:00 on the 1st of every month.
# min	hour	dayofmonth	month	dayofweek	command
0 0 1 * * root /scripts/ssl.sh {{ ssl_dir }} {{ domain }}
templates/seafile.nginx.conf.template — new file, 81 lines (@@ -0,0 +1,81 @@):
# -*- mode: nginx -*-
# Auto generated at {{ current_timestr }}
{% if https -%}
# Redirect all plain-HTTP traffic to the HTTPS site.
server {
    listen 80;
    server_name _ default_server;
    rewrite ^ https://{{ domain }}$request_uri? permanent;
}
{% endif -%}

server {
{% if https -%}
    listen 443;
    # NOTE(review): `ssl on;` is deprecated in modern nginx in favor of
    # `listen 443 ssl;` — confirm the nginx version shipped in the image.
    ssl on;
    ssl_certificate /shared/ssl/{{ domain }}.crt;
    ssl_certificate_key /shared/ssl/{{ domain }}.key;

    ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;

    # TODO: More SSL security hardening: ssl_session_tickets & ssl_dhparam
    # ssl_session_tickets on;
    # ssl_session_ticket_key /etc/nginx/sessionticket.key;
    # ssl_session_cache shared:SSL:10m;
    # ssl_session_timeout 10m;
{% else -%}
    listen 80;
{% endif -%}

    server_name {{ domain }};

    client_max_body_size 10m;

    # Seahub web UI (gunicorn on port 8000).
    location / {
        proxy_pass http://127.0.0.1:8000/;
        proxy_read_timeout 310s;
        proxy_set_header Host $host;
        proxy_set_header Forwarded "for=$remote_addr;proto=$scheme";
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Connection "";
        proxy_http_version 1.1;
    }

    # Seafile file server (port 8082); body size unlimited for uploads,
    # very long timeouts for large transfers.
    location /seafhttp {
        rewrite ^/seafhttp(.*)$ $1 break;
        proxy_pass http://127.0.0.1:8082;
        client_max_body_size 0;
        proxy_connect_timeout 36000s;
        proxy_read_timeout 36000s;
    }

    # WebDAV access via FastCGI on port 8080.
    location /seafdav {
        client_max_body_size 0;
        fastcgi_pass 127.0.0.1:8080;
        fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        fastcgi_param PATH_INFO $fastcgi_script_name;

        fastcgi_param SERVER_PROTOCOL $server_protocol;
        fastcgi_param QUERY_STRING $query_string;
        fastcgi_param REQUEST_METHOD $request_method;
        fastcgi_param CONTENT_TYPE $content_type;
        fastcgi_param CONTENT_LENGTH $content_length;
        fastcgi_param SERVER_ADDR $server_addr;
        fastcgi_param SERVER_PORT $server_port;
        fastcgi_param SERVER_NAME $server_name;

        access_log /var/log/nginx/seafdav.access.log;
        error_log /var/log/nginx/seafdav.error.log;
    }

    # Static assets served directly by nginx.
    location /media {
        root /opt/seafile/seafile-server-latest/seahub;
    }

    # For letsencrypt
    location /.well-known/acme-challenge/ {
        alias /var/www/challenges/;
        try_files $uri =404;
    }
}
Loading…
Reference in a new issue