This commit is contained in:
Hulk 2018-12-19 07:43:08 +01:00
commit 372ccbd3f6
30 changed files with 2516 additions and 46 deletions

.github/stale.yml vendored Normal file
View file

@ -0,0 +1,17 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 90
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: wontfix
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

View file

@ -34,6 +34,21 @@ Wait for a few minutes for the first time initialization, then visit `http://sea
This command mounts the folder `/opt/seafile-data` on the host into the docker container. You can find logs and other data under this folder.
### Put your license file
If you have a `seafile-license.txt` license file, simply put it in the folder `/opt/seafile-data/seafile/`. On your host machine:
```sh
mkdir -p /opt/seafile-data/seafile/
cp /path/to/seafile-license.txt /opt/seafile-data/seafile/
```
Then restart the container.
```sh
docker restart seafile
```
### More Configuration Options
#### Custom Admin Username and Password
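The start script added in this commit reads `SEAFILE_ADMIN_EMAIL` and `SEAFILE_ADMIN_PASSWORD` through `get_conf()`, so as a rough sketch (paths, container name and image tag are taken from the examples above, credentials are placeholders) the admin account can be customised at `docker run` time:
```sh
# Sketch only: variable names come from start.py in this commit; adjust paths and tag to your setup.
docker run -d --name seafile \
  -e SEAFILE_ADMIN_EMAIL=me@example.com \
  -e SEAFILE_ADMIN_PASSWORD=a_very_secret_password \
  -v /opt/seafile-data:/shared \
  -p 80:80 \
  seafileltd/seafile:latest
```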

View file

@ -55,8 +55,10 @@ Now run the following commands:
(Note that if you're using a drive other than "C:", say "D:", you should change "c:\\seafile" in the following commands to "d:\\seafile".)
```sh
docker pull seafileltd/seafile:6.2.1
docker run -d --name seafile-server -v /root/seafile:/shared -p 80:80 seafileltd/seafile:6.2.1
docker pull seafileltd/seafile:6.3.3
docker run -d --name seafile-server -v /root/seafile:/shared -p 80:80 seafileltd/seafile:6.3.3
```
The tag for the most recent version of the image can be found at https://hub.docker.com/r/seafileltd/seafile/tags/.
If you are not familiar with docker commands, refer to [docker documentation](https://docs.docker.com/engine/reference/commandline/cli/).

View file

@ -1,14 +1,14 @@
server_version=6.2.13
server_version=6.3.7
base_image=seafileltd/base:16.04
base_image_squashed=seafileltd/base:16.04-squashed
pro_base_image=seafileltd/pro-base:16.04
pro_base_image_squashed=seafileltd/pro-base:16.04-squashed
base_image=seafileltd/cluster-base:18.04
base_image_squashed=seafileltd/cluster-base:18.04-squashed
pro_base_image=seafileltd/cluster-pro-base:18.04
pro_base_image_squashed=seafileltd/cluster-pro-base:18.04-squashed
server_image=seafileltd/seafile:$(server_version)
server_image_squashed=seafileltd/seafile:$(server_version)-squashed
pro_server_image=seafileltd/seafile-pro:$(server_version)
pro_server_image_squashed=seafileltd/seafile-pro:$(server_version)-squashed
latest_pro_server_image=seafileltd/seafile-pro:latest
pro_server_image=seafileltd/cluster-seafile-pro:$(server_version)
pro_server_image_squashed=seafileltd/cluster-seafile-pro:$(server_version)-squashed
latest_pro_server_image=seafileltd/cluster-seafile-pro:latest
latest_server_image=seafileltd/seafile:latest
all:
@ -17,9 +17,9 @@ all:
@echo
base:
docker pull phusion/baseimage:0.9.19
docker-squash --tag phusion/baseimage:latest phusion/baseimage:0.9.19
docker tag phusion/baseimage:latest phusion/baseimage:0.9.19
docker pull phusion/baseimage:0.11
docker-squash --tag phusion/baseimage:latest phusion/baseimage:0.11
docker tag phusion/baseimage:latest phusion/baseimage:0.11
cd base && docker build -t $(base_image) .
docker-squash --tag $(base_image_squashed) $(base_image)
docker tag $(base_image_squashed) $(base_image)

View file

@ -1,6 +1,6 @@
# Latest phusion baseimage as of 20180412, based on ubuntu 16.04
# Latest phusion baseimage as of 20180412, based on ubuntu 18.04
# See https://hub.docker.com/r/phusion/baseimage/tags/
FROM phusion/baseimage:0.10.1
FROM phusion/baseimage:0.11
ENV UPDATED_AT=20180412 \
DEBIAN_FRONTEND=noninteractive
@ -15,7 +15,7 @@ RUN apt-get install -qq -y vim htop net-tools psmisc git wget curl
# Guideline for installing python libs: if a lib has a C component (e.g.
# python-imaging depends on libjpeg/libpng), we install it with apt-get.
# Otherwise we install it with pip.
RUN apt-get install -y python2.7-dev python-imaging python-ldap python-mysqldb
RUN apt-get install -y python2.7-dev python-ldap python-mysqldb libmemcached-dev zlib1g-dev gcc
RUN curl -sSL -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
python /tmp/get-pip.py && \
rm -rf /tmp/get-pip.py && \

View file

@ -9,7 +9,8 @@ colorlog==2.7.0
Jinja2==2.8
MarkupSafe==0.23 # via jinja2
prettytable==0.7.2
python-memcached==1.58
six==1.10.0 # via python-memcached
termcolor==1.1.0
urllib3==1.19
Pillow==4.3.0
pylibmc
django-pylibmc

View file

@ -1,15 +1,25 @@
FROM seafileltd/base:16.04
FROM seafileltd/cluster-base:18.04
# syslog-ng and syslog-forwarder would mess up the container stdout, not good
# when debugging/upgrading.
# Fixing the "Sub-process /usr/bin/dpkg returned an error code (1)",
# when RUN apt-get
RUN mkdir -p /usr/share/man/man1
RUN apt update
RUN apt-get install -y openjdk-8-jre libmemcached-dev zlib1g-dev pwgen curl openssl poppler-utils libpython2.7 libreoffice \
libreoffice-script-provider-python ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy python-requests
libreoffice-script-provider-python ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy python-requests mysql-client
RUN apt-get install -y tzdata python-pip python-setuptools python-urllib3 python-ldap python-ceph
RUN pip install pylibmc django-pylibmc boto twilio oss2
# The S3 storage, OSS storage, PSD online preview, etc.
# depend on the following Python packages:
RUN pip install boto==2.43.0 \
oss2==2.3.0 \
psd-tools==1.4 \
pycryptodome==3.7.2 \
twilio==5.7.0
RUN apt clean

View file

@ -1,13 +1,13 @@
FROM seafileltd/pro-base:16.04
FROM seafileltd/cluster-pro-base:18.04
WORKDIR /opt/seafile
ENV SEAFILE_VERSION=6.2.13 SEAFILE_SERVER=seafile-pro-server
ENV SEAFILE_VERSION=6.3.7 SEAFILE_SERVER=seafile-pro-server
RUN mkdir -p /etc/my_init.d
RUN mkdir -p /opt/seafile/
RUN curl -sSL -G -d "p=/seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz&dl=1" https://download.seafile.top/d/8c29766a64d24122936f/files/ \
RUN curl -sSL -G -d "p=/pro/seafile-pro-server_${SEAFILE_VERSION}_x86-64_Ubuntu.tar.gz&dl=1" https://download.seafile.com/d/6e5297246c/files/ \
| tar xzf - -C /opt/seafile/
ADD scripts/create_data_links.sh /etc/my_init.d/01_create_data_links.sh

View file

@ -0,0 +1,156 @@
#!/usr/bin/env python
#coding: UTF-8
"""
Bootstrapping seafile server, letsencrypt (verification & cron job).
"""
import argparse
import os
from os.path import abspath, basename, exists, dirname, join, isdir
import shutil
import sys
import uuid
import time
from utils import (
call, get_conf, get_install_dir, loginfo,
get_script, render_template, get_seafile_version, eprint,
cert_has_valid_days, get_version_stamp_file, update_version_stamp,
wait_for_mysql, wait_for_nginx, read_version_stamp
)
seafile_version = get_seafile_version()
installdir = get_install_dir()
topdir = dirname(installdir)
shared_seafiledir = '/shared/seafile'
ssl_dir = '/shared/ssl'
generated_dir = '/bootstrap/generated'
def init_letsencrypt():
loginfo('Preparing for letsencrypt ...')
wait_for_nginx()
if not exists(ssl_dir):
os.mkdir(ssl_dir)
domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com')
context = {
'ssl_dir': ssl_dir,
'domain': domain,
}
render_template(
'/templates/letsencrypt.cron.template',
join(generated_dir, 'letsencrypt.cron'),
context
)
ssl_crt = '/shared/ssl/{}.crt'.format(domain)
if exists(ssl_crt):
loginfo('Found existing cert file {}'.format(ssl_crt))
if cert_has_valid_days(ssl_crt, 30):
loginfo('Skip letsencrypt verification since we have a valid certificate')
return
loginfo('Starting letsencrypt verification')
# Create a temporary nginx conf to start a server, which would be accessed by letsencrypt
context = {
'https': False,
'domain': domain,
}
render_template('/templates/seafile.nginx.conf.template',
'/etc/nginx/sites-enabled/seafile.nginx.conf', context)
call('nginx -s reload')
time.sleep(2)
call('/scripts/ssl.sh {0} {1}'.format(ssl_dir, domain))
# if call('/scripts/ssl.sh {0} {1}'.format(ssl_dir, domain), check_call=False) != 0:
# eprint('Now waiting 1000s for postmortem')
# time.sleep(1000)
# sys.exit(1)
def generate_local_nginx_conf():
# Now create the final nginx configuration
domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com')
context = {
'https': is_https(),
'domain': domain,
}
render_template(
'/templates/seafile.nginx.conf.template',
'/etc/nginx/sites-enabled/seafile.nginx.conf',
context
)
def is_https():
return get_conf('SEAFILE_SERVER_LETSENCRYPT', 'false').lower() == 'true'
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('--parse-ports', action='store_true')
return ap.parse_args()
def init_seafile_server():
version_stamp_file = get_version_stamp_file()
if exists(join(shared_seafiledir, 'seafile-data')):
if not exists(version_stamp_file):
update_version_stamp(os.environ['SEAFILE_VERSION'])
# The symbolic link goes away after the docker container exits, so recreate it here.
latest_version_dir='/opt/seafile/seafile-server-latest'
current_version_dir='/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-' + read_version_stamp()
if not exists(latest_version_dir):
call('ln -sf ' + current_version_dir + ' ' + latest_version_dir)
loginfo('Skip running setup-seafile-mysql.py because there is an existing seafile-data folder.')
return
loginfo('Now running setup-seafile-mysql.py in auto mode.')
env = {
'SERVER_NAME': 'seafile',
'SERVER_IP': get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com'),
'MYSQL_USER': 'seafile',
'MYSQL_USER_PASSWD': str(uuid.uuid4()),
'MYSQL_USER_HOST': '127.0.0.1',
# Default MariaDB root user has empty password and can only connect from localhost.
'MYSQL_ROOT_PASSWD': '',
}
# Change the script to allow mysql root password to be empty
call('''sed -i -e 's/if not mysql_root_passwd/if not mysql_root_passwd and "MYSQL_ROOT_PASSWD" not in os.environ/g' {}'''
.format(get_script('setup-seafile-mysql.py')))
setup_script = get_script('setup-seafile-mysql.sh')
call('{} auto -n seafile'.format(setup_script), env=env)
domain = get_conf('SEAFILE_SERVER_HOSTNAME', 'seafile.example.com')
proto = 'https' if is_https() else 'http'
with open(join(topdir, 'conf', 'seahub_settings.py'), 'a+') as fp:
fp.write('\n')
fp.write('FILE_SERVER_ROOT = "{proto}://{domain}/seafhttp"'.format(proto=proto, domain=domain))
fp.write('\n')
# By default ccnet-server binds to the unix socket file
# "/opt/seafile/ccnet/ccnet.sock", but /opt/seafile/ccnet/ is a mounted
# volume from the docker host, and on windows and in some linux environments
# it's not possible to create unix sockets in externally-mounted
# directories. So we change the unix socket file path to
# "/opt/seafile/ccnet.sock" to avoid this problem.
with open(join(topdir, 'conf', 'ccnet.conf'), 'a+') as fp:
fp.write('\n')
fp.write('[Client]\n')
fp.write('UNIX_SOCKET = /opt/seafile/ccnet.sock\n')
fp.write('\n')
files_to_copy = ['conf', 'ccnet', 'seafile-data', 'seahub-data', 'pro-data']
for fn in files_to_copy:
src = join(topdir, fn)
dst = join(shared_seafiledir, fn)
if not exists(dst) and exists(src):
shutil.move(src, shared_seafiledir)
call('ln -sf ' + join(shared_seafiledir, fn) + ' ' + src)
loginfo('Updating version stamp')
update_version_stamp(os.environ['SEAFILE_VERSION'])
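The bootstrap above takes all of its settings from environment variables via `get_conf()`. As a hedged illustration (the variable names are the ones read above; the mount path, ports and image tag are just the ones used elsewhere in this commit), enabling letsencrypt for a given hostname would look roughly like:
```sh
# Sketch only: SEAFILE_SERVER_HOSTNAME and SEAFILE_SERVER_LETSENCRYPT are the keys read by get_conf().
docker run -d --name seafile \
  -e SEAFILE_SERVER_HOSTNAME=seafile.example.com \
  -e SEAFILE_SERVER_LETSENCRYPT=true \
  -v /opt/seafile-data:/shared \
  -p 80:80 -p 443:443 \
  seafileltd/seafile:latest
```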

View file

@ -0,0 +1,81 @@
#!/bin/bash
set -e
set -o pipefail
if [[ $SEAFILE_BOOTSRAP != "" ]]; then
exit 0
fi
if [[ $TIME_ZONE != "" ]]; then
time_zone=/usr/share/zoneinfo/$TIME_ZONE
if [[ ! -e $time_zone ]]; then
echo "invalid time zone"
exit 1
else
ln -snf $time_zone /etc/localtime
echo "$TIME_ZONE" > /etc/timezone
fi
fi
dirs=(
conf
ccnet
seafile-data
seahub-data
pro-data
seafile-license.txt
)
for d in ${dirs[*]}; do
src=/shared/seafile/$d
if [[ -e $src ]]; then
rm -rf /opt/seafile/$d && ln -sf $src /opt/seafile
fi
done
if [[ ! -e /shared/logs/seafile ]]; then
mkdir -p /shared/logs/seafile
fi
rm -rf /opt/seafile/logs && ln -sf /shared/logs/seafile/ /opt/seafile/logs
current_version_dir=/opt/seafile/${SEAFILE_SERVER}-${SEAFILE_VERSION}
latest_version_dir=/opt/seafile/seafile-server-latest
seahub_data_dir=/shared/seafile/seahub-data
if [[ ! -e $seahub_data_dir ]]; then
mkdir -p $seahub_data_dir
fi
media_dirs=(
avatars
custom
)
for d in ${media_dirs[*]}; do
source_media_dir=${current_version_dir}/seahub/media/$d
if [ -e ${source_media_dir} ] && [ ! -e ${seahub_data_dir}/$d ]; then
mv $source_media_dir ${seahub_data_dir}/$d
fi
rm -rf $source_media_dir && ln -sf ${seahub_data_dir}/$d $source_media_dir
done
rm -rf /var/lib/mysql
if [[ ! -e /shared/db ]];then
mkdir -p /shared/db
fi
ln -sf /shared/db /var/lib/mysql
if [[ ! -e /shared/logs/var-log ]]; then
chmod 777 /var/log -R
mv /var/log /shared/logs/var-log
fi
rm -rf /var/log && ln -sf /shared/logs/var-log /var/log
if [[ ! -e $latest_version_dir ]]; then
ln -sf $current_version_dir $latest_version_dir
fi
chmod u+x /scripts/*
echo $PYTHON
$PYTHON /scripts/init.py
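A quick way to confirm the links this script creates is to list them from the host; this is only a sketch and assumes the container is named `seafile` as in the documentation above:
```sh
# conf/, ccnet/, seafile-data/, seahub-data/ and logs should be symlinks into /shared,
# and /var/lib/mysql should point to /shared/db.
docker exec seafile sh -c 'ls -ld /opt/seafile/* /var/lib/mysql'
```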

View file

@ -0,0 +1,37 @@
#!/bin/bash
set -e
# Before
SEAFILE_DIR=/opt/seafile/seafile-server-latest
if [[ $SEAFILE_SERVER != *"pro"* ]]; then
echo "Seafile CE: Stop Seafile to perform offline garbage collection."
$SEAFILE_DIR/seafile.sh stop
echo "Waiting for the server to shut down properly..."
sleep 5
else
echo "Seafile Pro: Perform online garbage collection."
fi
# Do it
(
set +e
$SEAFILE_DIR/seaf-gc.sh "$@" | tee -a /var/log/gc.log
# We want to preserve the exit code of seaf-gc.sh
exit "${PIPESTATUS[0]}"
)
gc_exit_code=$?
# After
if [[ $SEAFILE_SERVER != *"pro"* ]]; then
echo "Giving the server some time..."
sleep 3
$SEAFILE_DIR/seafile.sh start
fi
exit $gc_exit_code
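Because the script forwards its arguments to `seaf-gc.sh` via `"$@"`, it can be run on demand inside the container; a minimal sketch, again assuming the container name `seafile` from the docs:
```sh
# Runs garbage collection and appends the output to /var/log/gc.log inside the container.
docker exec seafile /scripts/gc.sh
```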

View file

@ -0,0 +1,46 @@
#!/usr/bin/env python
#coding: UTF-8
"""
Starts the seafile/seahub server and watches the controller process. It is
the entrypoint command of the docker container.
"""
import json
import os
from os.path import abspath, basename, exists, dirname, join, isdir
import shutil
import sys
import time
from utils import (
call, get_conf, get_install_dir, get_script, get_command_output,
render_template, wait_for_mysql
)
from upgrade import check_upgrade
from bootstrap import init_seafile_server, is_https, init_letsencrypt, generate_local_nginx_conf
shared_seafiledir = '/shared/seafile'
ssl_dir = '/shared/ssl'
generated_dir = '/bootstrap/generated'
installdir = get_install_dir()
topdir = dirname(installdir)
def main():
call('cp -rf /scripts/setup-seafile-mysql.py ' + join(installdir, 'setup-seafile-mysql.py'))
if not exists(shared_seafiledir):
os.mkdir(shared_seafiledir)
if not exists(generated_dir):
os.makedirs(generated_dir)
if is_https():
init_letsencrypt()
generate_local_nginx_conf()
if not exists(join(shared_seafiledir, 'conf')):
init_seafile_server()
if __name__ == '__main__':
main()

File diff suppressed because it is too large

View file

@ -0,0 +1,46 @@
#!/bin/bash
set -e
ssldir=${1:?"error params"}
domain=${2:?"error params"}
letsencryptdir=$ssldir/letsencrypt
letsencrypt_script=$letsencryptdir/acme_tiny.py
ssl_account_key=${domain}.account.key
ssl_csr=${domain}.csr
ssl_key=${domain}.key
ssl_crt=${domain}.crt
mkdir -p /var/www/challenges && chmod -R 777 /var/www/challenges
mkdir -p $ssldir
if ! [[ -d $letsencryptdir ]]; then
git clone git://github.com/diafygi/acme-tiny.git $letsencryptdir
else
cd $letsencryptdir
git pull origin master:master
fi
cd $ssldir
if [[ ! -e ${ssl_account_key} ]]; then
openssl genrsa 4096 > ${ssl_account_key}
fi
if [[ ! -e ${ssl_key} ]]; then
openssl genrsa 4096 > ${ssl_key}
fi
if [[ ! -e ${ssl_csr} ]]; then
openssl req -new -sha256 -key ${ssl_key} -subj "/CN=$domain" > $ssl_csr
fi
python $letsencrypt_script --account-key ${ssl_account_key} --csr $ssl_csr --acme-dir /var/www/challenges/ > ./signed.crt
curl -sSL -o intermediate.pem https://letsencrypt.org/certs/lets-encrypt-x3-cross-signed.pem
cat signed.crt intermediate.pem > ${ssl_crt}
nginx -s reload
echo "Nginx reloaded."

View file

@ -0,0 +1,61 @@
import os
import sys
import time
import json
import argparse
from os.path import join, exists, dirname
from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir
installdir = get_install_dir()
topdir = dirname(installdir)
def watch_controller():
maxretry = 4
retry = 0
while retry < maxretry:
controller_pid = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
garbage_collector_pid = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
if not controller_pid and not garbage_collector_pid:
retry += 1
else:
retry = 0
time.sleep(5)
print 'seafile controller exited unexpectedly.'
sys.exit(1)
def main(args):
call('/scripts/create_data_links.sh')
check_upgrade()
os.chdir(installdir)
call('service nginx start &')
admin_pw = {
'email': get_conf('SEAFILE_ADMIN_EMAIL', 'me@example.com'),
'password': get_conf('SEAFILE_ADMIN_PASSWORD', 'asecret'),
}
password_file = join(topdir, 'conf', 'admin.txt')
with open(password_file, 'w+') as fp:
json.dump(admin_pw, fp)
try:
call('{} start'.format(get_script('seafile.sh')))
call('{} start'.format(get_script('seahub.sh')))
if args.mode == 'backend':
call('{} start'.format(get_script('seafile-background-tasks.sh')))
finally:
if exists(password_file):
os.unlink(password_file)
print 'seafile server is running now.'
try:
watch_controller()
except KeyboardInterrupt:
print 'Stopping seafile server.'
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Seafile cluster start script')
parser.add_argument('--mode')
main(parser.parse_args())

View file

@ -0,0 +1,18 @@
#!/bin/bash
function start-front-end() {
python /scripts/start.py
}
function start-back-end() {
python /scripts/start.py --mode backend
}
case $1 in
"front-end" )
start-front-end
;;
"back-end" )
start-back-end
;;
esac
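The entrypoint picks the node role from its first argument, so a cluster runs the same image with different commands. The sketch below assumes this script is the image's entrypoint (not shown in this diff) and uses the image name from the cluster Makefile; database and memcached settings are omitted:
```sh
# Front-end node: serves seahub/seafile behind nginx.
docker run -d --name seafile-frontend -v /opt/seafile-data:/shared -p 80:80 \
  seafileltd/cluster-seafile-pro:6.3.7 front-end

# Back-end node: additionally starts seafile-background-tasks.sh (start.py --mode backend).
docker run -d --name seafile-backend -v /opt/seafile-data:/shared \
  seafileltd/cluster-seafile-pro:6.3.7 back-end
```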

View file

@ -0,0 +1,82 @@
#!/usr/bin/env python
#coding: UTF-8
"""
This script is used to run proper upgrade scripts automatically.
"""
import json
import re
import glob
import os
from os.path import abspath, basename, exists, dirname, join, isdir
import shutil
import sys
import time
from utils import (
call, get_install_dir, get_script, get_command_output, replace_file_pattern,
read_version_stamp, wait_for_mysql, update_version_stamp, loginfo
)
installdir = get_install_dir()
topdir = dirname(installdir)
def collect_upgrade_scripts(from_version, to_version):
"""
Given the currently installed version, calculate which upgrade scripts we need
to run to upgrade it to the latest version.
For example, given current version 5.0.1 and target version 6.1.0, and these
upgrade scripts:
upgrade_4.4_5.0.sh
upgrade_5.0_5.1.sh
upgrade_5.1_6.0.sh
upgrade_6.0_6.1.sh
We need to run upgrade_5.0_5.1.sh, upgrade_5.1_6.0.sh, and upgrade_6.0_6.1.sh.
"""
from_major_ver = '.'.join(from_version.split('.')[:2])
to_major_ver = '.'.join(to_version.split('.')[:2])
scripts = []
for fn in sorted(glob.glob(join(installdir, 'upgrade', 'upgrade_*_*.sh'))):
va, vb = parse_upgrade_script_version(fn)
if va >= from_major_ver and vb <= to_major_ver:
scripts.append(fn)
return scripts
def parse_upgrade_script_version(script):
script = basename(script)
m = re.match(r'upgrade_([0-9+.]+)_([0-9+.]+).sh', basename(script))
return m.groups()
def check_upgrade():
last_version = read_version_stamp()
current_version = os.environ['SEAFILE_VERSION']
if last_version == current_version:
return
scripts_to_run = collect_upgrade_scripts(from_version=last_version, to_version=current_version)
for script in scripts_to_run:
loginfo('Running script {}'.format(script))
# Here we use a trick: use a version stamp like 6.1.0 to prevent running
# all upgrade scripts before 6.1 again (because 6.1 < 6.1.0 in python)
new_version = parse_upgrade_script_version(script)[1] + '.0'
replace_file_pattern(script, 'read dummy', '')
call(script)
update_version_stamp(new_version)
update_version_stamp(current_version)
def main():
wait_for_mysql()
os.chdir(installdir)
check_upgrade()
if __name__ == '__main__':
main()

View file

@ -0,0 +1,287 @@
# coding: UTF-8
from __future__ import print_function
from ConfigParser import ConfigParser
from contextlib import contextmanager
import os
import datetime
from os.path import abspath, basename, exists, dirname, join, isdir, expanduser
import platform
import sys
import subprocess
import time
import logging
import logging.config
import click
import termcolor
import colorlog
logger = logging.getLogger('.utils')
DEBUG_ENABLED = os.environ.get('SEAFILE_DOCKER_VERBOSE', '').lower() in ('true', '1', 'yes')
def eprint(*a, **kw):
kw['file'] = sys.stderr
print(*a, **kw)
def identity(msg, *a, **kw):
return msg
colored = identity if not os.isatty(sys.stdin.fileno()) else termcolor.colored
red = lambda s: colored(s, 'red')
green = lambda s: colored(s, 'green')
def underlined(msg):
return '\x1b[4m{}\x1b[0m'.format(msg)
def sudo(*a, **kw):
call('sudo ' + a[0], *a[1:], **kw)
def _find_flag(args, *opts, **kw):
is_flag = kw.get('is_flag', False)
if is_flag:
return any([opt in args for opt in opts])
else:
for opt in opts:
try:
return args[args.index(opt) + 1]
except ValueError:
pass
def call(*a, **kw):
dry_run = kw.pop('dry_run', False)
quiet = kw.pop('quiet', DEBUG_ENABLED)
cwd = kw.get('cwd', os.getcwd())
check_call = kw.pop('check_call', True)
reduct_args = kw.pop('reduct_args', [])
if not quiet:
toprint = a[0]
args = [x.strip('"') for x in a[0].split() if '=' not in x]
for arg in reduct_args:
value = _find_flag(args, arg)
toprint = toprint.replace(value, '{}**reducted**'.format(value[:3]))
logdbg('calling: ' + green(toprint))
logdbg('cwd: ' + green(cwd))
kw.setdefault('shell', True)
if not dry_run:
if check_call:
return subprocess.check_call(*a, **kw)
else:
return subprocess.Popen(*a, **kw).wait()
@contextmanager
def cd(path):
path = expanduser(path)
olddir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(olddir)
def must_makedir(p):
p = expanduser(p)
if not exists(p):
logger.info('created folder %s', p)
os.makedirs(p)
else:
logger.debug('folder %s already exists', p)
def setup_colorlog():
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
'colored': {
'()': 'colorlog.ColoredFormatter',
'format': "%(log_color)s[%(asctime)s]%(reset)s %(blue)s%(message)s",
'datefmt': '%m/%d/%Y %H:%M:%S',
},
},
'handlers': {
'default': {
'level': 'INFO',
'formatter': 'colored',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'INFO',
'propagate': True
},
'django.request': {
'handlers': ['default'],
'level': 'WARN',
'propagate': False
},
}
})
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def setup_logging(level=logging.INFO):
kw = {
'format': '[%(asctime)s][%(module)s]: %(message)s',
'datefmt': '%m/%d/%Y %H:%M:%S',
'level': level,
'stream': sys.stdout
}
logging.basicConfig(**kw)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARNING)
def get_process_cmd(pid, env=False):
env = 'e' if env else ''
try:
return subprocess.check_output('ps {} -o command {}'.format(env, pid),
shell=True).strip().splitlines()[1]
# except Exception, e:
# print(e)
except:
return None
def get_match_pids(pattern):
pgrep_output = subprocess.check_output(
'pgrep -f "{}" || true'.format(pattern),
shell=True).strip()
return [int(pid) for pid in pgrep_output.splitlines()]
def ask_for_confirm(msg):
confirm = click.prompt(msg, default='Y')
return confirm.lower() in ('y', 'yes')
def confirm_command_to_run(cmd):
if ask_for_confirm('Run the command: {} ?'.format(green(cmd))):
call(cmd)
else:
sys.exit(1)
def git_current_commit():
return get_command_output('git rev-parse --short HEAD').strip()
def get_command_output(cmd):
shell = not isinstance(cmd, list)
return subprocess.check_output(cmd, shell=shell)
def ask_yes_or_no(msg, prompt='', default=None):
print('\n' + msg + '\n')
while True:
answer = raw_input(prompt + ' [yes/no] ').lower()
if not answer:
continue
if answer not in ('yes', 'no', 'y', 'n'):
continue
if answer in ('yes', 'y'):
return True
else:
return False
def git_branch_exists(branch):
return call('git rev-parse --short --verify {}'.format(branch)) == 0
def to_unicode(s):
if isinstance(s, str):
return s.decode('utf-8')
else:
return s
def to_utf8(s):
if isinstance(s, unicode):
return s.encode('utf-8')
else:
return s
def git_commit_time(refspec):
return int(get_command_output('git log -1 --format="%ct" {}'.format(
refspec)).strip())
def get_seafile_version():
return os.environ['SEAFILE_VERSION']
def get_install_dir():
return join('/opt/seafile/' + get_conf('SEAFILE_SERVER', 'seafile-server') + '-{}'.format(get_seafile_version()))
def get_script(script):
return join(get_install_dir(), script)
_config = None
def get_conf(key, default=None):
key = key.upper()
return os.environ.get(key, default)
def _add_default_context(context):
default_context = {
'current_timestr': datetime.datetime.now().strftime('%m/%d/%Y %H:%M:%S'),
}
for k in default_context:
context.setdefault(k, default_context[k])
def render_template(template, target, context):
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(dirname(template)))
_add_default_context(context)
content = env.get_template(basename(template)).render(**context)
with open(target, 'w') as fp:
fp.write(content)
def logdbg(msg):
if DEBUG_ENABLED:
msg = '[debug] ' + msg
loginfo(msg)
def loginfo(msg):
msg = '[{}] {}'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), green(msg))
eprint(msg)
def cert_has_valid_days(cert, days):
assert exists(cert)
secs = 86400 * int(days)
retcode = call('openssl x509 -checkend {} -noout -in {}'.format(secs, cert), check_call=False)
return retcode == 0
def get_version_stamp_file():
return '/shared/seafile/seafile-data/current_version'
def read_version_stamp(fn=get_version_stamp_file()):
assert exists(fn), 'version stamp file {} does not exist!'.format(fn)
with open(fn, 'r') as fp:
return fp.read().strip()
def update_version_stamp(version, fn=get_version_stamp_file()):
with open(fn, 'w') as fp:
fp.write(version + '\n')
def wait_for_mysql():
while not exists('/var/run/mysqld/mysqld.sock'):
logdbg('waiting for mysql server to be ready')
time.sleep(2)
logdbg('mysql server is ready')
def wait_for_nginx():
while True:
logdbg('waiting for nginx server to be ready')
output = get_command_output('netstat -nltp')
if ':80 ' in output:
logdbg(output)
logdbg('nginx is ready')
return
time.sleep(2)
def replace_file_pattern(fn, pattern, replacement):
with open(fn, 'r') as fp:
content = fp.read()
with open(fn, 'w') as fp:
fp.write(content.replace(pattern, replacement))
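`call()` only logs the commands it executes when `DEBUG_ENABLED` is set, which is controlled by the `SEAFILE_DOCKER_VERBOSE` environment variable at the top of this module. A sketch for turning that on (other options as in the earlier examples):
```sh
# "true", "1" or "yes" (case-insensitive) all enable debug logging of every call().
docker run -d --name seafile \
  -e SEAFILE_DOCKER_VERBOSE=true \
  -v /opt/seafile-data:/shared \
  -p 80:80 \
  seafileltd/seafile:latest
```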

View file

@ -0,0 +1,3 @@
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
# min hour dayofmonth month dayofweek user command
0 0 1 * * root /scripts/ssl.sh {{ ssl_dir }} {{ domain }}

View file

@ -0,0 +1,81 @@
# -*- mode: nginx -*-
# Auto generated at {{ current_timestr }}
{% if https -%}
server {
listen 80;
server_name _ default_server;
rewrite ^ https://{{ domain }}$request_uri? permanent;
}
{% endif -%}
server {
{% if https -%}
listen 443;
ssl on;
ssl_certificate /shared/ssl/{{ domain }}.crt;
ssl_certificate_key /shared/ssl/{{ domain }}.key;
ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;
# TODO: More SSL security hardening: ssl_session_tickets & ssl_dhparam
# ssl_session_tickets on;
# ssl_session_ticket_key /etc/nginx/sessionticket.key;
# ssl_session_cache shared:SSL:10m;
# ssl_session_timeout 10m;
{% else -%}
listen 80;
{% endif -%}
server_name {{ domain }};
client_max_body_size 10m;
location / {
proxy_pass http://127.0.0.1:8000/;
proxy_read_timeout 310s;
proxy_set_header Host $host;
proxy_set_header Forwarded "for=$remote_addr;proto=$scheme";
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Connection "";
proxy_http_version 1.1;
}
location /seafhttp {
rewrite ^/seafhttp(.*)$ $1 break;
proxy_pass http://127.0.0.1:8082;
client_max_body_size 0;
proxy_connect_timeout 36000s;
proxy_read_timeout 36000s;
}
location /seafdav {
client_max_body_size 0;
fastcgi_pass 127.0.0.1:8080;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_script_name;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
access_log /var/log/nginx/seafdav.access.log;
error_log /var/log/nginx/seafdav.error.log;
}
location /media {
root /opt/seafile/seafile-server-latest/seahub;
}
# For letsencrypt
location /.well-known/acme-challenge/ {
alias /var/www/challenges/;
try_files $uri =404;
}
}
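The rendered result of this template is written to `/etc/nginx/sites-enabled/seafile.nginx.conf` by `generate_local_nginx_conf()`. A hedged way to inspect and validate it from the host, assuming the container name `seafile`:
```sh
# Print the generated config, then let nginx check its syntax before a reload.
docker exec seafile cat /etc/nginx/sites-enabled/seafile.nginx.conf
docker exec seafile nginx -t
```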

View file

@ -18,7 +18,7 @@ fi
# Do it
(
set +e
$SEAFILE_DIR/seaf-gc.sh | tee -a /var/log/gc.log
$SEAFILE_DIR/seaf-gc.sh "$@" | tee -a /var/log/gc.log
# We want to preserve the exit code of seaf-gc.sh
exit "${PIPESTATUS[0]}"
)

View file

@ -1,9 +1,9 @@
server_version=6.3.7
base_image=seafileltd/base:16.04
base_image_squashed=seafileltd/base:16.04-squashed
pro_base_image=seafileltd/pro-base:16.04
pro_base_image_squashed=seafileltd/pro-base:16.04-squashed
base_image=seafileltd/base:18.04
base_image_squashed=seafileltd/base:18.04-squashed
pro_base_image=seafileltd/pro-base:18.04
pro_base_image_squashed=seafileltd/pro-base:18.04-squashed
server_image=seafileltd/seafile:$(server_version)
server_image_squashed=seafileltd/seafile:$(server_version)-squashed
pro_server_image=seafileltd/seafile-pro:$(server_version)
@ -17,9 +17,9 @@ all:
@echo
base:
docker pull phusion/baseimage:0.9.19
docker-squash --tag phusion/baseimage:latest phusion/baseimage:0.9.19
docker tag phusion/baseimage:latest phusion/baseimage:0.9.19
docker pull phusion/baseimage:0.11
docker-squash --tag phusion/baseimage:latest phusion/baseimage:0.11
docker tag phusion/baseimage:latest phusion/baseimage:0.11
cd base && docker build -t $(base_image) .
docker-squash --tag $(base_image_squashed) $(base_image)
docker tag $(base_image_squashed) $(base_image)

View file

@ -1,6 +1,6 @@
# Latest phusion baseimage as of 20180412, based on ubuntu 16.04
# Latest phusion baseimage as of 20180412, based on ubuntu 18.04
# See https://hub.docker.com/r/phusion/baseimage/tags/
FROM phusion/baseimage:0.10.1
FROM phusion/baseimage:0.11
ENV UPDATED_AT=20180412 \
DEBIAN_FRONTEND=noninteractive
@ -15,7 +15,7 @@ RUN apt-get install -qq -y vim htop net-tools psmisc git wget curl
# Guideline for installing python libs: if a lib has a C component (e.g.
# python-imaging depends on libjpeg/libpng), we install it with apt-get.
# Otherwise we install it with pip.
RUN apt-get install -y python2.7-dev python-imaging python-ldap python-mysqldb
RUN apt-get install -y python2.7-dev python-ldap python-mysqldb zlib1g-dev libmemcached-dev gcc
RUN curl -sSL -o /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py && \
python /tmp/get-pip.py && \
rm -rf /tmp/get-pip.py && \

View file

@ -9,7 +9,8 @@ colorlog==2.7.0
Jinja2==2.8
MarkupSafe==0.23 # via jinja2
prettytable==0.7.2
python-memcached==1.58
six==1.10.0 # via python-memcached
termcolor==1.1.0
urllib3==1.19
Pillow==4.3.0
pylibmc==1.6.0
django-pylibmc==0.6.1

View file

@ -1,7 +1,12 @@
FROM seafileltd/base:16.04
FROM seafileltd/base:18.04
# syslog-ng and syslog-forwarder would mess up the container stdout, not good
# when debugging/upgrading.
# Fixing the "Sub-process /usr/bin/dpkg returned an error code (1)",
# when RUN apt-get
RUN mkdir -p /usr/share/man/man1
RUN apt update
RUN apt-get install -y openjdk-8-jre libmemcached-dev zlib1g-dev pwgen curl openssl poppler-utils libpython2.7 libreoffice \
@ -9,7 +14,12 @@ libreoffice-script-provider-python ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy py
RUN apt-get install -y tzdata python-pip python-setuptools python-urllib3 python-ldap python-ceph
RUN pip install pylibmc django-pylibmc boto twilio oss2
# The S3 storage, OSS storage, PSD online preview, etc.
# depend on the following Python packages:
RUN pip install boto==2.43.0 \
oss2==2.3.0 \
psd-tools==1.4 \
pycryptodome==3.7.2 \
twilio==5.7.0
RUN apt clean

View file

@ -1,4 +1,4 @@
FROM seafileltd/pro-base:16.04
FROM seafileltd/pro-base:18.04
WORKDIR /opt/seafile
ENV SEAFILE_VERSION=6.3.7 SEAFILE_SERVER=seafile-pro-server

View file

@ -4,7 +4,16 @@
server {
listen 80;
server_name _ default_server;
rewrite ^ https://{{ domain }}$request_uri? permanent;
# allow certbot to connect to the challenge location via HTTP port 80,
# otherwise the renewal request will fail
location /.well-known/acme-challenge/ {
alias /var/www/challenges/;
try_files $uri =404;
}
location / {
rewrite ^ https://{{ domain }}$request_uri? permanent;
}
}
{% endif -%}

View file

@ -1,4 +1,4 @@
FROM seafileltd/base:16.04
FROM seafileltd/base:18.04
WORKDIR /opt/seafile
RUN mkdir -p /etc/my_init.d

View file

@ -4,7 +4,17 @@
server {
listen 80;
server_name _ default_server;
rewrite ^ https://{{ domain }}$request_uri? permanent;
# allow certbot to connect to the challenge location via HTTP port 80,
# otherwise the renewal request will fail
location /.well-known/acme-challenge/ {
alias /var/www/challenges/;
try_files $uri =404;
}
location / {
rewrite ^ https://{{ domain }}$request_uri? permanent;
}
}
{% endif -%}

View file

@ -18,7 +18,7 @@ fi
# Do it
(
set +e
$SEAFILE_DIR/seaf-gc.sh | tee -a /var/log/gc.log
$SEAFILE_DIR/seaf-gc.sh "$@" | tee -a /var/log/gc.log
# We want to preserve the exit code of seaf-gc.sh
exit "${PIPESTATUS[0]}"
)