Repository: mritd/dockerfile Branch: master Commit: 6686c3423f39 Files: 88 Total size: 327.9 KB Directory structure: gitextract_lmye4wqf/ ├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── acng/ │ └── Dockerfile ├── activemq/ │ └── Dockerfile ├── adb/ │ ├── Dockerfile │ ├── adbkey │ ├── adbkey.pub │ └── update-platform-tools.sh ├── alpine/ │ └── Dockerfile ├── asuswrt-merlin-build/ │ ├── Dockerfile │ ├── README.md │ ├── VERSION │ ├── build.sh │ ├── download_merlin.sh │ └── hooks/ │ └── build ├── caddy/ │ └── Dockerfile ├── cfssl-build/ │ ├── Dockerfile │ └── build.sh ├── chrome-headless/ │ ├── Dockerfile │ └── entrypoint.sh ├── confluence/ │ ├── Dockerfile │ ├── atlassian-agent.jar │ └── hijack.sh ├── demo/ │ ├── Dockerfile │ └── index.html ├── docker-kubectl/ │ └── Dockerfile ├── elastalert/ │ └── Dockerfile ├── fpm/ │ └── Dockerfile ├── frp/ │ └── Dockerfile ├── gh-pages/ │ ├── Dockerfile │ └── Gemfile ├── html/ │ ├── Dockerfile │ └── landscape-animation-experiment/ │ ├── README.txt │ ├── css/ │ │ └── style.css │ ├── index.html │ ├── js/ │ │ └── index.js │ └── license.txt ├── jira/ │ ├── Dockerfile │ ├── atlassian-agent.jar │ └── hijack.sh ├── mattermost/ │ ├── Dockerfile │ ├── README.md │ └── entrypoint.sh ├── metricbeat/ │ ├── Dockerfile │ └── docker-entrypoint.sh ├── owncloud/ │ ├── Dockerfile │ ├── docker-entrypoint.sh │ └── opcache-recommended.ini ├── privoxy/ │ └── Dockerfile ├── puppeteer-base/ │ └── Dockerfile ├── rssbot/ │ └── Dockerfile ├── shadowsocks/ │ ├── Dockerfile │ ├── README.md │ ├── entrypoint.sh │ └── runit/ │ ├── kcptun/ │ │ └── run │ └── shadowsocks/ │ └── run ├── simple-obfs/ │ └── Dockerfile ├── sniproxy/ │ ├── Dockerfile │ ├── entrypoint.sh │ └── sniproxy.conf ├── swagger-editor/ │ └── Dockerfile ├── teleport/ │ ├── Dockerfile │ └── teleport.yaml ├── time-machine/ │ ├── Dockerfile │ ├── README.md │ ├── conf/ │ │ ├── afp.conf │ │ └── afpd.service │ ├── entrypoint.sh │ └── services/ │ ├── avahi/ │ │ └── run │ └── netatalk/ │ └── 
run ├── tor/ │ ├── Dockerfile │ ├── README.md │ ├── entrypoint.sh │ └── torrc ├── twemproxy/ │ ├── Dockerfile │ ├── config.yml │ └── entrypoint.sh ├── upsource/ │ └── Dockerfile ├── v2ray/ │ ├── Dockerfile │ ├── README.md │ └── entrypoint.sh ├── videovip/ │ ├── Dockerfile │ └── vip/ │ └── index.html ├── yearning/ │ └── Dockerfile └── zeroclipboard/ ├── Dockerfile └── Gemfile ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ gcr-registry/docker-entrypoint.sh gcr-registry/registry/*.json goflyway/goflyway mritd/vippasswd atlassian-confluence/atlassian-agent.jar atlassian-jira/atlassian-agent.jar rap2-dolores/config.prod.ts ================================================ FILE: .gitmodules ================================================ [submodule "ytpay"] path = ytpay url = git@github.com:ytpay/dockerfiles.git ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2017 mritd Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ ## Dockerfile ### ⚠️⚠️⚠️ Note: Some build work of docker image is being migrated to earthly, you may not see the update commit in the current repo. ### ⚠️⚠️⚠️ Earthfile has better scalability and is more convenient for cross-compilation; the migrated dockerfiles can be viewed in [https://github.com/mritd/autobuild](https://github.com/mritd/autobuild). --- This repository contains some dockerfiles of personally created docker images; it will be maintained for long periods if necessary. **The dockerfile for docker images running in a production environment has been** **moved to [ytpay/dockerfiles](https://github.com/ytpay/dockerfiles). such as `all in one build image`、`jdk`、`tomcat`;** **dockerfiles for these docker images will be maintained for a long time** ## Stargazers over time [![Stargazers over time](https://starcharts.herokuapp.com/mritd/dockerfile.svg)](https://starcharts.herokuapp.com/mritd/dockerfile) ## JetBrains Thanks to JetBrains for providing IDE support for this project, click to buy JetBrains IDE license to support the strongest IDE in the universe.
[![JetBrains](jetbrains.jpeg)](https://www.jetbrains.com/) ================================================ FILE: acng/Dockerfile ================================================ FROM sameersbn/apt-cacher-ng LABEL maintainer="mritd " RUN set -ex \ && apt update \ && apt install tzdata -y \ && ln -sf /dev/stdout /var/log/apt-cacher-ng/apt-cacher.log \ && ln -sf /dev/stderr /var/log/apt-cacher-ng/apt-cacher.err \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apt autoremove -y \ && apt autoclean -y \ && rm -rf /var/lib/apt/lists/* ENTRYPOINT ["/sbin/entrypoint.sh"] CMD ["/usr/sbin/apt-cacher-ng"] ================================================ FILE: activemq/Dockerfile ================================================ FROM openjdk:11.0.2-jre-slim-stretch LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV ACTIVEMQ_VERSION 5.15.8 ENV ACTIVEMQ_MQTT 1883 ENV ACTIVEMQ_AMQP 5672 ENV ACTIVEMQ_UI 8161 ENV ACTIVEMQ_STOMP 61613 ENV ACTIVEMQ_WS 61614 ENV ACTIVEMQ_TCP 61616 ENV ACTIVEMQ_HOME /opt/activemq ENV ACTIVEMQ apache-activemq-${ACTIVEMQ_VERSION} ENV ACTIVEMQ_DOWNLOAD_URL https://archive.apache.org/dist/activemq/${ACTIVEMQ_VERSION}/${ACTIVEMQ}-bin.tar.gz ENV ACTIVEMQ_SHA512_VAL 8c9b3216a0378f6377a9ba35f23915a3a52a1c15ac7b316bc06781d6a6ba83ce775534aa0054bd1aa37fb4d285946f914dbb21a14cc485e180a0d86c834df02e RUN apt update \ && apt upgrade -y \ && apt install bash tzdata curl -y \ && curl ${ACTIVEMQ_DOWNLOAD_URL} -o ${ACTIVEMQ}-bin.tar.gz \ && if [ "${ACTIVEMQ_SHA512_VAL}" != "$(sha512sum ${ACTIVEMQ}-bin.tar.gz | awk '{print($1)}')" ]; then \ echo "sha512 values doesn't match! exiting." 
&& exit 1; \ fi \ && tar xzf ${ACTIVEMQ}-bin.tar.gz -C /opt \ && ln -s /opt/${ACTIVEMQ} ${ACTIVEMQ_HOME} \ && useradd activemq -U -d ${ACTIVEMQ_HOME} -s /usr/sbin/nologin \ && chown -R activemq:activemq /opt/${ACTIVEMQ} \ && chown -h activemq:activemq ${ACTIVEMQ_HOME} \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apt clean USER activemq WORKDIR ${ACTIVEMQ_HOME} EXPOSE ${ACTIVEMQ_TCP} EXPOSE ${ACTIVEMQ_AMQP} EXPOSE ${ACTIVEMQ_STOMP} EXPOSE ${ACTIVEMQ_MQTT} EXPOSE ${ACTIVEMQ_WS} EXPOSE ${ACTIVEMQ_UI} CMD ["/bin/bash", "-c", "bin/activemq console"] ================================================ FILE: adb/Dockerfile ================================================ FROM frolvlad/alpine-glibc:alpine-3.9_glibc-2.29 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV PATH ${PATH}:/opt/platform-tools COPY update-platform-tools.sh /update-platform-tools.sh COPY adbkey* /root/.android/ RUN apk upgrade --update \ && apk add bash tzdata wget ca-certificates xmlstarlet \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && /update-platform-tools.sh \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* EXPOSE 5037 CMD ["adb", "-a", "-P", "5037", "server", "nodaemon"] ================================================ FILE: adb/adbkey ================================================ -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9KB09pq3GE2ZJ q8UDXdZ0A4QLxta/35KImWs8BEIx2fXd/wV+UKk36crDpafaDNRa3mw86O40M2xi +/zTmxzBWTxZCwdJvNJLX3BJe97QYeFzQd6z+sLdanMYfXhcv6aJ1F3v+xAHKlfQ 0fObTxZL1Hu/wkmKgkFABcoN5izEU2z4ezspsmwDFRGzID9FJMJUrcnPVbIyEfaG lUOOun/quZSxv13uHFvB91Ut/qvNI5AGNi4bAuBfDLl73pXy7JVJt+6fRmlpHvYy uElybWA0hLZPzg6G5yZxzY4YtspbNFGWCbcAB7Bw0WE8qXkcSXCAQPb30GRKPcIn 7IGHTjj9AgMBAAECggEAIHKDIZI3+ORXBYgrkXpFUT1RJ1wCdoN5dWkr1e29FSQY Yu5eGN7pSCgYmhsOgf71ZmkFFCW4xseTbh9frUTMV/Zgvb2AkIuNX1SNxG01OXWw 4L6J30HCr4yVFbxgKzjM7pO5UXM1uqTuz2lUam3Um0lluO0xBEt3ue3ETUIQp9SD 
tsaMZ97H6OgtMjrlsAPpaJia/Ix+1KZuXm7MoXCrz15K5BdQ672r/vo89JZUMsOF P9rKySQoOfdpBqNk9C6NchfOQfXhuvJvRW0NJf+4xd1aQIt01N79Y4xJwjmZGYsk znyJ/WWYk6iC9dFOdv3a5cjGfWhb0nqCuHOeEHO72QKBgQD3b20vuN1hv5QrKmVf HSjEtlutNdedIPB6t3DLgYf1sIZkpgITjYcqP4/wvzx4LfcvDY+xb+vMXXM0HsRb omp7/hZxNAg65aylWNfDT14Nm54DgX65KwfrWDu/joqbbyvZ08MFuymoinySs6i6 3fwcZKhtRqQvKcg+ZlFokWSQMwKBgQDDtEUIyilYmwlBYlWuCZ2u6F/IxfRrHjf1 e1DZglnoF3KrXnESvp3aR1/Jv4YFUx9DED2tQ8apqMxTLHPSNjH/DDSts0ofAoJX mD3UW1Tw6nd6hDeeldpaDqV/U/HnKj1tStSYW7GsDjPdX/ZOp66z/C2kGmeFz5Dz OY9KPLEiDwKBgQDZpYLGencJF0pO2eEHVA/bUIi9iGHbTfEaEKe/6nVccOUWPUwQ ROqDCBwl6SFYmR4Xnncp3cftILpIO1P/QpMl8+9rrhgbLpG5c7d+jh6uG5dXgB2m 5Sn3IsqTid90L8rDtViTfvl6zi4boLqnfMHZe9UHIh8jeT4xXTD4qQNrLwKBgEAZ FhdH53ze4owowflLqvqzn1OqCmDfN+LOLe/fssTCkUsxloVWK2tnvybb9PBfhji3 5AuQzEubPrjrMVAjcgKgI8zUkS1Q7BH2iiG4fDyf/twA3Bqz6B1g+LGYc/2LpyzZ uoHgXnQE/tW97XVblGvc57H89/Uqw8X2D0l4UWffAoGAJdX9hO0JP32lxa7rOMIu lLGb7Wg45gO5w53KTj5HSs5mgbIBt8fc7xBYI1wztNjkEvghu3PO5VNMygD2llO4 U74ya7vR5G5vx559QZmwzKropAo+yqogs/nDafjYZq1yI1VsUOL4J351jZZcH5WQ 1xT73Cm28XHkPGbqzUX+b74= -----END PRIVATE KEY----- ================================================ FILE: adb/adbkey.pub ================================================ QAAAAKtb1vH9OE6Hgewnwj1KZND39kCAcEkceak8YdFwsAcAtwmWUTRbyrYYjs1xJueGDs5PtoQ0YG1ySbgy9h5paUaf7rdJlezyld57uQxf4AIbLjYGkCPNq/4tVffBWxzuXb+xlLnqf7qOQ5WG9hEyslXPya1UwiRFPyCzERUDbLIpO3v4bFPELOYNygVAQYKKScK/e9RLFk+b89HQVyoHEPvvXdSJpr9ceH0Yc2rdwvqz3kFz4WHQ3ntJcF9L0rxJBwtZPFnBHJvT/PtibDM07ug8bN5a1Azap6XDyuk3qVB+Bf/d9dkxQgQ8a5mIkt+/1sYLhAN01l0DxatJZhPGraY9HSi9BbT4hrBiWiv55ef0MZYgLZp3lDdd/3bQAntfnZZP43f4ekZvpxjb3glF8E9MXcmrlAS0ONRPAvisGjcNG/U3vlL2ZH74fpolt19gOxM28S0WWRd0+35YihYXBBfIFSof54VJbwIbsH+wImNkRzJG+06X05D/SkXH9fw1CSG8HYxR3a2//HqUuM+HogS4keBceJvI8DhQyQK973IcG9DKlYkDftSTszdXLtyoetqyZ7YHI+6itfGMoRWHHKFTP+JOkNBndCgfCGcvLhmvh1MrOhDINqI+N8iErA15Ct3Ghg/+44+5QVIcKE2m3N95POuhhtkMVDKCuGFA5aGufZsoAgEAAQA= @unknown ================================================ FILE: 
adb/update-platform-tools.sh ================================================ #!/usr/bin/env bash set -e PLATFORM="linux" REPO="https://dl.google.com/android/repository" REPOXML="${REPO}/repository-11.xml" fetch_repository_xml() { echo "Fetching ${REPOXML}" >&2 wget -q -O - "$REPOXML" } parse_repository_xml() { echo "Parsing repository" >&2 xmlstarlet sel -t -c "//sdk:platform-tool/sdk:archives/sdk:archive[contains(sdk:host-os,'linux')]" | xmlstarlet sel -t -v "//sdk:checksum | //sdk:url" } install_platform_tools() { local SHA="$1" local FILE_NAME="$2" local TMPFILE=$(mktemp) mkdir -p /opt echo "Fetching ${URL}" >&2 wget -O "$TMPFILE" "${REPO}/${FILE_NAME}" echo "Verifying sha1 checksum ${SHA}" >&2 echo "$SHA $TMPFILE" | sha1sum -sc echo "Removing previous version of platform tools if any" >&2 rm -rf /opt/platform-tools echo "Unpacking platform tools" >&2 unzip -d /opt "$TMPFILE" rm "$TMPFILE" echo "Platform tools installed!" >&2 } install_platform_tools $(fetch_repository_xml | parse_repository_xml) ================================================ FILE: alpine/Dockerfile ================================================ FROM alpine:3.13 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} RUN apk upgrade \ && apk add bash tzdata bind-tools busybox-extras ca-certificates libc6-compat wget curl \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* CMD ["/bin/bash"] ================================================ FILE: asuswrt-merlin-build/Dockerfile ================================================ FROM ubuntu:16.04 LABEL maintainer="mritd " ARG BUILD_DATE ARG VCS_REF ARG VERSION LABEL org.label-schema.build-date=$BUILD_DATE \ org.label-schema.name="Asuswrt Merlin Build" \ org.label-schema.description="Asuswrt Merlin 固件交叉编译环境" \ org.label-schema.url="https://mritd.me" \ org.label-schema.vcs-ref=$VCS_REF \ org.label-schema.vcs-url="https://github.com/mritd/dockerfile/tree/master/asuswrt-merlin-build" 
\ org.label-schema.vendor="mritd" \ org.label-schema.version=$VERSION \ org.label-schema.schema-version="1.0" ENV ASUSWRT_MERLIN_VERSION 380.66 COPY build.sh /root/build.sh COPY download_merlin.sh /root/download_merlin.sh RUN dpkg --add-architecture i386 \ && apt-get update -y \ && apt-get install -y \ sudo net-tools cron e2fsprogs wget vim openssl curl psmisc git \ heirloom-mailx autoconf automake bison bzip2 bsdtar diffutils \ sed file flex g++ gawk gcc-multilib gettext gperf groff-base \ zsh libncurses-dev libexpat1-dev libslang2 libssl-dev libtool \ libxml-parser-perl make patch perl pkg-config python shtool tar \ texinfo unzip zlib1g zlib1g-dev intltool autopoint libltdl7-dev \ lib32z1-dev lib32stdc++6 automake1.11 libelf-dev:i386 libelf1:i386 \ && apt-get autoremove -y \ && apt-get autoclean -y \ && rm -rf /var/lib/apt/lists/* \ && ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo "Asia/Shanghai" > /etc/timezone \ && git clone https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh \ && cp ~/.oh-my-zsh/templates/zshrc.zsh-template ~/.zshrc \ && chsh -s /bin/zsh \ && echo ". 
~/build.sh" >> /root/.zshrc CMD ["zsh"] ================================================ FILE: asuswrt-merlin-build/README.md ================================================ ## Asuswrt Merlin 固件交叉编译环境 [![](https://images.microbadger.com/badges/image/mritd/asuswrt-merlin-build.svg)](https://microbadger.com/images/mritd/asuswrt-merlin-build "Get your own image badge on microbadger.com") [![](https://images.microbadger.com/badges/version/mritd/asuswrt-merlin-build.svg)](https://microbadger.com/images/mritd/asuswrt-merlin-build "Get your own version badge on microbadger.com") > 本镜像基于 ubuntu 16.04 制作,参考了 `koolshare/koolshare-merlin-debian` 镜像(感谢原作者[Clang](https://github.com/clangcn));本镜像默认安装了大部分编译所需依赖包,**但尚未打包 Merlin 固件源码(打包后镜像体积 2G),使用时需要先从 [Merlin Release](https://github.com/RMerl/asuswrt-merlin/releases) 下载源码,并挂载到 `/home/asuswrt-merlin` 目录(如不想挂载,镜像内也提供了下载脚本),编译前请先执行 `/root/build.sh` 初始化相关环境变量(默认已经执行)** ### 1、下载源码 编译能够在 Merlin、Tomato 固件上运行的程序之前,需要先获取 Merlin 固件源码(需要其交叉编译工具链),下载地址可从 [Merlin Release](https://github.com/RMerl/asuswrt-merlin/releases) 获取 ``` sh export ASUSWRT_MERLIN_VERSION=380.66 wget https://github.com/RMerl/asuswrt-merlin/archive/${ASUSWRT_MERLIN_VERSION}.tar.gz tar -zxf ${ASUSWRT_MERLIN_VERSION}.tar.gz mv asuswrt-merlin-${ASUSWRT_MERLIN_VERSION} asuswrt-merlin ``` **如果 tar 命令解压出现 `Directory renamed before its status could be extracted` 错误,请安装 `bsdtar` 命令,Ubuntu 下执行 `sudo apt-get install -y bsdtar`;然后使用 `bsdtar` 解压,用法同 `tar` 命令** ### 2、运行编译环境 准备好 Merlin 源码后,只需要将其挂载到 `/home/asuswrt-merlin` 目录(当然可能你需要同时挂载你要编译程序的源码目录),并运行容器即可 ``` docker run -dt --name build -v /data/asuswrt-merlin:/home/asuswrt-merlin -v /data/curl-7.54.0:/root/curl-7.54.0 mritd/asuswrt-merlin-build ``` **`/data/asuswrt-merlin` 为刚刚下载的 Merlin 固件源码目录,`/data/curl-7.54.0` 为要编译的程序源码目录** ### 3、进入容器编译 容器运行后,可以通过 `docker ps` 查看其运行状态,并通过 `docker exec` 命令进入容器,**容器内默认已经安装了 `oh-my-zsh`,可以直接以 `zsh` 进入** ``` sh docker exec -it build zsh ``` 如果不习惯 `zsh` 也可以使用 `bash` 进入容器,只需替换命令即可;交叉编译时请确保 C 编译器为
`arm-linux-gcc`,即可以声明变量 `export CC=/home/asuswrt-merlin/release/src-rt-6.x.4708/toolchains/hndtools-arm-linux-2.6.36-uclibc-4.5.3/bin/arm-linux-gcc` ================================================ FILE: asuswrt-merlin-build/VERSION ================================================ 1.0.0 ================================================ FILE: asuswrt-merlin-build/build.sh ================================================ #!/bin/bash fun_set_text_color(){ COLOR_RED='\E[1;31m' COLOR_GREEN='\E[1;32m' COLOR_YELOW='\E[1;33m' COLOR_BLUE='\E[1;34m' COLOR_PINK='\E[1;35m' COLOR_PINKBACK_WHITEFONT='\033[45;37m' COLOR_GREEN_LIGHTNING='\033[32m \033[05m' COLOR_END='\E[0m' } main(){ echo -e "${COLOR_YELOW}============== Initialized build environment ==============${COLOR_END}" if [ -d "/home/asuswrt-merlin/tools/brcm" ] && [ -d "/home/asuswrt-merlin/release/src-rt-6.x.4708/toolchains/hndtools-arm-linux-2.6.36-uclibc-4.5.3" ]; then if [ ! -L /opt/brcm-arm ] || [ ! -L /opt/brcm ]; then echo -e -n "${COLOR_PINK}link brcm & brcm-arm${COLOR_END}" ln -s /home/asuswrt-merlin/tools/brcm /opt/brcm ln -s /home/asuswrt-merlin/release/src-rt-6.x.4708/toolchains/hndtools-arm-linux-2.6.36-uclibc-4.5.3 /opt/brcm-arm if [ -L /opt/brcm-arm ] && [ -L /opt/brcm ];then echo -e " ${COLOR_GREEN}done${COLOR_END}" else echo -e " ${COLOR_RED}failed${COLOR_END}" return 1 fi fi else echo -e "${COLOR_RED}[error] /home/asuswrt-merlin/ not found${COLOR_END}" return 1 fi echo -e -n "${COLOR_PINK}setting Environment...${COLOR_END}" CROSS_TOOLCHAINS_DIR=/opt/brcm-arm export PATH=$PATH:/opt/brcm/hndtools-mipsel-linux/bin:/opt/brcm/hndtools-mipsel-uclibc/bin:/opt/brcm-arm/bin export LD_LIBRARY_PATH=$CROSS_TOOLCHAINS_DIR/lib echo -e " ${COLOR_GREEN}done${COLOR_END}" #echo "$PATH" } fun_set_text_color main ================================================ FILE: asuswrt-merlin-build/download_merlin.sh ================================================ #!/bin/bash ASUSWRT_MERLIN_VERSION=380.66 wget 
https://github.com/RMerl/asuswrt-merlin/archive/${ASUSWRT_MERLIN_VERSION}.tar.gz bsdtar -zxf ${ASUSWRT_MERLIN_VERSION}.tar.gz mv asuswrt-merlin-${ASUSWRT_MERLIN_VERSION} /home/asuswrt-merlin ================================================ FILE: asuswrt-merlin-build/hooks/build ================================================ #!/bin/bash echo "Build hook running..." IMAGE_VERSION=`cat VERSION` docker build --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` \ --build-arg VCS_REF=`git rev-parse --short HEAD` \ --build-arg VERSION=$IMAGE_VERSION \ -t $IMAGE_NAME . ================================================ FILE: caddy/Dockerfile ================================================ FROM golang:1.16-alpine3.13 AS builder RUN set -e \ && apk upgrade \ && apk add jq curl git \ && export version=$(curl -s "https://api.github.com/repos/caddyserver/caddy/releases/latest" | jq -r .tag_name) \ && echo ">>>>>>>>>>>>>>> ${version} ###############" \ && go get -u github.com/caddyserver/xcaddy/cmd/xcaddy \ && xcaddy build ${version} --output /caddy \ --with github.com/caddy-dns/route53 \ --with github.com/caddy-dns/cloudflare \ --with github.com/caddy-dns/alidns \ --with github.com/caddy-dns/dnspod \ --with github.com/caddy-dns/gandi \ --with github.com/abiosoft/caddy-exec \ --with github.com/greenpau/caddy-trace \ --with github.com/hairyhenderson/caddy-teapot-module \ --with github.com/kirsch33/realip \ --with github.com/porech/caddy-maxmind-geolocation \ --with github.com/caddyserver/format-encoder \ --with github.com/caddyserver/replace-response \ --with github.com/imgk/caddy-trojan FROM alpine:3.13 AS dist LABEL maintainer="mritd " # See https://caddyserver.com/docs/conventions#file-locations for details ENV XDG_CONFIG_HOME /config ENV XDG_DATA_HOME /data ENV TZ Asia/Shanghai COPY --from=builder /caddy /usr/bin/caddy ADD https://raw.githubusercontent.com/caddyserver/dist/master/config/Caddyfile /etc/caddy/Caddyfile ADD 
https://raw.githubusercontent.com/caddyserver/dist/master/welcome/index.html /usr/share/caddy/index.html # set up nsswitch.conf for Go's "netgo" implementation # - https://github.com/golang/go/blob/go1.9.1/src/net/conf.go#L194-L275 # - docker run --rm debian:stretch grep '^hosts:' /etc/nsswitch.conf # NOTE: guarded with if/fi so the build does not fail when the file already exists # (a bare `[ ! -e … ] && …` returns non-zero in that case and aborts the RUN step) RUN if [ ! -e /etc/nsswitch.conf ]; then echo 'hosts: files dns' > /etc/nsswitch.conf; fi RUN set -e \ && apk upgrade \ && apk add bash tzdata mailcap \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* VOLUME /config VOLUME /data EXPOSE 80 EXPOSE 443 EXPOSE 2019 WORKDIR /srv CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"] ================================================ FILE: cfssl-build/Dockerfile ================================================ FROM golang:1.15-stretch LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} RUN set -ex \ && apt update \ && apt upgrade -y \ && apt install tzdata ruby ruby-dev rubygems fakeroot build-essential -y \ && gem install --no-document fpm \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/lib/apt/lists/* COPY build.sh /go/build.sh VOLUME /dist CMD ["/bin/bash"] ================================================ FILE: cfssl-build/build.sh ================================================ #!/usr/bin/env bash set -ex # fix: was `CGO_ENABLE=0` — misspelled and un-exported, so it never reached make; # the Go toolchain reads CGO_ENABLED, and it must be exported to affect child processes export CGO_ENABLED=0 SOURCE_DIR='/go/src/github.com/cloudflare/cfssl' git clone https://github.com/cloudflare/cfssl.git ${SOURCE_DIR} cd ${SOURCE_DIR} && make package-deb && mv *.deb /dist/ ================================================ FILE: chrome-headless/Dockerfile ================================================ FROM ubuntu:18.04 MAINTAINER mritd ENV TZ 'Asia/Shanghai' ENV CHROME_VERSION 67.0.3396.79 ENV CHROME_APT "deb https://dl.google.com/linux/chrome/deb/ stable main" RUN apt update -y && apt upgrade -y \ && apt install wget tzdata libnss3 libnss3-tools libfontconfig1 \
gnupg2 ca-certificates apt-transport-https inotify-tools -y \ && echo ${CHROME_APT} > /etc/apt/sources.list.d/google-chrome.list \ && wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add - \ && apt update -y && apt upgrade -y \ && apt install google-chrome-stable=${CHROME_VERSION}-1 -y \ && apt autoremove -y && apt autoclean -y \ && mkdir /data \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/lib/apt/lists/* /var/cache/apt/* COPY entrypoint.sh / EXPOSE 9222 VOLUME /data ENTRYPOINT ["/entrypoint.sh"] ================================================ FILE: chrome-headless/entrypoint.sh ================================================ #!/bin/bash set -e google-chrome-stable \ --disable-gpu \ --headless \ --no-sandbox \ --remote-debugging-address=0.0.0.0 \ --remote-debugging-port=9222 \ --user-data-dir=/data $@ ================================================ FILE: confluence/Dockerfile ================================================ FROM atlassian/confluence-server:7.4.1 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV AGENT_PATH /opt/atlassian-agent.jar COPY atlassian-agent.jar ${AGENT_PATH} COPY hijack.sh /hijack.sh RUN set -x \ && export DEBIAN_FRONTEND=noninteractive \ && apt update \ && apt upgrade -y \ && apt install tzdata -y \ && chown ${RUN_USER}:${RUN_GROUP} ${AGENT_PATH} \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && dpkg-reconfigure --frontend noninteractive tzdata \ && apt autoremove -y \ && apt autoclean -y CMD ["/hijack.sh"] ================================================ FILE: confluence/hijack.sh ================================================ #!/bin/bash export JAVA_OPTS="${JAVA_OPTS} -javaagent:${AGENT_PATH}" # If you want to use SECURE_SMTP, mount the /opt/atlassian/confluence/conf directory # and follow the link documentation to modify the configuration. 
# refs https://confluence.atlassian.com/doc/setting-up-a-mail-session-for-the-confluence-distribution-6328.html if [ "${JNDI_EMAIL}" == "true" ]; then mv ${CONFLUENCE_INSTALL_DIR}/confluence/WEB-INF/lib/javax.mail-*.jar ${CONFLUENCE_INSTALL_DIR}/lib/ fi /entrypoint.py -fg ================================================ FILE: demo/Dockerfile ================================================ FROM nginx:1.19-alpine LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} RUN apk upgrade --update \ && apk add bash tzdata curl wget ca-certificates \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* COPY index.html /usr/share/nginx/html/index.html COPY docker.png /usr/share/nginx/html/docker.png EXPOSE 80 443 CMD ["nginx", "-g", "daemon off;"] ================================================ FILE: demo/index.html ================================================ Running!

Your container is running!

docker
================================================ FILE: docker-kubectl/Dockerfile ================================================ FROM docker:20 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV KUBE_VERSION v1.21.1 ENV KUBECTL_DOWNLOAD_URL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl RUN apk upgrade --update \ && apk add bash curl tzdata wget ca-certificates git \ && wget -q ${KUBECTL_DOWNLOAD_URL} -O /usr/local/bin/kubectl \ && chmod +x /usr/local/bin/kubectl \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* CMD ["/bin/bash"] ================================================ FILE: elastalert/Dockerfile ================================================ FROM python:3.6.10-alpine3.11 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV VERSION v0.2.4 ENV SOURCE_DIR /usr/local/elastalert ENV CONFIG_DIR /etc/elastalert ENV DOWNLOAD_URL https://github.com/Yelp/elastalert/archive/${VERSION}.tar.gz RUN set -ex \ && apk upgrade \ && apk add bash wget curl ca-certificates tzdata openssl libmagic \ && apk add --virtual .build-deps tar openssl-dev libffi-dev gcc musl-dev \ && mkdir -p ${SOURCE_DIR} ${CONFIG_DIR} \ && wget ${DOWNLOAD_URL} -O elastalert.tar.gz \ && tar -zxf elastalert.tar.gz -C ${SOURCE_DIR} --strip-components 1 \ && (cd ${SOURCE_DIR} \ && pip install -r requirements.txt \ && python setup.py install) \ && cp ${SOURCE_DIR}/config.yaml.example ${CONFIG_DIR}/config.yaml \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del .build-deps \ && rm -rf ~/.cache elastalert.tar.gz ${SOURCE_DIR} /var/cache/apk/* ENTRYPOINT ["elastalert"] CMD ["--verbose","--config","/etc/elastalert/config.yaml"] ================================================ FILE: fpm/Dockerfile ================================================ FROM ruby:2.7.2-alpine3.12 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" 
ENV TZ ${TZ} RUN apk upgrade --update \ && apk add bash tzdata make gcc libc-dev tar git \ && git clone https://github.com/jordansissel/fpm.git \ && (cd fpm && make install) \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf fpm /var/cache/apk/* CMD ["/bin/bash"] ================================================ FILE: frp/Dockerfile ================================================ FROM alpine:3.11 LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} ENV VERSION 0.30.0 ENV DOWNLOAD_URL https://github.com/fatedier/frp/releases/download/v${VERSION}/frp_${VERSION}_linux_amd64.tar.gz RUN apk upgrade --update \ && apk add bash tzdata curl \ && curl -sSLO ${DOWNLOAD_URL} \ && tar -zxf frp_${VERSION}_linux_amd64.tar.gz \ && rm -f frp_${VERSION}_linux_amd64/LICENSE \ && mv frp_${VERSION}_linux_amd64/*.ini /etc \ && mv frp_${VERSION}_linux_amd64/* /usr/bin \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo "${TZ}" > /etc/timezone \ && apk del curl \ && rm -rf frp_${VERSION}_linux_amd64.tar.gz \ frp_${VERSION}_linux_amd64 \ /var/cache/apk/* CMD ["frps","-c","/etc/frps.ini"] ================================================ FILE: gh-pages/Dockerfile ================================================ FROM alpine:3.13 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} COPY Gemfile Gemfile RUN apk upgrade --update \ && apk add bash build-base libffi zlib libxml2 \ libxslt ruby ruby-io-console ruby-json yaml \ nodejs git perl tzdata \ && apk add --virtual .build-deps \ build-base libffi-dev zlib-dev libxml2-dev \ libxslt-dev ruby-dev \ && echo 'gem: --no-document' >> ~/.gemrc \ && cp ~/.gemrc /etc/gemrc \ && chmod uog+r /etc/gemrc \ && gem install bundler \ && bundle config build.jekyll --no-rdoc \ && bundle install \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del .build-deps \ && rm -rf /Gemfile* \ /var/cache/apk/* \ /usr/lib/lib/ruby/gems/*/cache/* \ ~/.gem 
WORKDIR /root CMD ["/bin/bash"] ================================================ FILE: gh-pages/Gemfile ================================================ source 'https://rubygems.org' require 'json' require 'open-uri' versions = JSON.parse(open('https://pages.github.com/versions.json').read) gem 'jekyll', versions['jekyll'] gem 'jekyll-sass-converter', versions['jekyll-sass-converter'] gem 'kramdown', versions['kramdown'] gem 'liquid', versions['liquid'] gem 'rouge', versions['rouge'] gem 'jemoji', versions['jemoji'] gem 'jekyll-mentions', versions['jekyll-mentions'] gem 'jekyll-redirect-from', versions['jekyll-redirect-from'] gem 'jekyll-sitemap', versions['jmekyll-sitemap'] gem 'jekyll-feed', versions['jekyll-feed'] gem 'jekyll-gist', versions['jekyll-gist'] gem 'jekyll-paginate', versions['jekyll-paginate'] gem 'github-pages-health-check', versions['github-pages-health-check'] gem 'jekyll-coffeescript', versions['jekyll-coffeescript'] gem 'jekyll-seo-tag', versions['jekyll-seo-tag'] gem 'github-pages', versions['github-pages'] gem 'jekyll-github-metadata', versions['jekyll-github-metadata'] gem 'html-pipeline', versions['html-pipeline'] gem 'listen', versions['listen'] gem 'sass', versions['sass'] gem 'safe_yaml', versions['safe_yaml'] gem 'html-proofer' ================================================ FILE: html/Dockerfile ================================================ FROM nginx:1.19.0-alpine LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} RUN apk upgrade --update \ && apk add bash tzdata curl wget ca-certificates \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /usr/share/nginx/html /var/cache/apk/* COPY landscape-animation-experiment /usr/share/nginx/html EXPOSE 80 443 CMD ["nginx", "-g", "daemon off;"] ================================================ FILE: html/landscape-animation-experiment/README.txt ================================================ A Pen created at CodePen.io. 
You can find this one at https://codepen.io/mritd/pen/BvMadM. Best in full view - http://louie.co.nz/25th_hour/ She's a resource hungry beast, downscale your browser window if the frame rate is going funky. ================================================ FILE: html/landscape-animation-experiment/css/style.css ================================================ @import url(https://fonts.googleapis.com/css?family=Open+Sans:300); /* Global Styles ---------------------------------------------- */ html {font-family: 'Open Sans', sans-serif; background: #0D133A; height:100%; font-size: 100%; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; -webkit-font-smoothing: antialiased;overflow: hidden;position: relative;} body {height:100%;overflow: hidden; margin: 0; font-size: 1em; line-height: 1.4; position: relative;} img { border: 0; -ms-interpolation-mode: bicubic; vertical-align: middle; } svg:not(:root) { overflow: hidden; } a{color: white;text-decoration: none;} /* Animation globals ---------------------------------------------- */ #landscape, .land, #bottom, .stags,.stag, .counter:before, #lensFlare, .sunMask{ -webkit-animation-duration: 60s; -webkit-animation-iteration-count: infinite; -webkit-animation-timing-function: linear; } /* Background gradients ---------------------------------------------- */ #sky{ height: 60%; margin-bottom: -6px; position: absolute; top: 0px; z-index: 2; } #sky-rect{height:100%;} #reflection, #sunMask{ height: 40%; position: absolute; top: 60%; z-index: 4; } #reflection-rect{height:100%;} #sunMask{ background: #0D133A; height: 40%; position: absolute; top: 60%; width: 100%; } /* Stag ---------------------------------------------- */ #stag{ position: absolute; bottom: 15.3%; width: 6.3%; left: 38%; z-index: 5; } .stag{ width:100%; position: absolute; bottom: 0px; } .stags{ -webkit-animation-name: stags; } @-webkit-keyframes stags{ 0% { fill:#0D141E;} 4% { fill:#101522;} 8% { fill:#121726;} 12% { fill:#141829;} 16% { fill:#1C1E3C;} 
20% { fill:#22214F;} 24% { fill:#262262;} 28% { fill:#1D4065;} 32% { fill:#125768;} 36% { fill:#1E4553;} 40% { fill:#1E404E;} 44% { fill:#1E3B49;} 48% { fill:#1D3643;} 52% { fill:#1C313E;} 56% { fill:#1C3344;} 60% { fill:#1C3449;} 64% { fill:#1B344F;} 68% { fill:#183454;} 72% { fill:#242B4A;} 76% { fill:#2B2241;} 80% { fill:#24203C;} 84% { fill:#1D1D37;} 88% { fill:#151A32;} 92% { fill:#14192C;} 96% { fill:#111725;} 100% { fill:#0D141E;} } #stag1{ -webkit-animation-name: stag_one; width: 108%; left: -20%; } @-webkit-keyframes stag_one{ 0% {opacity:1.0;} 15% {opacity:1.0;} 20% {opacity:0.0;} 90% {opacity:0.0;} 95% {opacity:1.0;} 100% {opacity:1.0;} } #stag2{ -webkit-animation-name: stag_two; } @-webkit-keyframes stag_two{ 0% {opacity:0.0;} 17% {opacity:0.0;} 20% {opacity:1.0;} 40% {opacity:1.0;} 45% {opacity:0.0;} 65% {opacity:0.0;} 70% {opacity:1.0;} 90% {opacity:1.0;} 95% {opacity:0.0;} 100% {opacity:0.0;} } #stag3{ -webkit-animation-name: stag_three; width: 144%; left: -10%; bottom: -3%; } @-webkit-keyframes stag_three{ 0% {opacity:0.0;} 15% {opacity:0.0;} 20% {opacity:0.0;} 40% {opacity:0.0;} 45% {opacity:1.0;} 65% {opacity:1.0;} 70% {opacity:0.0;} 100% {opacity:0.0;} } /* Sun ---------------------------------------------- */ .sunMask{ position:absolute; width:100%; height:100%; -webkit-mask-image: -webkit-gradient(linear, left 50%, left 60%, from(rgba(0,0,0,1)), to(rgba(0,0,0,0))); z-index: 4; mix-blend-mode: screen; -webkit-animation-name: sunFocus; } @-webkit-keyframes sunFocus{ 0% { -webkit-filter: blur(10px);} 16% {-webkit-filter: blur(10px);} 20% {-webkit-filter: blur(10px);} 25% {-webkit-filter: blur(5px);} 30% {-webkit-filter: blur(0px);} 80% {-webkit-filter: blur(0px);} 88% {-webkit-filter: blur(5px);} 100% {-webkit-filter: blur(10px);} } .sun{ width: 100%; padding-bottom: 100%; position:absolute; right: -51%; top: -330%; } .sun div{ background: transparent url(https://s3-us-west-2.amazonaws.com/s.cdpn.io/21555/sun.svg); position: absolute; top: 0; 
bottom: 0; left: 0; right: 0; } .suncrane{ animation: suncrane 60s linear infinite; position:absolute; width: 21%; height: 4%; background: transparent; margin: auto; top: 57%; left: 0; right: 0; } @keyframes suncrane{ 0% {transform:rotate(90deg);} 100% {transform:rotate(-270deg);} } .sun:before{ display:block; content:' '; animation: glare 60s linear infinite; position:absolute; width: 120%; height: 120%; background: url(https://s3-us-west-2.amazonaws.com/s.cdpn.io/21555/glare.svg) no-repeat scroll center; top: -10%; left: -10%; background-size: 100%; } @keyframes glare{ from {transform:rotate(90deg);opacity:0.0;} 30%{opacity:0.0;} 36%{opacity:1.0;} 68%{opacity:1.0;} 74%{opacity:0.0;} to {transform:rotate(450deg);opacity:0.0;} } .sun:after{display: block; content:' '; position:absolute;background: white;width: 10%;height: 10%;top: 45%;border-radius: 100%;margin: auto;left: 0;right: 0;box-shadow: 0px 0px 80px 30px white;} /* Clouds ---------------------------------------------- */ .clouds{ position: absolute; width: 100%; z-index: 4; mix-blend-mode: screen; height: 100%; } .clouds svg{ width: 60%; position: absolute; top: 51%; -webkit-filter: blur(2px); opacity: 0.4; left: -60%; -webkit-animation-duration: 60s; -webkit-animation-iteration-count: infinite; -webkit-animation-timing-function: linear; -webkit-animation-name: clouds; } @-webkit-keyframes clouds{ 0% {transform: translate3d(110%, 0px, 0px);opacity: 0.0;} 19%{opacity: 0.0;-webkit-filter: blur(5px);} 25%{opacity: 0.3;-webkit-filter: blur(2px);} 50%{opacity: 0.6;} 75%{opacity: 0.2;} 90%{opacity: 0.0;} 100% {transform: translate3d(150%, 0px, 0px);opacity: 0.0} } /* Lens flare ---------------------------------------------- */ .lighting{ width: 100%; height: 100%; position: absolute; z-index: 8; opacity: 0.3; -webkit-mask-image: -webkit-gradient(linear, left 50%, left 60%, from(rgba(0,0,0,1)), to(rgba(0,0,0,1))); mix-blend-mode: screen; pointer-events: none; -webkit-filter: blur(3px); } .lighting .suncrane{ 
width: 100%; } #lensFlare{ transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); -webkit-animation-name: flaring; } @-webkit-keyframes flaring{ 0% { transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); opacity: 0.0;} 28% { transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); opacity: 0.0; } 35% { transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); opacity: 1.0; } 70% { transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); opacity: 1.0; } 78% { transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); opacity: 0.0; } 100% { transform: rotate(16deg) translate3d(9%, -47%, 0px) scale(1); opacity: 0.0; } } /* Sun on lake twinkles ---------------------------------------------- */ .twinkleWrap{ position: absolute; z-index: 4; width:100%; height:100%; top:0; opacity: 0.55; } .twinkles{ width: 2.3%; position: absolute; right: 49.2%; top: 72.5%; animation: twinkles 60s linear infinite; } @-webkit-keyframes twinkles{ 0% {transform: translate(450%, 0%);opacity:0.0;} 32%{opacity:0.0;} 34%{opacity:1.0;} 36.5% {transform: translate(450%, 0%);} 54% {transform: translate(0%, 0%);} 72% {transform: translate(-450%, 0%);opacity:1.0;} 78%{opacity:0.0;} 100% {transform: translate(-450%, 0%);opacity:0.0;} } .twinkles:before{ content:' '; display: none; position:absolute; height: 600px; width:3px; background:red; top: -600px; left: 50%; } .twinkles svg{ width: 100%; position: absolute; top: 0; } #twinkle1{ animation: twinkle1 2s linear infinite; } @-webkit-keyframes twinkle1{ 0% {opacity:1.0;} 33.33% {opacity:0.0;} 66.66% {opacity:0.0;} 99.99% {opacity:1.0;} 100% {opacity:1.0;} } #twinkle2{ animation: twinkle2 2s linear infinite; } @-webkit-keyframes twinkle2{ 0% {opacity:0.0;} 33.33% {opacity:1.0;} 66.66% {opacity:0.0;} 99.99% {opacity:0.0;} 100% {opacity:0.0;} } #twinkle3{ animation: twinkle3 2s linear infinite; } @-webkit-keyframes twinkle3{ 0% {opacity:0.0;} 33.33% {opacity:0.0;} 66.66% {opacity:1.0;} 99.99% {opacity:0.0;} 100% 
{opacity:0.0;} } /* Vignette ---------------------------------------------- */ .vignette{ background: radial-gradient(transparent 60%, rgb(1, 14, 39) 130%); background-size: cover; height: 100%; z-index: 9; position: absolute; width: 100%; pointer-events: none; } /* Stars ---------------------------------------------- */ .stars{ height: 100%; z-index: 2; position: absolute; width: 100%; overflow: hidden; } .starWrap{ height: 60%; width: 100%; position:relative; } .starProject{ overflow: hidden; } .starReflect{ overflow: hidden; height: 40%; opacity: 0.9; top: 1%; } #stars{ position: absolute; width: 120%; border-radius: 100%; margin: auto; left: -10%; right: 0; animation: stars 120s linear infinite; transform: rotate(0deg); top: -35%; } @-webkit-keyframes stars{ 100% {transform: rotate(-360deg);} } #starReflection{ position: absolute; width: 120%; border-radius: 100%; margin: auto; left: -10%; right: 0; animation: starsReflect 120s linear infinite; transform: rotate(0deg); top:initial; bottom: -102%; } @-webkit-keyframes starsReflect{ 100% {transform: rotate(360deg);} } /* Sprites ----------------------------------------------- */ .spriteWrap{ height: 100%; width: 100%; position: absolute; z-index: 13; animation: sprites 60s linear infinite; pointer-events: none; } @-webkit-keyframes sprites{ 0% {opacity:0.8;} 20% {opacity:0.8;} 25%{opacity:0.0;} 73% {opacity:0.0;} 90% {opacity:0.8;} 100% {opacity:0.8;} } #sprites{ height: 100%; width: 100%; } /* Controls ---------------------------------------------- */ .controls{ position:absolute; top:0px; width: 20%; z-index: 10; background: rgba(0, 0, 0, 0.16); height: 100%; width: 380px; padding: 23px; -webkit-transform: translate3d(-250px, 0px, 0px); -moz-transform: translate3d(-250px, 0px, 0px); -o-transform: translate3d(-250px, 0px, 0px); -ms-transform: translate3d(-250px, 0px, 0px); transform: translate3d(-250px, 0px, 0px); opacity: 0; } .controls, .controls *{ -webkit-box-sizing: border-box; -moz-box-sizing: border-box;
box-sizing: border-box; -webkit-transition: all 0.25s ease; -moz-transition: all 0.25s ease; -o-transition: all 0.25s ease; transition: all 0.25s ease; } .controls:hover{ -webkit-transform: translate3d(0px, 0px, 0px); -moz-transform: translate3d(0px, 0px, 0px); -o-transform: translate3d(0px, 0px, 0px); -ms-transform: translate3d(0px, 0px, 0px); transform: translate3d(0px, 0px, 0px); opacity: 1.0; } .controls ul{ margin: 0px; padding: 0px; list-style: none; } .controls ul li{ border-bottom: 1px solid rgba(255, 255, 255, 0.09); color: white; } .controls ul li span.title{ display: inline-block; padding: 10px 0px; } .controls ul li a{ display: block; padding: 10px 0px; } .controls ul li a:hover{ padding-left: 10px; } .controls ul li a.active{} .controls ul li a.active:after{content: 'on';float: right;} .controls audio{ width: 100%; opacity: 0.2; position: relative; width: 80%; display: inline-block; top: 8px; float: right; } .controls audio:hover{opacity:1.0;} .noise{ width:100%; height:100%; background:transparent; opacity:0.03; z-index: 9; position: absolute; top: 0; pointer-events: none; } .noise.active{ background: transparent url(https://s3-us-west-2.amazonaws.com/s.cdpn.io/21555/static2.gif); opacity: 0.015; background-size: 620px; } .counter{ color: rgba(255, 255, 255, 0.12); position: absolute; width: calc(100% - 46px); bottom: 21px; text-align:justify; font-size: 0px; } .counter:before{ content: ':'; display:inline-block; position: absolute; -webkit-animation-name: timer; background: rgba(255, 255, 255, 0.03); text-align: right; left:0px; bottom: 0px; color: transparent; border-right: 1px solid rgba(255, 255, 255, 0.13); line-height: 34px; } @-webkit-keyframes timer{ 0% { width:0%; } 100% { width:100%; } } /* Landscape ---------------------------------------------- */ #landscape{ width: 100.02%; position: absolute; bottom: 11%; z-index: 5; -webkit-animation-name: focus; } @-webkit-keyframes focus{ 0% { -webkit-filter: blur(5px);} 16% {-webkit-filter: 
blur(2px);} 20% {-webkit-filter: blur(0px);} 80% {-webkit-filter: blur(0px);} 88% {-webkit-filter: blur(2px);} 100% {-webkit-filter: blur(5px);} } #bottom{ -webkit-animation-name: bottomFill; height: 100%; position: absolute; top: 88%; width: 100%; z-index: 5; } @-webkit-keyframes bottomFill{ 0% { background:#0D141E; } 4% { background:#101522; } 8% { background:#121726; } 12% { background:#141829; } 16% { background:#1C1E3C; } 20% { background:#22214F; } 24% { background:#262262; } 28% { background:#1D4065; } 32% { background:#125768; } 36% { background:#1E4553; } 40% { background:#1E404E; } 44% { background:#1E3B49; } 48% { background:#1D3643; } 52% { background:#1C313E; } 56% { background:#1C3344; } 60% { background:#1C3449; } 64% { background:#1B344F; } 68% { background:#183454; } 72% { background:#242B4A; } 76% { background:#2B2241; } 80% { background:#24203C; } 84% { background:#1D1D37; } 88% { background:#151A32; } 92% { background:#14192C; } 96% { background:#111725; } 100% { background:#0D141E; } } /* Land - layer 1 animation */ #landscape .layer1 { fill:#F1F2F2; -webkit-animation-name: layer1; } @-webkit-keyframes layer1{ 0% { fill:#244154; } 4% { fill:#344358; } 8% { fill:#42465D; } 12% { fill:#4F4761; } 16% { fill:#7E5773; } 20% { fill:#A3517F; } 24% { fill:#F3829F; } 28% { fill:#D4B2AF; } 32% { fill:#AEDABB; } 36% { fill:#A1D6D6; } 40% { fill:#9ED5DD; } 44% { fill:#9AD4E4; } 48% { fill:#97D3EA; } 52% { fill:#92D3F4; } 56% { fill:#95C8DA; } 60% { fill:#96BDC5; } 64% { fill:#96B3B2; } 68% { fill:#96AA9E; } 72% { fill:#AF866A; } 76% { fill:#C0633B; } 80% { fill:#9D5E51; } 84% { fill:#7B5960; } 88% { fill:#59546D; } 92% { fill:#484E64; } 96% { fill:#37475C; } 100% { fill:#244154; } } /* Land - layer 2 animation */ #landscape .layer2 { fill:#E6E7E8; -webkit-animation-name: layer2; } @-webkit-keyframes layer2{ 0% { fill:#0F2B46; } 4% { fill:#1C2D4A; } 8% { fill:#272E4E; } 12% { fill:#302F52; } 16% { fill:#663966; } 20% { fill:#913776; } 24% { fill:#D94A93; } 
28% { fill:#BB94AD; } 32% { fill:#76CCCE; } 36% { fill:#6BAEC9; } 40% { fill:#62A7CA; } 44% { fill:#59A0CB; } 48% { fill:#5099CC; } 52% { fill:#4692CF; } 56% { fill:#4D8FBD; } 60% { fill:#518CAF; } 64% { fill:#548AA1; } 68% { fill:#578793; } 72% { fill:#7E6768; } 76% { fill:#8F4244; } 80% { fill:#74404D; } 84% { fill:#593D55; } 88% { fill:#393B5D; } 92% { fill:#2C3655; } 96% { fill:#1F304E; } 100% { fill:#0F2B46; } } /* Land - layer 3 animation */ #landscape .layer3 { fill:#D1D3D4; -webkit-animation-name: layer3; } @-webkit-keyframes layer3{ 0% { fill:#0F2944; } 4% { fill:#1B2B47; } 8% { fill:#252C4B; } 12% { fill:#2E2D4E; } 16% { fill:#5F3663; } 20% { fill:#863572; } 24% { fill:#C8458D; } 28% { fill:#A48BAB; } 32% { fill:#68BFC7; } 36% { fill:#54A4C8; } 40% { fill:#4F9EC8; } 44% { fill:#4B98C7; } 48% { fill:#4691C7; } 52% { fill:#408BC8; } 56% { fill:#4688B7; } 60% { fill:#4984A9; } 64% { fill:#4C819C; } 68% { fill:#4E7E8F; } 72% { fill:#776167; } 76% { fill:#893E45; } 80% { fill:#6F3C4C; } 84% { fill:#543A52; } 88% { fill:#353758; } 92% { fill:#2A3351; } 96% { fill:#1E2E4A; } 100% { fill:#0F2944; } } /* Land - layer 4 animation */ #landscape .layer4 { fill:#BCBEC0; -webkit-animation-name: layer4; } @-webkit-keyframes layer4{ 0% { fill:#0F2841; } 4% { fill:#1A2945; } 8% { fill:#232A48; } 12% { fill:#2B2A4B; } 16% { fill:#59335F; } 20% { fill:#7C336D; } 24% { fill:#B84089; } 28% { fill:#9683A5; } 32% { fill:#57B5C1; } 36% { fill:#4798BD; } 40% { fill:#4391BC; } 44% { fill:#408BBB; } 48% { fill:#3D85BA; } 52% { fill:#397FBA; } 56% { fill:#3E7DAC; } 60% { fill:#407AA1; } 64% { fill:#427896; } 68% { fill:#44768B; } 72% { fill:#705B66; } 76% { fill:#823B46; } 80% { fill:#69394B; } 84% { fill:#503650; } 88% { fill:#323454; } 92% { fill:#27304D; } 96% { fill:#1C2C47; } 100% { fill:#0F2841; } } /* Land - layer 5 animation */ #landscape .layer5 { fill:#A7A9AC; -webkit-animation-name: layer5; } @-webkit-keyframes layer5{ 0% { fill:#0E263F; } 4% { fill:#192742; } 8% { 
fill:#212745; } 12% { fill:#292848; } 16% { fill:#51305B; } 20% { fill:#6E3068; } 24% { fill:#A23B82; } 28% { fill:#84799F; } 32% { fill:#44AABC; } 36% { fill:#3A8DB3; } 40% { fill:#3887B1; } 44% { fill:#3680AF; } 48% { fill:#347AAD; } 52% { fill:#3273AB; } 56% { fill:#3672A1; } 60% { fill:#387198; } 64% { fill:#396F90; } 68% { fill:#3A6E87; } 72% { fill:#695565; } 76% { fill:#7C3747; } 80% { fill:#64354A; } 84% { fill:#4B334D; } 88% { fill:#2E314F; } 92% { fill:#252D4A; } 96% { fill:#1B2A44; } 100% { fill:#0E263F; } } /* Land - layer 6 animation */ #landscape .layer6 { fill:#939598; -webkit-animation-name: layer6; } @-webkit-keyframes layer6{ 0% { fill:#0E243C; } 4% { fill:#18253F; } 8% { fill:#202542; } 12% { fill:#262544; } 16% { fill:#482C56; } 20% { fill:#612D63; } 24% { fill:#8D357C; } 28% { fill:#6F719B; } 32% { fill:#00A1BC; } 36% { fill:#2583A8; } 40% { fill:#287CA3; } 44% { fill:#2A759F; } 48% { fill:#2D6F9A; } 52% { fill:#2E6895; } 56% { fill:#2F6891; } 60% { fill:#30678D; } 64% { fill:#306788; } 68% { fill:#306683; } 72% { fill:#625064; } 76% { fill:#753347; } 80% { fill:#5E3249; } 84% { fill:#47304A; } 88% { fill:#2B2E4B; } 92% { fill:#222A46; } 96% { fill:#192741; } 100% { fill:#0E243C; } } /* Land - layer 7 animation */ #landscape .layer7 { fill:#808285; -webkit-animation-name: layer7; } @-webkit-keyframes layer7{ 0% { fill:#102237; } 4% { fill:#18223A; } 8% { fill:#1E233D; } 12% { fill:#242340; } 16% { fill:#402952; } 20% { fill:#542A5E; } 24% { fill:#793177; } 28% { fill:#5F668F; } 32% { fill:#0A8FA7; } 36% { fill:#237595; } 40% { fill:#256F90; } 44% { fill:#27698C; } 48% { fill:#286387; } 52% { fill:#285D82; } 56% { fill:#285E82; } 60% { fill:#285F81; } 64% { fill:#275F81; } 68% { fill:#255F7F; } 72% { fill:#574A63; } 76% { fill:#683148; } 80% { fill:#542F48; } 84% { fill:#3F2D47; } 88% { fill:#242742; } 92% { fill:#202841; } 96% { fill:#19253C; } 100% { fill:#102237; } } /* Land - layer 8 animation */ #landscape .layer8 { fill:#6D6E71; 
-webkit-animation-name: layer8; } @-webkit-keyframes layer8{ 0% { fill:#111F31; } 4% { fill:#172034; } 8% { fill:#1C2037; } 12% { fill:#20213B; } 16% { fill:#37274C; } 20% { fill:#472759; } 24% { fill:#662C71; } 28% { fill:#4F5C83; } 32% { fill:#118095; } 36% { fill:#206983; } 40% { fill:#21637E; } 44% { fill:#225D7A; } 48% { fill:#225775; } 52% { fill:#225270; } 56% { fill:#235372; } 60% { fill:#235574; } 64% { fill:#235675; } 68% { fill:#235676; } 72% { fill:#4C445F; } 76% { fill:#5B2F49; } 80% { fill:#4A2C47; } 84% { fill:#382A44; } 88% { fill:#242742; } 92% { fill:#1E253D; } 96% { fill:#182338; } 100% { fill:#111F31; } } /* Land - layer 9 animation */ #landscape .layer9 { fill:#58595B; -webkit-animation-name: layer9; } @-webkit-keyframes layer9{ 0% { fill:#111C2B; } 4% { fill:#151D2E; } 8% { fill:#191E32; } 12% { fill:#1D1E35; } 16% { fill:#2F2447; } 20% { fill:#3A2454; } 24% { fill:#52296C; } 28% { fill:#405279; } 32% { fill:#137185; } 36% { fill:#1C5C72; } 40% { fill:#1C576E; } 44% { fill:#1C5269; } 48% { fill:#1B4C64; } 52% { fill:#1A475F; } 56% { fill:#1C4A63; } 60% { fill:#1E4B67; } 64% { fill:#1F4D6A; } 68% { fill:#204E6D; } 72% { fill:#413E5A; } 76% { fill:#4E2D49; } 80% { fill:#402A45; } 84% { fill:#312742; } 88% { fill:#20243E; } 92% { fill:#1B2238; } 96% { fill:#171F32; } 100% { fill:#111C2B; } } /* Land - layer 10 animation */ #landscape .layer10 { fill:#414042; -webkit-animation-name: layer10; } @-webkit-keyframes layer10{ 0% { fill:#101825; } 4% { fill:#131928; } 8% { fill:#161A2C; } 12% { fill:#181B2F; } 16% { fill:#262141; } 20% { fill:#2C214F; } 24% { fill:#3D2567; } 28% { fill:#30486F; } 32% { fill:#136476; } 36% { fill:#165163; } 40% { fill:#154C5E; } 44% { fill:#144759; } 48% { fill:#134254; } 52% { fill:#113D4F; } 56% { fill:#154055; } 60% { fill:#19425A; } 64% { fill:#1B445F; } 68% { fill:#1E4564; } 72% { fill:#363856; } 76% { fill:#412A49; } 80% { fill:#352744; } 84% { fill:#29243F; } 88% { fill:#1C203A; } 92% { fill:#191F33; } 96% { 
fill:#151C2C; } 100% { fill:#101825; } } /* Land - layer 11 animation */ #landscape .layer11 { fill:#232323; -webkit-animation-name: layer11; } @-webkit-keyframes layer11{ 0% { fill:#0D141E; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 4% { fill:#101522; transform: skew(1deg, 0deg) translate3d(-0.5%, 0px, 0px);} 8% { fill:#121726; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 12% { fill:#141829; transform: skew(-1deg, 0deg) translate3d(0.5%, 0px, 0px);} 16% { fill:#1C1E3C; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 20% { fill:#22214F; transform: skew(1deg, 0deg) translate3d(-0.5%, 0px, 0px);} 24% { fill:#262262; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 28% { fill:#1D4065; transform: skew(-1deg, 0deg) translate3d(0.5%, 0px, 0px);} 32% { fill:#125768; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 36% { fill:#1E4553; transform: skew(1deg, 0deg) translate3d(-0.5%, 0px, 0px);} 40% { fill:#1E404E; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 44% { fill:#1E3B49; transform: skew(-1deg, 0deg) translate3d(0.5%, 0px, 0px);} 48% { fill:#1D3643; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 52% { fill:#1C313E; transform: skew(1deg, 0deg) translate3d(-0.5%, 0px, 0px);} 56% { fill:#1C3344; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 60% { fill:#1C3449; transform: skew(-1deg, 0deg) translate3d(0.5%, 0px, 0px);} 64% { fill:#1B344F; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 68% { fill:#183454; transform: skew(1deg, 0deg) translate3d(-0.5%, 0px, 0px);} 72% { fill:#242B4A; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 76% { fill:#2B2241; transform: skew(-1deg, 0deg) translate3d(0.5%, 0px, 0px);} 80% { fill:#24203C; transform: skew(0deg, 0deg) translate3d(0px, 0px, 0px);} 84% { fill:#1D1D37; } 88% { fill:#151A32; } 92% { fill:#14192C; } 96% { fill:#111725; } 100% { fill:#0D141E; } } /* Aspect ratio media queries ---------------------------------------------- */ 
/* 3/2 and 6/1 -- out of range*/ @media screen and (min-aspect-ratio: 3/1) and (max-aspect-ratio: 6/1){ body{background: rgb(31, 60, 80);} body:before{content:'Aspect ratio out of range - too wide';color: white;text-align: center;width: 100%;height: 100%;display: block;position: absolute;top: 50%;} #sky, #reflection, #sunMask, #landscape, #bottom, #stag, .controls, .stars, .sunMask, .clouds, .lighting, .vignette, .twinkleWrap,.spriteWrap{display: none;} } /* 14/5 and 3/1 */ @media screen and (min-aspect-ratio: 14/5) and (max-aspect-ratio: 7/2){ .twinkles{} #reflection, #sunMask{height: 42%;} #landscape{bottom: -3%;} #bottom{top: 101%;} #stag{bottom: 3%;} .sun{top: -570%;} .twinkles{top: 77%;} } /* 5/2 and 14/5 */ @media screen and (min-aspect-ratio: 5/2) and (max-aspect-ratio: 14/5){ .twinkles{top: 75%;} #reflection, #sunMask{} #landscape{bottom: 1%;} #bottom{top: 98%;} #stag{bottom: 7%;} .sun{top: -532%;} } /* 9/4 and 5/2 */ @media screen and (min-aspect-ratio: 9/4) and (max-aspect-ratio: 5/2){ .twinkles{top: 75%;} #reflection, #sunMask{height: 40%;} #landscape{bottom: 5%;} #bottom{top: 94%;} #stag{bottom: 10%;} .sun{top: -452%;} } /* 11/5 and 9/4 */ @media screen and (min-aspect-ratio: 11/5) and (max-aspect-ratio: 9/4){ .twinkles{} #reflection, #sunMask{} #landscape{bottom: 6%;} #bottom{top: 93%;} #stag{ bottom: 11%;} .sun{top: -410%;} } /* 13/6 and 11/5 */ @media screen and (min-aspect-ratio: 13/6) and (max-aspect-ratio: 11/5){ .twinkles{} #reflection, #sunMask{height: 37%;} #landscape{bottom: 6%;} #bottom{top: 93%;} #stag{bottom: 11%;} .sun{} } /* 15/7 and 13/6 */ @media screen and (min-aspect-ratio: 15/7) and (max-aspect-ratio: 13/6){ .twinkles{} #reflection, #sunMask{height: 31%;} #landscape{bottom: 7%;} #bottom{top: 92%;} #stag{bottom: 12%;} .sun{} } /* 2/1 and 15/7 */ @media screen and (min-aspect-ratio: 2/1) and (max-aspect-ratio: 15/7){ .twinkles{} #reflection, #sunMask{height: 31%;} #landscape{bottom: 8%;} #bottom{top: 91%;} #stag{bottom: 12%;} .sun{top: 
-370%;} } @media screen and (min-aspect-ratio: 15/8) and (max-aspect-ratio: 2/1){ .twinkles{} #reflection, #sunMask{height: 30%;} #landscape{} #bottom{} #stag{bottom: 15%;} .sun{} } /* 7/4 and 15/8 */ @media screen and (min-aspect-ratio: 7/4) and (max-aspect-ratio: 15/8){ .twinkles{top: 71%;} #reflection, #sunMask{height: 28%;} #landscape{bottom: 13%;} #bottom{top: 86%;} #stag{bottom: 17%;} .sun{top: -300%;} } /* 11/7 and 7/4 */ @media screen and (min-aspect-ratio: 11/7) and (max-aspect-ratio: 7/4){ .twinkles{top: 69%;} #reflection, #sunMask{height: 24%;} #landscape{bottom: 16%;} #bottom{top: 83%;} #stag{bottom: 20%;} .sun{top: -270%;} } /* 13/9 and 11/7 */ @media screen and (min-aspect-ratio: 13/9) and (max-aspect-ratio: 11/7){ .twinkles{top: 68%;} #reflection, #sunMask{height: 22%;} #landscape{bottom: 18%;} #bottom{top: 81%;} #stag{bottom: 21.6%;} .sun{top: -240%;} } /* 4/3 and 13/9 */ @media screen and (min-aspect-ratio: 4/3) and (max-aspect-ratio: 13/9){ .twinkles{top: 66%;} #reflection, #sunMask{height: 19%;} #landscape{bottom: 22%;} #bottom{top: 77%;} #stag{bottom: 25%;} .sun{top: -230%;} } /* 8/7 and 4/3 */ @media screen and (min-aspect-ratio: 8/7) and (max-aspect-ratio: 4/3){ .twinkles{top: 65%;} #reflection, #sunMask{height: 18%;} #landscape{bottom: 23%;} #bottom{top: 76%;} #stag{bottom: 25.3%;} .sun{top: -180%;} } /* 14/15 and 8/7 */ @media screen and (min-aspect-ratio: 14/15) and (max-aspect-ratio: 8/7){ .twinkles{top: 63%;width: 3%;} #reflection, #sunMask{height: 17%;} #landscape{bottom: 26%;} #bottom{top: 73.8%;} #stag{bottom: 28.3%;} .sun{top: -140%;} } /* 5/6 and 14/15 */ @media screen and (min-aspect-ratio: 5/6) and (max-aspect-ratio: 14/15){ .twinkles{top: 63%; width: 3%;} #reflection, #sunMask{height: 13%;} #landscape{bottom: 28%;} #bottom{top: 71.5%;} #stag{bottom: 30%;} .sun{top: -110%;} } /* 7/10 and 5/6 */ @media screen and (min-aspect-ratio: 7/10) and (max-aspect-ratio: 5/6){ .twinkles{top: 62%; width: 3%;} #reflection, #sunMask{height: 11%;} 
#landscape{bottom: 30%;} #bottom{top: 69.6%;} #stag{bottom: 31.8%;} .sun{top: -70%;} } /* 5/9 and 7/10 */ @media screen and (min-aspect-ratio: 5/9) and (max-aspect-ratio: 7/10){ .twinkles{top: 62%; width: 3%;} #reflection, #sunMask{height: 8%;} #landscape{bottom: 32%;} #bottom{top: 67.5%;} #stag{bottom: 33.5%;} .sun{top: -55%;} } /* 1/10 and 5/9 --- out of range*/ @media screen and (min-aspect-ratio: 1/10) and (max-aspect-ratio: 5/9){ body{background: rgb(31, 60, 80);} body:before{content:'Aspect ratio out of range - too narrow';color: white;text-align: center;width: 100%;height: 100%;display: block;position: absolute;top: 50%;} #sky, #reflection, #sunMask, #landscape, #bottom, #stag, .controls, .stars, .sunMask, .clouds, .lighting, .vignette, .twinkleWrap,.spriteWrap{display: none;} } ================================================ FILE: html/landscape-animation-experiment/index.html ================================================ Landscape animation experiment
>
  • Noise
  • Refresh
  • Audio
================================================ FILE: html/landscape-animation-experiment/js/index.js ================================================ jQuery(document).ready(function ($) { // noise grain toggle $('a.noiseTest').on('click', function (event) { event.preventDefault(); $('.noise').toggleClass('active'); $(this).toggleClass('active'); }); }); /* Credit to Collin Henderson @ AstralApp.com */ (function() { var WIDTH, HEIGHT, canvas, con, g; var pxs = []; var rint = 50; $.fn.sprites = function () { this.append($('')); setup(this); } function setup (container) { var windowSize = function() { WIDTH = container.innerWidth(); HEIGHT = container.innerHeight(); canvas = container.find('#sprites'); canvas.attr('width', WIDTH).attr('height', HEIGHT); }; windowSize(); $(window).resize(function() { windowSize(); }); con = canvas[0].getContext('2d'); for (var i = 0; i < 100; i++) { pxs[i] = new Circle(); pxs[i].reset(); } requestAnimationFrame(draw); } function draw () { con.clearRect(0, 0, WIDTH, HEIGHT); con.globalCompositeOperation = "lighter"; for (var i = 0; i < pxs.length; i++) { pxs[i].fade(); pxs[i].move(); pxs[i].draw(); } requestAnimationFrame(draw); } function Circle() { this.s = { ttl: 15000, xmax: 5, ymax: 2, rmax: 7, rt: 1, xdef: 960, ydef: 540, xdrift: 4, ydrift: 4, random: true, blink: true }; this.reset = function() { this.x = (this.s.random ? WIDTH * Math.random() : this.s.xdef); this.y = (this.s.random ? HEIGHT * Math.random() : this.s.ydef); this.r = ((this.s.rmax - 1) * Math.random()) + 1; this.dx = (Math.random() * this.s.xmax) * (Math.random() < 0.5 ? -1 : 1); this.dy = (Math.random() * this.s.ymax) * (Math.random() < 0.5 ? -1 : 1); this.hl = (this.s.ttl / rint) * (this.r / this.s.rmax); this.rt = Math.random() * this.hl; this.stop = Math.random() * 0.2 + 0.4; this.s.rt = Math.random() + 1; this.s.xdrift *= Math.random() * (Math.random() < 0.5 ? -1 : 1); this.s.ydrift *= Math.random() * (Math.random() < 0.5 ? 
-1 : 1); }; this.fade = function() { this.rt += this.s.rt; }; this.draw = function() { var newo, cr; if (this.s.blink && (this.rt <= 0 || this.rt >= this.hl)) { this.s.rt = this.s.rt * -1; } else if (this.rt >= this.hl) { this.reset(); } newo = 1 - (this.rt / this.hl); con.beginPath(); con.arc(this.x, this.y, this.r, 0, Math.PI * 2, true); con.closePath(); cr = this.r * newo; g = con.createRadialGradient(this.x, this.y, 0, this.x, this.y, (cr <= 0 ? 1 : cr)); g.addColorStop(0.0, 'rgba(193,254,254,' + newo + ')'); g.addColorStop(this.stop, 'rgba(193,254,254,' + (newo * 0.2) + ')'); g.addColorStop(1.0, 'rgba(193,254,254,0)'); con.fillStyle = g; con.fill(); }; this.move = function() { this.x += (this.rt / this.hl) * this.dx; this.y += (this.rt / this.hl) * this.dy; if (this.x > WIDTH || this.x < 0) this.dx *= -1; if (this.y > HEIGHT || this.y < 0) this.dy *= -1; }; this.getX = function() { return this.x; }; this.getY = function() { return this.y; }; }; })(); $('.spriteWrap').sprites(); ================================================ FILE: html/landscape-animation-experiment/license.txt ================================================ ================================================ FILE: jira/Dockerfile ================================================ FROM atlassian/jira-software:8.10.0 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV AGENT_PATH /opt/atlassian-agent.jar COPY atlassian-agent.jar ${AGENT_PATH} COPY hijack.sh /hijack.sh RUN set -x \ && export DEBIAN_FRONTEND=noninteractive \ && apt update \ && apt upgrade -y \ && apt install tzdata -y \ && chown ${RUN_USER}:${RUN_GROUP} ${AGENT_PATH} \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && dpkg-reconfigure --frontend noninteractive tzdata \ && apt autoremove -y \ && apt autoclean -y CMD ["/hijack.sh"] ================================================ FILE: jira/hijack.sh ================================================ #!/bin/bash export 
JAVA_OPTS="${JAVA_OPTS} -javaagent:${AGENT_PATH}" /entrypoint.py ================================================ FILE: mattermost/Dockerfile ================================================ FROM alpine:3.12 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV MATTERMOST_VERSION 5.25.2 ENV MATTERMOST_HOME /mattermost ENV MATTERMOST_DATA_DIR /data ENV MATTERMOST_DOWNLOAD_URL https://releases.mattermost.com/${MATTERMOST_VERSION}/mattermost-team-${MATTERMOST_VERSION}-linux-amd64.tar.gz ENV PATH ${PATH}:${MATTERMOST_HOME}/bin RUN set -ex \ && apk upgrade \ && apk add bash tzdata curl ca-certificates \ libc6-compat libffi-dev mailcap \ && curl -s ${MATTERMOST_DOWNLOAD_URL} | tar -xz \ && ln -sf ${MATTERMOST_DATA_DIR} ${MATTERMOST_HOME}/data \ && ln -sf ${MATTERMOST_DATA_DIR}/logs ${MATTERMOST_HOME}/logs \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf ${MATTERMOST_HOME}/bin/platform \ /var/cache/apk/* WORKDIR ${MATTERMOST_HOME} VOLUME ${MATTERMOST_DATA_DIR} EXPOSE 8065 COPY entrypoint.sh / ENTRYPOINT ["/entrypoint.sh"] CMD ["mattermost","server"] ================================================ FILE: mattermost/README.md ================================================ ## Mattermost [![](https://images.microbadger.com/badges/image/mritd/mattermost.svg)](https://microbadger.com/images/mritd/mattermost "Get your own image badge on microbadger.com") [![](https://images.microbadger.com/badges/version/mritd/mattermost.svg)](https://microbadger.com/images/mritd/mattermost "Get your own version badge on microbadger.com") > Mattermost 是一个开源 IM 工具,目前个人主要应用于自动化部署,如通过 Hubot 对接 Kubernetes 实现机器人部署、GitLab-CI 部署通知、Sentry 错误告警推送等 本镜像基于 Alpine 制作,未集成数据库等,启动测试 docker-compose 如下 ``` sh version: '2' services: mattermost: image: mritd/mattermost:3.10.0 restart: always volumes: - ./etc/mattermost/config.json:/usr/local/mattermost/config/config.json links: - mysql ports: - 8065:8065 mysql: image: mysql:5.7.17 restart: always volumes:
- ./data/mysql:/var/lib/mysql - ./init/init.sql:/docker-entrypoint-initdb.d/init.sql environment: - MYSQL_ROOT_PASSWORD ``` **完整 docker-compose 配置请参考 [mritd/docker-compose](https://github.com/mritd/docker-compose/tree/master/mattermost)** ================================================ FILE: mattermost/entrypoint.sh ================================================ #!/usr/bin/env bash for dir in /data/data /data/logs /data/config /data/plugins /data/client-plugins; do if [ ! -d "${dir}" ]; then mkdir -p ${dir} fi done exec "$@" ================================================ FILE: metricbeat/Dockerfile ================================================ FROM alpine:3.10 ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV METRICBEAT_VERSION 6.4.0 ENV METRICBEAT_HOME /usr/share/metricbeat ENV METRICBEAT_DOWNLOAD_URL https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-${METRICBEAT_VERSION}-linux-x86_64.tar.gz RUN apk upgrade \ && apk add bash tzdata libc6-compat \ && apk add --virtual=build-dependencies wget ca-certificates \ && wget -q ${METRICBEAT_DOWNLOAD_URL} \ && mkdir -p ${METRICBEAT_HOME}/data ${METRICBEAT_HOME}/logs \ && tar -zxf metricbeat-${METRICBEAT_VERSION}-linux-x86_64.tar.gz \ -C ${METRICBEAT_HOME} --strip-components 1 \ && rm -f metricbeat-${METRICBEAT_VERSION}-linux-x86_64.tar.gz \ ${METRICBEAT_HOME}/.build_hash.txt \ ${METRICBEAT_HOME}/NOTICE \ ${METRICBEAT_HOME}/README.md \ && ln -s ${METRICBEAT_HOME}/metricbeat /usr/bin/metricbeat \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del build-dependencies \ && rm -rf /var/cache/apk/* COPY docker-entrypoint.sh /entrypoint.sh VOLUME /etc/metricbeat ENTRYPOINT ["/entrypoint.sh"] CMD ["-e","-c","/etc/metricbeat.yaml"] ================================================ FILE: metricbeat/docker-entrypoint.sh ================================================ #!/bin/bash set -euo pipefail # Check if the user has invoked the image with flags. # eg.
"metricbeat -c metricbeat.yml" if [[ -z ${1:-} ]] || [[ ${1:0:1} == '-' ]] ; then exec metricbeat "$@" else # They may be looking for a Beat subcommand, like "metricbeat setup". subcommands=$(metricbeat help \ | awk 'BEGIN {RS=""; FS="\n"} /Available Commands:/' \ | awk '/^\s+/ {print $1}') # If we _did_ get a subcommand, pass it to metricbeat. for subcommand in $subcommands; do if [[ $1 == $subcommand ]]; then exec metricbeat "$@" fi done fi # If neither of those worked, then they have specified the binary they want, so # just do exactly as they say. exec "$@" ================================================ FILE: owncloud/Dockerfile ================================================ FROM alpine:3.12 LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} ENV OWNCLOUD_VERSION 10.4.1 ENV OWNCLOUD_GPGKEY E3036906AD9F30807351FAC32D5D5E97F6978A26 ENV OWNCLOUD_DOWNLOAD_URL https://download.owncloud.org/community/owncloud-${OWNCLOUD_VERSION}.tar.bz2 ENV OWNCLOUD_DOWNLOAD_ASC_URL https://download.owncloud.org/community/owncloud-${OWNCLOUD_VERSION}.tar.bz2.asc RUN apk upgrade --update \ && apk add bash tzdata gnupg openssl tar curl ca-certificates \ php7-fpm php7-exif php7-gd php7-intl php7-ldap \ php7-mbstring php7-mcrypt php7-opcache php7-pdo \ php7-pdo_mysql php7-pdo_pgsql php7-pgsql php7-zip \ php7-apcu php7-memcached php7-redis \ && curl -fsSL -o owncloud.tar.bz2 ${OWNCLOUD_DOWNLOAD_URL} \ && curl -fsSL -o owncloud.tar.bz2.asc ${OWNCLOUD_DOWNLOAD_ASC_URL} \ && export GNUPGHOME="$(mktemp -d)" \ && gpg --keyserver pgp.mit.edu --recv-keys ${OWNCLOUD_GPGKEY} \ && gpg --batch --verify owncloud.tar.bz2.asc owncloud.tar.bz2 \ && mkdir /usr/src \ && tar -xjf owncloud.tar.bz2 -C /usr/src \ && addgroup -g 82 -S www-data \ && adduser -u 82 -D -S -G www-data www-data \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo "${TZ}" > /etc/timezone \ && rm -rf ${GNUPGHOME} \ owncloud.tar.bz2 \ owncloud.tar.bz2.asc \ /var/cache/apk/* VOLUME /var/www/html WORKDIR /var/www/html 
COPY opcache-recommended.ini /usr/local/etc/php/conf.d COPY docker-entrypoint.sh /usr/local/bin ENTRYPOINT ["docker-entrypoint.sh"] CMD ["php-fpm7"] ================================================ FILE: owncloud/docker-entrypoint.sh ================================================ #!/bin/bash set -e if [ ! -e '/var/www/html/version.php' ]; then tar cf - --one-file-system -C /usr/src/owncloud . | tar xf - chown -R www-data /var/www/html fi exec "$@" ================================================ FILE: owncloud/opcache-recommended.ini ================================================ opcache.memory_consumption=128 opcache.interned_strings_buffer=8 opcache.max_accelerated_files=4000 opcache.revalidate_freq=60 opcache.fast_shutdown=1 opcache.enable_cli=1 ================================================ FILE: privoxy/Dockerfile ================================================ FROM alpine:3.12 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} RUN apk upgrade --update \ && apk add bash tzdata privoxy \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* CMD ["privoxy","--no-daemon","/etc/privoxy/config"] ================================================ FILE: puppeteer-base/Dockerfile ================================================ FROM node:9.3-slim LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} RUN apt-get update \ && apt-get install -y --force-yes --no-install-recommends \ gconf-service libasound2 libatk1.0-0 libc6 libcairo2 libcups2 libdbus-1-3 libexpat1 \ libfontconfig1 libgcc1 libgconf-2-4 libgdk-pixbuf2.0-0 libglib2.0-0 libgtk-3-0 libnspr4 \ libpango-1.0-0 libpangocairo-1.0-0 libstdc++6 libx11-6 libx11-xcb1 libxcb1 libxcomposite1 \ libxcursor1 libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 \ ca-certificates fonts-liberation libappindicator1 libnss3 xdg-utils ttf-wqy-zenhei fonts-wqy-microhei \ && apt-get autoclean CMD ["/bin/bash"] 
================================================ FILE: rssbot/Dockerfile ================================================ FROM alpine:3.12 ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV VERSION 2.0.0-alpha.7 ENV DOWNLOAD_URL https://github.com/iovxw/rssbot/releases/download/v${VERSION}/rssbot-en-amd64-linux ENV MIN_INTERVAL 300 ENV MAX_INTERVAL 43200 ADD ${DOWNLOAD_URL} /usr/local/bin/rssbot RUN set -ex \ && apk add tzdata ca-certificates \ && chmod +x /usr/local/bin/rssbot \ && rssbot -V \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* VOLUME /data CMD ["sh", "-c", "rssbot --database /data/data.json --min-interval ${MIN_INTERVAL} --max-interval ${MAX_INTERVAL} ${TELEGRAM_BOT_TOKEN}"] ================================================ FILE: shadowsocks/Dockerfile ================================================ FROM alpine:3.13 LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} ENV SS_LIBEV_VERSION v3.3.5 ENV KCP_VERSION 20210103 ENV V2RAY_PLUGIN_VERSION v1.3.1 ENV SS_DOWNLOAD_URL https://github.com/shadowsocks/shadowsocks-libev.git ENV KCP_DOWNLOAD_URL https://github.com/xtaci/kcptun/releases/download/v${KCP_VERSION}/kcptun-linux-amd64-${KCP_VERSION}.tar.gz ENV PLUGIN_OBFS_DOWNLOAD_URL https://github.com/shadowsocks/simple-obfs.git ENV PLUGIN_V2RAY_DOWNLOAD_URL https://github.com/shadowsocks/v2ray-plugin/releases/download/${V2RAY_PLUGIN_VERSION}/v2ray-plugin-linux-amd64-${V2RAY_PLUGIN_VERSION}.tar.gz #ENV LINUX_HEADERS_DOWNLOAD_URL=http://dl-cdn.alpinelinux.org/alpine/v3.7/main/x86_64/linux-headers-4.4.6-r2.apk RUN apk upgrade \ && apk add bash tzdata rng-tools runit \ && apk add --virtual .build-deps \ autoconf \ automake \ build-base \ curl \ linux-headers \ c-ares-dev \ libev-dev \ libtool \ libcap \ libsodium-dev \ mbedtls-dev \ pcre-dev \ tar \ git \ && git clone ${SS_DOWNLOAD_URL} \ && (cd shadowsocks-libev \ && git checkout tags/${SS_LIBEV_VERSION} -b ${SS_LIBEV_VERSION} \ && git submodule 
update --init --recursive \ && ./autogen.sh \ && ./configure --prefix=/usr --disable-documentation \ && make install) \ && git clone ${PLUGIN_OBFS_DOWNLOAD_URL} \ && (cd simple-obfs \ && git submodule update --init --recursive \ && ./autogen.sh \ && ./configure --disable-documentation \ && make install) \ && curl -o v2ray_plugin.tar.gz -sSL ${PLUGIN_V2RAY_DOWNLOAD_URL} \ && tar -zxf v2ray_plugin.tar.gz \ && mv v2ray-plugin_linux_amd64 /usr/bin/v2ray-plugin \ && curl -sSLO ${KCP_DOWNLOAD_URL} \ && tar -zxf kcptun-linux-amd64-${KCP_VERSION}.tar.gz \ && mv server_linux_amd64 /usr/bin/kcpserver \ && mv client_linux_amd64 /usr/bin/kcpclient \ && for binPath in `ls /usr/bin/ss-* /usr/local/bin/obfs-* /usr/bin/kcp* /usr/bin/v2ray*`; do \ setcap CAP_NET_BIND_SERVICE=+eip $binPath; \ done \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && adduser -h /tmp -s /sbin/nologin -S -D -H shadowsocks \ && adduser -h /tmp -s /sbin/nologin -S -D -H kcptun \ && apk del .build-deps \ && apk add --no-cache \ $(scanelf --needed --nobanner /usr/bin/ss-* /usr/local/bin/obfs-* \ | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \ | sort -u) \ && rm -rf /linux-headers-4.4.6-r2.apk \ kcptun-linux-amd64-${KCP_VERSION}.tar.gz \ shadowsocks-libev \ simple-obfs \ v2ray_plugin.tar.gz \ /etc/service \ /var/cache/apk/* SHELL ["/bin/bash"] COPY runit /etc/service COPY entrypoint.sh /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] ================================================ FILE: shadowsocks/README.md ================================================ ## shadowsocks ![](https://img.shields.io/docker/stars/mritd/shadowsocks.svg) ![](https://img.shields.io/docker/pulls/mritd/shadowsocks.svg) ![](https://img.shields.io/microbadger/image-size/mritd/shadowsocks.svg) ![](https://img.shields.io/microbadger/layers/mritd/shadowsocks.svg) - **shadowsocks-libev 版本: 3.3.5** - **kcptun 版本: 20201126** **注意: 由于 Docker Hub 自动构建功能最近出现的 Bug 比较多,构建队列缓慢;部分镜像(包含本镜像)可能会在采用本地 Build 然后直接 
push 到远程仓库的方式构建;如有安全疑虑,可自行使用本 Dockerfile 构建** ### 打开姿势 ``` sh docker run -dt --name ss -p 6443:6443 mritd/shadowsocks -s "-s 0.0.0.0 -p 6443 -m chacha20-ietf-poly1305 -k test123" ``` ### 支持选项 - `-m` : 指定 shadowsocks 命令,默认为 `ss-server` - `-s` : shadowsocks-libev 参数字符串 - `-x` : 开启 kcptun 支持 - `-e` : 指定 kcptun 命令,默认为 `kcpserver` - `-k` : kcptun 参数字符串 ### 选项描述 - `-m` : 参数后指定一个 shadowsocks 命令,如 ss-local,不写默认为 ss-server;该参数用于 shadowsocks 在客户端和服务端工作模式间切换,可选项如下: `ss-local`、`ss-manager`、`ss-nat`、`ss-redir`、`ss-server`、`ss-tunnel` - `-s` : 参数后指定一个 shadowsocks-libev 的参数字符串,所有参数将被拼接到 `ss-server` 后 - `-x` : 指定该参数后才会开启 kcptun 支持,否则将默认禁用 kcptun - `-e` : 参数后指定一个 kcptun 命令,如 kcpclient,不写默认为 kcpserver;该参数用于 kcptun 在客户端和服务端工作模式间切换,可选项如下: `kcpserver`、`kcpclient` - `-k` : 参数后指定一个 kcptun 的参数字符串,所有参数将被拼接到 `kcptun` 后 ### 命令示例 **Server 端** ``` sh docker run -dt --name ssserver -p 6443:6443 -p 6500:6500/udp mritd/shadowsocks -m "ss-server" -s "-s 0.0.0.0 -p 6443 -m chacha20-ietf-poly1305 -k test123" -x -e "kcpserver" -k "-t 127.0.0.1:6443 -l :6500 -mode fast2" ``` **以上命令相当于执行了** ``` sh ss-server -s 0.0.0.0 -p 6443 -m chacha20-ietf-poly1305 -k test123 kcpserver -t 127.0.0.1:6443 -l :6500 -mode fast2 ``` **Client 端** ``` sh docker run -dt --name ssclient -p 1080:1080 mritd/shadowsocks -m "ss-local" -s "-s 127.0.0.1 -p 6500 -b 0.0.0.0 -l 1080 -m chacha20-ietf-poly1305 -k test123" -x -e "kcpclient" -k "-r SSSERVER_IP:6500 -l :6500 -mode fast2" ``` **以上命令相当于执行了** ``` sh ss-local -s 127.0.0.1 -p 6500 -b 0.0.0.0 -l 1080 -m chacha20-ietf-poly1305 -k test123 kcpclient -r SSSERVER_IP:6500 -l :6500 -mode fast2 ``` **关于 shadowsocks-libev 和 kcptun 都支持哪些参数请自行查阅官方文档,本镜像只做一个拼接** **注意:kcptun 映射端口为 udp 模式(`6500:6500/udp`),不写默认 tcp;shadowsocks 请监听 0.0.0.0** ### 环境变量支持 |环境变量|作用|取值| |-------|---|---| |SS_MODULE|shadowsocks 启动命令| `ss-local`、`ss-manager`、`ss-nat`、`ss-redir`、`ss-server`、`ss-tunnel`| |SS_CONFIG|shadowsocks-libev 参数字符串|所有字符串内内容应当为 shadowsocks-libev 支持的选项参数| |KCP_FLAG|是否开启 kcptun 支持|可选参数为 true 和 
false,默认为 fasle 禁用 kcptun| |KCP_MODULE|kcptun 启动命令| `kcpserver`、`kcpclient`| |KCP_CONFIG|kcptun 参数字符串|所有字符串内内容应当为 kcptun 支持的选项参数| 使用时可指定环境变量,如下 ``` sh docker run -dt --name ss -p 6443:6443 -p 6500:6500/udp -e SS_CONFIG="-s 0.0.0.0 -p 6443 -m chacha20-ietf-poly1305 -k test123" -e KCP_MODULE="kcpserver" -e KCP_CONFIG="-t 127.0.0.1:6443 -l :6500 -mode fast2" -e KCP_FLAG="true" mritd/shadowsocks ``` ### 容器平台说明 **各大免费容器平台都已经对代理工具做了对应封锁,一是为了某些不可描述的原因,二是为了防止被利用称为 DDOS 工具等;基于种种原因,公共免费容器平台问题将不予回复** ### GCE 随机数生成错误 如果在 GCE 上使用本镜像,在特殊情况下可能会出现 `This system doesn't provide enough entropy to quickly generate high-quality random numbers.` 错误; 这种情况是由于宿主机没有能提供足够的熵来生成随机数导致,修复办法可以考虑增加 `--device /dev/urandom:/dev/urandom` 选项来使用 `/dev/urandom` 来生成,不过并不算推荐此种方式 ### 更新日志 - 2016-10-12 基于 shadowsocks 2.9.0 版本 基于 shadowsocks 2.9.0 版本打包 docker image - 2016-10-13 增加 kcptun 支持 增加 kcptun 的支持,使用 `-x` 可关闭 - 2016-10-14 增加 环境变量支持 增加 默认读取环境变量策略,可通过环境变量指定 shadowsocks 相关设置 - 2016-11-01 升级 kcptun,增加 kcptun 自定义配置选项(-c 或 环境变量) 增加了 `-c` 参数和环境变量 `KCPTUN_CONFIG`,用于在不挂载文件的情况下重写 kcptun 的配置 - 2016-11-07 chacha20 加密支持 增加了 libsodium 库,用于支持 chacha20 加密算法(感谢 Lihang Chen 提出),删除了 wget 进一步精简镜像体积 - 2016-11-30 更新 kcptun 版本 更新 kcptun 版本到 20161118,修正样例命令中 kcptun 端口号使用 tcp 问题(应使用 udp),感谢 Zheart 提出 - 2016-12-19 更新 kcptun 到 20161202 更新 kcptun 版本到 20161202,完善 README 中 kcptun 说明 - 2016-12-30 更新 kcptun 到 20161222 更新 kcptun 版本到 20161222,更新基础镜像 alpine 到 3.5 - 2017-01-20 升级 kcptun 到 20170117 更新 kcptun 到 20170117,kcptun 新版本 ack 结构中更准确的RTT估算,锁优化,更平滑的rtt计算jitter, 建议更新;同时 20170120 处于 Pre-release 暂不更新;**最近比较忙,可能 kcptun 配置已经有更新,具体 请参考 kcptun 官网及 [Github](https://github.com/xtaci/kcptun)** - 2017-01-25 升级 kcptun 到 20171222 更新 kcptun 到 2017...... 别的我忘了...... 
- 2017-02-08 升级 kcptun 到 20170120 更新 kcptun 到 20170120,**下个版本准备切换到 shadowsocks-libev 3.0,目前 3.0 还未正式发布,观望中!** - 2017-02-25 切换到 shadowsocks-libev 切换到 shadowsocks-libev 3.0 版本,同时更新 kcptun 和参数设定 - 2017-03-07 升级 kcptun 到 20170303 更新 kcptun 到 20170303 - 2017-03-09 升级 kcptun 到 20170308 更新 kcptun 到 20170308 - 2017-03-17 升级 kcptun 和 shadowsocks-libev 升级 shadowsocks-libev 到 3.0.4 版本,支持 `TCP Fast Open in ss-redir`、`TOS/DESCP in ss-redir` 和细化 MPTCP;升级 kcptun 到 315 打假版本 `(:` - 2017-03-21 增加多命令支持 新增 `-m` 参数用于指定使用那个 shadowsocks 命令,如果作为客户端使用 `-m ss-local`, 不写的情况下默认为服务端命令,即 `ss-server` - 2017-03-22 Bug 修复 修复增加 `-m` 参数后 SS_CONFIG 变量为空导致启动失败问题 - 2017-03-27 例行升级 升级 shadowsocks-libev 到 3.0.5、kcptun 到 20170322;kcptun 该版本主要做了 CPU 优化 - 2017-04-01 例行升级 升级 kcptun 到 20170329 - 2017-04-27 例行升级 升级 shadoscoks-libev 到 3.0.6 - 2017-05-31 例行升级 升级 kcptun 到 20150525 - 2017-06-28 例行升级 升级 shadowsocks 到 3.0.7 - 2017-07-28 例行升级 升级 shadowsocks 到 3.0.8 - 2017-08-09 obfs 支持 添加对 simple-obfs 支持 - 2017-08-23 kcptun client 支持 增加镜像对 kcptun client 支持 - 2017-11-38 例行升级 升级 shadowsocks-libev 到 3.1.0,升级 kcptun 到 20170904 - 2017-10-10 升级 kcptun 升级 kcptun 到 20170930 - 2017-11-2 update kcptun 升级 kcptun 到 20171021 - 2017-11-19 update kcptun 升级 kcptun 到 20171113 - 2017-11-22 Fix a security issue in ss-manager. (CVE-2017-15924) Fix a security issue in ss-manager. (CVE-2017-15924) - 2017-12-11 update base image update base image - 2017-12-27 update kcptun update kcptun to 20171201 - 2018-01-2 update shadowsocks update shadowsocks to 3.1.2(Fix a bug in DNS resolver;Add new TFO API support.) - 2018-01-22 update shadowsocks update shadowsocks to 3.1.3(Fix a bug in UDP relay.) - 2018-03-11 update kcptun update kcptun to 20180305 - 2018-03-23 update kcptun update kcptun to 20180316(fix 'too man open files') - 2018-05-29 update shadowsocks update shadowsocks to 3.2.0(Add MinGW,Refine c-ares integration...) 
- 2018-07-09 update base image update base image to alpine 3.8 - 2018-08-05 fix high-quality random numbers fix `system doesn't provide enough entropy to quickly generate high-quality random numbers` - 2018-08-16 update kcptun update kcptun to v20180810 - 2018-09-27 update kcptun update kcptun to v20180926 - 2018-11-06 add `-r` option update kcptun to v20181002 add `-r` option to fix GCE `system doesn't provide enough entropy...` error - 2018-11-14 update shadowsocks update shadowsocks to v3.2.1 - 2018-11-15 update kcptun update kcptun to v20181114 - 2018-12-14 update shadowsocks update shadowsocks to v3.2.3 - 2018-12-26 update kcptun update kcptun to v20181224 - 2019-01-10 update kcptun update kcptun to v20190109 - 2019-01-23 add v2ray-plugin add v2ray-plugin support - 2019-02-26 update to v3.2.4 update shadowsocks to v3.2.4 - 2019-04-14 update to v3.2.5 update shadowsocks to v3.2.5, update kcptun to v20190409 - 2019-04-24 update kcptun update kcptun to v20190424 - 2019-04-29 add runit add runit, remove rng-tools - 2019-06-16 update kcptun update kcptun to v20190611 - 2019-09-15 update shadowsocks to v3.3.1 update shadowsocks to v3.3.1 update kcptun to v20190905 update v2ray-plugin to v1.1.0 - 2019-09-24 update kcptun update kcptun to v20190923 - 2019-11-01 update shadowsocks update shadowsocks to v3.3.3 - 2019-12-17 fix port binding fix port binding update kcptun to v20191127 - 2020-01-01 update kcptun update kcptun to v20191229 update base image to alpine 3.11 - 2020-02-28 update shadowsocks to v3.3.4 update shadowsocks to v3.3.4 update kcptun to v20200226 update v2ray-plugin to 1.3.0 - 2020-04-13 update kcptun update kcptun to v20200409 - 2020-07-10 update kcptun update kcptun to v20200701 update base image to alpine 3.12 ================================================ FILE: shadowsocks/entrypoint.sh ================================================ #!/bin/bash SS_CONFIG=${SS_CONFIG:-""} SS_MODULE=${SS_MODULE:-"ss-server"} KCP_CONFIG=${KCP_CONFIG:-""} 
KCP_MODULE=${KCP_MODULE:-"kcpserver"} KCP_FLAG=${KCP_FLAG:-"false"} while getopts "s:m:k:e:x" OPT; do case $OPT in s) SS_CONFIG=$OPTARG;; m) SS_MODULE=$OPTARG;; k) KCP_CONFIG=$OPTARG;; e) KCP_MODULE=$OPTARG;; x) KCP_FLAG="true";; esac done export SS_CONFIG=${SS_CONFIG} export SS_MODULE=${SS_MODULE} export KCP_CONFIG=${KCP_CONFIG} export KCP_MODULE=${KCP_MODULE} export KCP_FLAG=${KCP_FLAG} exec runsvdir -P /etc/service ================================================ FILE: shadowsocks/runit/kcptun/run ================================================ #!/bin/bash exec 2>&1 if [ "${KCP_FLAG}" == "true" ]; then if [ -z "${KCP_MODULE}" ]; then echo "Warning: KCP_MODULE is empty, default to kcpserver!" KCP_MODULE="kcpserver" fi if [ -n "${KCP_CONFIG}" ]; then echo "starting kcptun..." exec chpst -u kcptun ${KCP_MODULE} ${KCP_CONFIG} else echo "Error: KCP_CONFIG is empty, exit!" exit 1 fi else exit 0 fi ================================================ FILE: shadowsocks/runit/shadowsocks/run ================================================ #!/bin/bash exec 2>&1 if [ -z "${SS_MODULE}" ]; then echo "Warning: SS_MODULE is empty, default to ss-server!" SS_MODULE="ss-server" fi if [ -n "${SS_CONFIG}" ]; then echo "starting shadowsocks..." exec chpst -u shadowsocks ${SS_MODULE} ${SS_CONFIG} else echo "Error: SS_CONFIG is empty, exit!" 
exit 1 fi ================================================ FILE: simple-obfs/Dockerfile ================================================ FROM alpine:3.10 LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} ENV OBFS_DOWNLOAD_URL https://github.com/shadowsocks/simple-obfs.git RUN apk upgrade --update \ && apk add bash tzdata \ && apk add --virtual .build-deps \ asciidoc \ autoconf \ automake \ g++ \ gcc \ libev-dev \ libpcre32 \ libtool \ linux-headers \ make \ openssl \ xmlto \ zlib-dev \ git \ && git clone ${OBFS_DOWNLOAD_URL} \ && (cd simple-obfs \ && git submodule update --init --recursive \ && ./autogen.sh && ./configure --disable-documentation\ && make && make install) \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo $TZ > /etc/timezone \ && runDeps="$( \ scanelf --needed --nobanner /usr/local/bin/obfs-* \ | awk '{ gsub(/,/, "\nso:", $2); print "so:" $2 }' \ | xargs -r apk info --installed \ | sort -u \ )" \ && apk add --virtual .run-deps $runDeps \ && apk del .build-deps \ && rm -rf simple-obfs \ /var/cache/apk/* CMD ["obfs-server","--help"] ================================================ FILE: sniproxy/Dockerfile ================================================ FROM alpine:3.10 LABEL maintainer="mritd " ENV TZ 'Asia/Shanghai' RUN apk upgrade --no-cache && \ apk add --no-cache bash tzdata sniproxy && \ ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ echo "Asia/Shanghai" > /etc/timezone && \ rm -rf /var/cache/apk/* COPY sniproxy.conf /etc/sniproxy.conf COPY entrypoint.sh /entrypoint.sh EXPOSE 443 ENTRYPOINT ["/entrypoint.sh"] ================================================ FILE: sniproxy/entrypoint.sh ================================================ #!/bin/sh set -e if [ -n "$1" ];then exec sniproxy "$@" else exec sniproxy -f -c /etc/sniproxy.conf fi ================================================ FILE: sniproxy/sniproxy.conf ================================================ user nobody listen 0.0.0.0:443 { proto tls table 
https_hosts } table https_hosts { .* *:443 } ================================================ FILE: swagger-editor/Dockerfile ================================================ FROM nginx:1.17.1-alpine LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV SWAGGERUI_VERSION 3.6.1 ENV SWAGGERUI_DOWNLOAD_URL https://github.com/swagger-api/swagger-editor/archive/v${SWAGGERUI_VERSION}.tar.gz RUN apk upgrade --update \ && apk add bash tzdata tar wget ca-certificates \ && wget ${SWAGGERUI_DOWNLOAD_URL} \ && tar -zxf v${SWAGGERUI_VERSION}.tar.gz \ && mv swagger-editor-${SWAGGERUI_VERSION}/index.html /usr/share/nginx/html \ && mv swagger-editor-${SWAGGERUI_VERSION}/dist /usr/share/nginx/html/dist \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del wget curl tar ca-certificates \ && rm -rf v${SWAGGERUI_VERSION}.tar.gz \ swagger-editor-${SWAGGERUI_VERSION} \ /var/cache/apk/* EXPOSE 80 STOPSIGNAL SIGTERM CMD ["nginx", "-g", "daemon off;"] ================================================ FILE: teleport/Dockerfile ================================================ FROM alpine:3.10 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV TELEPORT_VERSION v4.0.2 ENV TELEPORT_DOWNLOAD_URL https://get.gravitational.com/teleport-${TELEPORT_VERSION}-linux-amd64-bin.tar.gz RUN apk upgrade \ && apk add bash tzdata libc6-compat wget tar ca-certificates \ && wget -q ${TELEPORT_DOWNLOAD_URL} \ && tar -zxvf teleport-${TELEPORT_VERSION}-linux-amd64-bin.tar.gz \ && mv teleport/tctl teleport/teleport teleport/tsh /usr/bin \ && mkdir /etc/teleport \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del wget tar \ && rm -rf /*.tar.gz /teleport /var/cache/apk/* COPY teleport.yaml /etc/teleport/teleport.yaml VOLUME /var/lib/teleport /etc/teleport EXPOSE 3022-3026 3080 CMD ["teleport","start","-c","/etc/teleport/teleport.yaml"] ================================================ FILE: 
teleport/teleport.yaml ================================================ # By default, this file should be stored in /etc/teleport.yaml # This section of the configuration file applies to all teleport # services. teleport: # nodename allows to assign an alternative name this node can be reached by. # by default it's equal to hostname nodename: graviton # Data directory where Teleport daemon keeps its data. # See "Filesystem Layout" section above for more details. data_dir: /var/lib/teleport # Invitation token used to join a cluster. it is not used on # subsequent starts auth_token: xxxx-token-xxxx # When running in multi-homed or NATed environments Teleport nodes need # to know which IP it will be reachable at by other nodes # # This value can be specified as FQDN e.g. host.example.com advertise_ip: 10.1.0.5 # list of auth servers in a cluster. you will have more than one auth server # if you configure teleport auth to run in HA configuration auth_servers: - 10.1.0.5:3025 - 10.1.0.6:3025 # Teleport throttles all connections to avoid abuse. These settings allow # you to adjust the default limits connection_limits: max_connections: 1000 max_users: 250 # Logging configuration. Possible output values are 'stdout', 'stderr' and # 'syslog'. Possible severity values are INFO, WARN and ERROR (default). log: output: stderr severity: ERROR # Type of storage used for keys. You need to configure this to use etcd or # a DynamoDB backend if you want to run Teleport in HA configuration. storage: # By default teleport uses the `data_dir` directory on a local filesystem type: dir # Array of locations where the audit log events will be stored. by # default they are stored in `/var/lib/teleport/log` audit_events_uri: [file:///var/lib/teleport/log, dynamo://events_table_name] # Use this setting to configure teleport to store the recorded sessions in # an AWS S3 bucket. see "Using Amazon S3" chapter for more information. 
audit_sessions_uri: s3://name-of-s3-bucket # Cipher algorithms that the server supports. This section only needs to be # set if you want to override the defaults. ciphers: - aes128-ctr - aes192-ctr - aes256-ctr - aes128-gcm@openssh.com - arcfour256 - arcfour128 # Key exchange algorithms that the server supports. This section only needs # to be set if you want to override the defaults. kex_algos: - curve25519-sha256@libssh.org - ecdh-sha2-nistp256 - ecdh-sha2-nistp384 - ecdh-sha2-nistp521 - diffie-hellman-group14-sha1 - diffie-hellman-group1-sha1 # Message authentication code (MAC) algorithms that the server supports. # This section only needs to be set if you want to override the defaults. mac_algos: - hmac-sha2-256-etm@openssh.com - hmac-sha2-256 - hmac-sha1 - hmac-sha1-96 # List of the supported ciphersuites. If this section is not specified, # only the default ciphersuites are enabled. ciphersuites: - tls-rsa-with-aes-128-cbc-sha # default - tls-rsa-with-aes-256-cbc-sha # default - tls-rsa-with-aes-128-cbc-sha256 - tls-rsa-with-aes-128-gcm-sha256 - tls-rsa-with-aes-256-gcm-sha384 - tls-ecdhe-ecdsa-with-aes-128-cbc-sha - tls-ecdhe-ecdsa-with-aes-256-cbc-sha - tls-ecdhe-rsa-with-aes-128-cbc-sha - tls-ecdhe-rsa-with-aes-256-cbc-sha - tls-ecdhe-ecdsa-with-aes-128-cbc-sha256 - tls-ecdhe-rsa-with-aes-128-cbc-sha256 - tls-ecdhe-rsa-with-aes-128-gcm-sha256 - tls-ecdhe-ecdsa-with-aes-128-gcm-sha256 - tls-ecdhe-rsa-with-aes-256-gcm-sha384 - tls-ecdhe-ecdsa-with-aes-256-gcm-sha384 - tls-ecdhe-rsa-with-chacha20-poly1305 - tls-ecdhe-ecdsa-with-chacha20-poly1305 # This section configures the 'auth service': auth_service: # Turns 'auth' role on. Default is 'yes' enabled: yes # A cluster name is used as part of a signature in certificates # generated by this CA. # # We strongly recommend to explicitly set it to something meaningful as it # becomes important when configuring trust between multiple clusters. 
# # By default an automatically generated name is used (not recommended) # # IMPORTANT: if you change cluster_name, it will invalidate all generated # certificates and keys (may need to wipe out /var/lib/teleport directory) cluster_name: "main" authentication: # default authentication type. possible values are 'local', 'oidc' and 'saml' # only local authentication (Teleport's own user DB) is supported in the open # source version type: local # second_factor can be off, otp, or u2f second_factor: otp # this section is used if second_factor is set to 'u2f' u2f: # app_id must point to the URL of the Teleport Web UI (proxy) accessible # by the end users app_id: https://localhost:3080 # facets must list all proxy servers if there are more than one deployed facets: - https://localhost:3080 # IP and the port to bind to. Other Teleport nodes will be connecting to # this port (AKA "Auth API" or "Cluster API") to validate client # certificates listen_addr: 0.0.0.0:3025 # The optional DNS name the auth server if locataed behind a load balancer. # (see public_addr section below) public_addr: auth.example.com:3025 # Pre-defined tokens for adding new nodes to a cluster. Each token specifies # the role a new node will be allowed to assume. The more secure way to # add nodes is to use `ttl node add --ttl` command to generate auto-expiring # tokens. # # We recommend to use tools like `pwgen` to generate sufficiently random # tokens of 32+ byte length. tokens: - "proxy,node:xxxxx" - "auth:yyyy" # Optional setting for configuring session recording. Possible values are: # "node" : sessions will be recorded on the node level (the default) # "proxy" : recording on the proxy level, see "recording proxy mode" section. # "off" : session recording is turned off session_recording: "node" # This setting determines if a Teleport proxy performs strict host key checks. # Only applicable if session_recording=proxy, see "recording proxy mode" for details. 
proxy_checks_host_keys: yes # Determines if SSH sessions to cluster nodes are forcefully terminated # after no activity from a client (idle client). # Examples: "30m", "1h" or "1h30m" client_idle_timeout: never # Determines if the clients will be forcefully disconnected when their # certificates expire in the middle of an active SSH session. (default is 'no') disconnect_expired_cert: no # License file to start auth server with. Note that this setting is ignored # in open-source Teleport and is required only for Teleport Pro, Business # and Enterprise subscription plans. # # The path can be either absolute or relative to the configured `data_dir` # and should point to the license file obtained from Teleport Download Portal. # # If not set, by default Teleport will look for the `license.pem` file in # the configured `data_dir`. license_file: /var/lib/teleport/license.pem # If the auth service is deployed outside Kubernetes, but Kubernetes integration # is required, you have to specify a valid kubeconfig credentials: kubeconfig_file: /path/to/kubeconfig # This section configures the 'node service': ssh_service: # Turns 'ssh' role on. Default is 'yes' enabled: yes # IP and the port for SSH service to bind to. listen_addr: 0.0.0.0:3022 # The optional public address the SSH service. This is useful if administrators # want to allow users to connect to nodes directly, bypassing a Teleport proxy # (see public_addr section below) public_addr: node.example.com:3022 # See explanation of labels in "Labeling Nodes" section below labels: role: master type: postgres # List of the commands to periodically execute. Their output will be used as node labels. # See "Labeling Nodes" section below for more information. commands: - name: arch # this command will add a label like 'arch=x86_64' to a node command: [uname, -p] period: 1h0m0s # enables reading ~/.tsh/environment before creating a session. by default # set to false, can be set true here or as a command line flag. 
permit_user_env: false # configures PAM integration. see below for more details. pam: enabled: no service_name: teleport # This section configures the 'proxy servie' proxy_service: # Turns 'proxy' role on. Default is 'yes' enabled: yes # SSH forwarding/proxy address. Command line (CLI) clients always begin their # SSH sessions by connecting to this port listen_addr: 0.0.0.0:3023 # Reverse tunnel listening address. An auth server (CA) can establish an # outbound (from behind the firewall) connection to this address. # This will allow users of the outside CA to connect to behind-the-firewall # nodes. tunnel_listen_addr: 0.0.0.0:3024 # The HTTPS listen address to serve the Web UI and also to authenticate the # command line (CLI) users via password+HOTP web_listen_addr: 0.0.0.0:3080 # The DNS name the proxy server is accessible by cluster users. Defaults to # the proxy's hostname if not specified. If running multiple proxies behind # a load balancer, this name must point to the load balancer # (see public_addr section below) public_addr: proxy.example.com:3080 # TLS certificate for the HTTPS connection. Configuring these properly is # critical for Teleport security. 
https_key_file: /var/lib/teleport/webproxy_key.pem https_cert_file: /var/lib/teleport/webproxy_cert.pem ================================================ FILE: time-machine/Dockerfile ================================================ FROM alpine:3.10 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV NETATALK_VERSION 3.1.12 ENV NETATALK_DOWNLOAD_URL https://downloads.sourceforge.net/project/netatalk/netatalk/${NETATALK_VERSION}/netatalk-${NETATALK_VERSION}.tar.gz RUN set -ex \ && apk --update upgrade \ && apk add bash tzdata libldap libgcrypt python \ dbus dbus-glib py-dbus linux-pam cracklib db \ libevent file acl openssl avahi runit \ && apk add --no-cache --virtual .build-deps \ build-base autoconf automake libtool libgcrypt-dev \ linux-pam-dev cracklib-dev acl-dev db-dev dbus-dev libevent-dev \ && wget -q ${NETATALK_DOWNLOAD_URL} \ && tar -zxf netatalk-${NETATALK_VERSION}.tar.gz \ && (cd netatalk-${NETATALK_VERSION} \ && CFLAGS="-Wno-unused-result -O2" ./configure \ --prefix=/usr \ --localstatedir=/var/state \ --sysconfdir=/etc \ --with-dbus-sysconf-dir=/etc/dbus-1/system.d/ \ --with-init-style=debian-sysv \ --sbindir=/usr/bin \ --enable-quota \ --with-tdb \ --enable-silent-rules \ --with-cracklib \ --with-cnid-cdb-backend \ --enable-pgp-uam \ --with-acls \ && make && make install) \ && sed -i 's@#host-name.*@host-name=TimeMachine@g' /etc/avahi/avahi-daemon.conf \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del .build-deps \ && rm -rf /netatalk-${NETATALK_VERSION}* \ /etc/avahi/services/* \ /var/cache/apk/* EXPOSE 548 636 COPY entrypoint.sh /entrypoint.sh COPY conf/afp.conf /etc/afp.conf COPY conf/afpd.service /etc/avahi/services/afpd.service COPY services /etc/runit/services VOLUME ["/data"] ENTRYPOINT ["/entrypoint.sh"] ================================================ FILE: time-machine/README.md ================================================ ### Time Machine 一个基于 Alpine 系统的 netatalk 和 avahi 的用于 
Mac Time Machine 备份镜像 ### 启动方式 > 由于 avahi 自动发现服务需要绑定网卡接口,所以容器需要使用 host 网络模式启动, 以保证同一局域网下的 Mac 设备能正确发现 TimeMachine 备份容器 - 纯 docker 启动 ``` sh docker run -dt --name time-machine -v /data:/data --network host mritd/time-machine -u testuser -p 12345678 ``` - docker-compoe 启动 ``` sh # docker-compose 文件如下 version: '3.5' services: time-machine: image: mritd/time-machine restart: always container_name: time-machine network_mode: "host" volumes: - /data:/data command: "-u testuser -p 12345678" # 最后启动即可 docker-compose up -d ``` ### 使用方法 容器启动后在**与宿主机同一局域网**的 Mac 机器能够在 Finder 中的 `共享的` 一栏中发现; 打开后右上角点击链接按钮,然后输入账户和密码即可成功链接;此时打开 TimeMachine 备份磁盘 中选择刚刚建立连接的磁盘即可 ### Update - 2019-07-21: 支持 `-i` 选项定义 user id,升级 netatalk 到 3.1.12 ================================================ FILE: time-machine/conf/afp.conf ================================================ [Global] mimic model = Xserve hostname = TimeMachine log file = /dev/stdout log level = default:warn zeroconf = no ================================================ FILE: time-machine/conf/afpd.service ================================================ %h _afpovertcp._tcp 548 _device-info._tcp 0 model=Xserve ================================================ FILE: time-machine/entrypoint.sh ================================================ #!/bin/bash USER_NAME=${USER_NAME:-"mritd"} USER_ID=${USER_ID:-"1000"} PASSWORD=${PASSWORD:-"123456"} MOUNT_POINT=${MOUNT_POINT-"/data"} VOL_SIZE_MB=${VOL_SIZE_MB-"512000"} while getopts "u:i:p:m:v" OPT; do case $OPT in u) USER_NAME=$OPTARG;; i) USER_ID=$OPTARG;; p) PASSWORD=$OPTARG;; m) MOUNT_POINT=$OPTARG;; v) VOL_SIZE_MB=$OPTARG;; esac done cat /etc/passwd | grep ${USER_NAME} >& /dev/null if [ $? -ne 0 ];then echo "Add user: ${USER_NAME}..." adduser -S -H -G root ${USER_NAME} -u ${USER_ID} echo ${USER_NAME}:${PASSWORD} | chpasswd &> /dev/null else echo "User: ${USER_NAME} already exists!" 
fi mkdir -p ${MOUNT_POINT} chown -R ${USER_NAME}:root ${MOUNT_POINT} cat /etc/afp.conf | grep "${USER_NAME}" >& /dev/null if [ $? -ne 0 ];then echo "Update /etc/afp.conf..." cat << EOF >> /etc/afp.conf [${USER_NAME}] valid users = ${USER_NAME} path = ${MOUNT_POINT} time machine = yes vol size limit = ${VOL_SIZE_MB} EOF else echo "afp.conf already modify!" fi echo "Starting..." if [ -e /var/run/dbus.pid ]; then rm -f /var/run/dbus.pid fi if [ -e /var/run/dbus/system_bus_socket ]; then rm -f /var/run/dbus/system_bus_socket fi dbus-daemon --system runsvdir /etc/runit/services ================================================ FILE: time-machine/services/avahi/run ================================================ #!/bin/bash avahi-daemon ================================================ FILE: time-machine/services/netatalk/run ================================================ #!/bin/bash rm -f /var/lock/netatalk netatalk -d ================================================ FILE: tor/Dockerfile ================================================ FROM alpine:3.10 LABEL maintainer="mritd " ARG TZ='Asia/Shanghai' ENV TZ ${TZ} RUN apk upgrade --update \ && apk add tor privoxy bash tzdata su-exec \ && ln -sf /dev/stdout /var/log/tor/notices.log \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo "${TZ}" > /etc/timezone \ && rm -rf /var/cache/apk/* COPY torrc /etc/tor/torrc COPY entrypoint.sh /entrypoint.sh CMD ["/entrypoint.sh"] ================================================ FILE: tor/README.md ================================================ ## Alpine Tor > 这是一个匿名代理工具 Tor 的 docker 镜像,基于 alpine 制作 ### Tor 使用 启动 Tor 匿名代理 ``` sh docker run -d --name tor -p 9100:9100 mritd/tor ``` **默认情况下 Tor 监听在 `0.0.0.0:9100` 端口上,若想通过 Tor 代理访问网络,请将浏览器代理设置到 `socks5://TOR-DOCKER-IP:9100` 即可** ### Tor 相关设置 **该 Tor 镜像默认配置文件位于 `/etc/tor/torrc`,该文件中定义了 Tor 日志位置、代理模式、前置代理、排除节点等相关设置,一些相关选项如下:** - ExcludeNodes: 排除不可信节点,防止 Tor 蜜罐,目前预设 `{cn},{hk},{mo}` (中国、香港、澳门),其他国家请自行查阅 - HTTPProxy: Tor 前置 HTTP 
代理,众所周知的原因国内 Tor 节点无法连接,所以如需设置前置代理请修改此项 - HTTPSProxy: Tor 前置 HTTPS 代理,作用同上 - Socks5Proxy: Tor 前置 Socks5 代理,作用同上 ================================================ FILE: tor/entrypoint.sh ================================================ #!/bin/bash chown tor:nogroup /var/log/tor/notices.log su-exec tor tor ================================================ FILE: tor/torrc ================================================ ## Configuration file for a typical Tor user ## Last updated 22 September 2015 for Tor 0.2.7.3-alpha. ## (may or may not work for much older or much newer versions of Tor.) ## ## Lines that begin with "## " try to explain what's going on. Lines ## that begin with just "#" are disabled commands: you can enable them ## by removing the "#" symbol. ## ## See 'man tor', or https://www.torproject.org/docs/tor-manual.html, ## for more options you can use in this file. ## ## Tor will look for this file in various places based on your platform: ## https://www.torproject.org/docs/faq#torrc ## Tor opens a SOCKS proxy on port 9050 by default -- even if you don't ## configure one below. Set "SOCKSPort 0" if you plan to run Tor only ## as a relay, and not make any local application connections yourself. #SOCKSPort 9050 # Default: Bind to localhost:9050 for local connections. SOCKSPort 0.0.0.0:9100 # Bind to this address:port too. ## Entry policies to allow/deny SOCKS requests based on IP address. ## First entry that matches wins. If no SOCKSPolicy is set, we accept ## all (and only) requests that reach a SOCKSPort. Untrusted users who ## can access your SOCKSPort may be able to learn about the connections ## you make. #SOCKSPolicy accept 192.168.0.0/16 #SOCKSPolicy accept6 FC00::/7 #SOCKSPolicy reject * ## Logs go to stdout at level "notice" unless redirected by something ## else, like one of the below lines. You can have as many Log lines as ## you want. 
## ## We advise using "notice" in most cases, since anything more verbose ## may provide sensitive information to an attacker who obtains the logs. ## ## Send all messages of level 'notice' or higher to /var/log/tor/notices.log Log notice file /var/log/tor/notices.log ## Send every possible message to /var/log/tor/debug.log #Log debug file /var/log/tor/debug.log ## Use the system log instead of Tor's logfiles #Log notice syslog ## To send all messages to stderr: #Log debug stderr ## The directory for keeping all the keys/etc. By default, we store ## things in $HOME/.tor on Unix, and in Application Data\tor on Windows. DataDirectory /var/lib/tor ## The port on which Tor will listen for local connections from Tor ## controller applications, as documented in control-spec.txt. #ControlPort 9051 ## If you enable the controlport, be sure to enable one of these ## authentication methods, to prevent attackers from accessing it. #HashedControlPassword 16:872860B76453A77D60CA2BB8C1A7042072093276A3D701AD684053EC4C #CookieAuthentication 1 ############### This section is just for location-hidden services ### ## Once you have configured a hidden service, you can look at the ## contents of the file ".../hidden_service/hostname" for the address ## to tell people. ## ## HiddenServicePort x y:z says to redirect requests on port x to the ## address y:z. #HiddenServiceDir /var/lib/tor/hidden_service/ #HiddenServicePort 80 127.0.0.1:80 #HiddenServiceDir /var/lib/tor/other_hidden_service/ #HiddenServicePort 80 127.0.0.1:80 #HiddenServicePort 22 127.0.0.1:22 ################ This section is just for relays ##################### # ## See https://www.torproject.org/docs/tor-doc-relay for details. ## Required: what port to advertise for incoming Tor connections. #ORPort 9001 ## If you want to listen on a port other than the one advertised in ## ORPort (e.g. to advertise 443 but bind to 9090), you can do it as ## follows. 
You'll need to do ipchains or other port forwarding ## yourself to make this work. #ORPort 443 NoListen #ORPort 127.0.0.1:9090 NoAdvertise ## The IP address or full DNS name for incoming connections to your ## relay. Leave commented out and Tor will guess. #Address noname.example.com ## If you have multiple network interfaces, you can specify one for ## outgoing traffic to use. ## OutboundBindAddressExit will be used for all exit traffic, while ## OutboundBindAddressOR will be used for all other connections. ## If you do not wish to differentiate, use OutboundBindAddress to ## specify the same address for both in a single line. #OutboundBindAddressExit 10.0.0.4 #OutboundBindAddressOR 10.0.0.5 ## A handle for your relay, so people don't have to refer to it by key. ## Nicknames must be between 1 and 19 characters inclusive, and must ## contain only the characters [a-zA-Z0-9]. #Nickname ididnteditheconfig ## Define these to limit how much relayed traffic you will allow. Your ## own traffic is still unthrottled. Note that RelayBandwidthRate must ## be at least 75 kilobytes per second. ## Note that units for these config options are bytes (per second), not ## bits (per second), and that prefixes are binary prefixes, i.e. 2^10, ## 2^20, etc. #RelayBandwidthRate 100 KBytes # Throttle traffic to 100KB/s (800Kbps) #RelayBandwidthBurst 200 KBytes # But allow bursts up to 200KB (1600Kb) ## Use these to restrict the maximum traffic per day, week, or month. ## Note that this threshold applies separately to sent and received bytes, ## not to their sum: setting "40 GB" may allow up to 80 GB total before ## hibernating. ## ## Set a maximum of 40 gigabytes each way per period. #AccountingMax 40 GBytes ## Each period starts daily at midnight (AccountingMax is per day) #AccountingStart day 00:00 ## Each period starts on the 3rd of the month at 15:00 (AccountingMax ## is per month) #AccountingStart month 3 15:00 ## Administrative contact information for this relay or bridge. 
This line ## can be used to contact you if your relay or bridge is misconfigured or ## something else goes wrong. Note that we archive and publish all ## descriptors containing these lines and that Google indexes them, so ## spammers might also collect them. You may want to obscure the fact that ## it's an email address and/or generate a new address for this purpose. #ContactInfo Random Person ## You might also include your PGP or GPG fingerprint if you have one: #ContactInfo 0xFFFFFFFF Random Person ## Uncomment this to mirror directory information for others. Please do ## if you have enough bandwidth. #DirPort 9030 # what port to advertise for directory connections ## If you want to listen on a port other than the one advertised in ## DirPort (e.g. to advertise 80 but bind to 9091), you can do it as ## follows. below too. You'll need to do ipchains or other port ## forwarding yourself to make this work. #DirPort 80 NoListen #DirPort 127.0.0.1:9091 NoAdvertise ## Uncomment to return an arbitrary blob of html on your DirPort. Now you ## can explain what Tor is if anybody wonders why your IP address is ## contacting them. See contrib/tor-exit-notice.html in Tor's source ## distribution for a sample. #DirPortFrontPage /etc/tor/tor-exit-notice.html ## Uncomment this if you run more than one Tor relay, and add the identity ## key fingerprint of each Tor relay you control, even if they're on ## different networks. You declare it here so Tor clients can avoid ## using more than one of your relays in a single circuit. See ## https://www.torproject.org/docs/faq#MultipleRelays ## However, you should never include a bridge's fingerprint here, as it would ## break its concealability and potentially reveal its IP/TCP address. #MyFamily $keyid,$keyid,... ## A comma-separated list of exit policies. They're considered first ## to last, and the first match wins. ## ## If you want to allow the same ports on IPv4 and IPv6, write your rules ## using accept/reject *. 
If you want to allow different ports on IPv4 and ## IPv6, write your IPv6 rules using accept6/reject6 *6, and your IPv4 rules ## using accept/reject *4. ## ## If you want to _replace_ the default exit policy, end this with either a ## reject *:* or an accept *:*. Otherwise, you're _augmenting_ (prepending to) ## the default exit policy. Leave commented to just use the default, which is ## described in the man page or at ## https://www.torproject.org/documentation.html ## ## Look at https://www.torproject.org/faq-abuse.html#TypicalAbuses ## for issues you might encounter if you use the default exit policy. ## ## If certain IPs and ports are blocked externally, e.g. by your firewall, ## you should update your exit policy to reflect this -- otherwise Tor ## users will be told that those destinations are down. ## ## For security, by default Tor rejects connections to private (local) ## networks, including to the configured primary public IPv4 and IPv6 addresses, ## and any public IPv4 and IPv6 addresses on any interface on the relay. ## See the man page entry for ExitPolicyRejectPrivate if you want to allow ## "exit enclaving". ## #ExitPolicy accept *:6660-6667,reject *:* # allow irc ports on IPv4 and IPv6 but no more #ExitPolicy accept *:119 # accept nntp ports on IPv4 and IPv6 as well as default exit policy #ExitPolicy accept *4:119 # accept nntp ports on IPv4 only as well as default exit policy #ExitPolicy accept6 *6:119 # accept nntp ports on IPv6 only as well as default exit policy #ExitPolicy reject *:* # no exits allowed ## Bridge relays (or "bridges") are Tor relays that aren't listed in the ## main directory. Since there is no complete public list of them, even an ## ISP that filters connections to all the known Tor relays probably ## won't be able to block all the bridges. Also, websites won't treat you ## differently because they won't know you're running Tor. If you can ## be a real relay, please do; but if not, be a bridge! 
#BridgeRelay 1 ## By default, Tor will advertise your bridge to users through various ## mechanisms like https://bridges.torproject.org/. If you want to run ## a private bridge, for example because you'll give out your bridge ## address manually to your friends, uncomment this line: #PublishServerDescriptor 0 ExcludeNodes {cn},{hk},{mo} StrictNodes 1 #HTTPProxy 192.168.1.1:8080 #HTTPSProxy 192.168.1.1:8080 #Socks5Proxy 192.168.1.1:1080 ================================================ FILE: twemproxy/Dockerfile ================================================ FROM alpine:3.10 ENV TWEMPROXY_VERSION 0.4.1 ENV TWEMPROXY_CONFIG_DIR /etc/twemproxy ENV TWEMPROXY_DOWNLOAD_URL https://github.com/twitter/twemproxy/archive/v${TWEMPROXY_VERSION}.tar.gz RUN apk upgrade --update \ && apk add libtool build-base make automake autoconf wget ca-certificates \ && wget ${TWEMPROXY_DOWNLOAD_URL} -O twemproxy.tar.gz \ && tar -zxvf twemproxy.tar.gz \ && (cd twemproxy-${TWEMPROXY_VERSION} \ && autoreconf -fvi \ && ./configure --prefix=/ \ && make -j2 \ && make install) FROM alpine:3.10 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV TWEMPROXY_VERSION 0.4.1 ENV TWEMPROXY_CONFIG_DIR /etc/twemproxy RUN apk upgrade --update \ && apk add bash tzdata \ && mkdir ${TWEMPROXY_CONFIG_DIR} \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* COPY --from=0 /sbin/nutcracker /sbin/ COPY config.yml ${TWEMPROXY_CONFIG_DIR} COPY entrypoint.sh /entrypoint.sh CMD ["/entrypoint.sh"] ================================================ FILE: twemproxy/config.yml ================================================ alpha: listen: 127.0.0.1:22121 hash: fnv1a_64 distribution: ketama auto_eject_hosts: true redis: true server_retry_timeout: 2000 server_failure_limit: 1 servers: - 127.0.0.1:6379:1 beta: listen: 127.0.0.1:22122 hash: fnv1a_64 hash_tag: "{}" distribution: ketama auto_eject_hosts: false timeout: 400 redis: true servers: - 
127.0.0.1:6380:1 server1 - 127.0.0.1:6381:1 server2 - 127.0.0.1:6382:1 server3 - 127.0.0.1:6383:1 server4 gamma: listen: 127.0.0.1:22123 hash: fnv1a_64 distribution: ketama timeout: 400 backlog: 1024 preconnect: true auto_eject_hosts: true server_retry_timeout: 2000 server_failure_limit: 3 servers: - 127.0.0.1:11212:1 - 127.0.0.1:11213:1 delta: listen: 127.0.0.1:22124 hash: fnv1a_64 distribution: ketama timeout: 100 auto_eject_hosts: true server_retry_timeout: 2000 server_failure_limit: 1 servers: - 127.0.0.1:11214:1 - 127.0.0.1:11215:1 - 127.0.0.1:11216:1 - 127.0.0.1:11217:1 - 127.0.0.1:11218:1 - 127.0.0.1:11219:1 - 127.0.0.1:11220:1 - 127.0.0.1:11221:1 - 127.0.0.1:11222:1 - 127.0.0.1:11223:1 omega: listen: /tmp/gamma hash: hsieh distribution: ketama auto_eject_hosts: false servers: - 127.0.0.1:11214:100000 - 127.0.0.1:11215:1 ================================================ FILE: twemproxy/entrypoint.sh ================================================ #!/bin/bash set -e if [ -z "$1" ]; then exec nutcracker -c ${TWEMPROXY_CONFIG_DIR}/config.yml else exec "$@" fi ================================================ FILE: upsource/Dockerfile ================================================ FROM mritd/alpine-glibc:3.5 LABEL maintainer="mritd " ENV TZ 'Asia/Shanghai' ENV UPSOURCE_VERSION 3.5.3616 RUN apk upgrade --no-cache \ && apk add --no-cache bash tzdata wget ca-certificates openjdk8-jre \ && wget https://download.jetbrains.com/upsource/upsource-${UPSOURCE_VERSION}.zip \ && unzip upsource-${UPSOURCE_VERSION}.zip \ && rm -f upsource-${UPSOURCE_VERSION}.zip \ && mkdir -p /data/{backups,data,logs,tmp} \ && /upsource-${UPSOURCE_VERSION}/bin/upsource.sh configure \ --backups-dir /data/backups \ --data-dir /data/data \ --logs-dir /data/logs \ --temp-dir /data/tmp \ --listen-port 8080 \ && ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo "Asia/Shanghai" > /etc/timezone \ && apk del wget \ && rm -rf /var/cache/apk/* WORKDIR /upsource-${UPSOURCE_VERSION}/bin 
EXPOSE 8080 VOLUME /data CMD ["./upsource.sh","run"] ================================================ FILE: v2ray/Dockerfile ================================================ FROM alpine:3.8 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV V2RAY_VERSION v3.29 ENV V2RAY_LOG_DIR /var/log/v2ray ENV V2RAY_CONFIG_DIR /etc/v2ray/ ENV V2RAY_DOWNLOAD_URL https://github.com/v2ray/v2ray-core/releases/download/${V2RAY_VERSION}/v2ray-linux-64.zip RUN apk upgrade --update \ && apk add \ bash \ tzdata \ curl \ && mkdir -p \ ${V2RAY_LOG_DIR} \ ${V2RAY_CONFIG_DIR} \ /tmp/v2ray \ && curl -L -H "Cache-Control: no-cache" -o /tmp/v2ray/v2ray.zip ${V2RAY_DOWNLOAD_URL} \ && unzip /tmp/v2ray/v2ray.zip -d /tmp/v2ray/ \ && mv /tmp/v2ray/v2ray-${V2RAY_VERSION}-linux-64/v2ray /usr/bin \ && mv /tmp/v2ray/v2ray-${V2RAY_VERSION}-linux-64/vpoint_vmess_freedom.json /etc/v2ray/config.json \ && chmod +x /usr/bin/v2ray \ && apk del curl \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /tmp/v2ray /var/cache/apk/* ADD entrypoint.sh /entrypoint.sh ENTRYPOINT ["/entrypoint.sh"] ================================================ FILE: v2ray/README.md ================================================ ## v2ray 最新版本 [![](https://images.microbadger.com/badges/version/mritd/v2ray.svg)](https://microbadger.com/images/mritd/v2ray "Get your own version badge on microbadger.com") [![](https://images.microbadger.com/badges/image/mritd/v2ray.svg)](https://microbadger.com/images/mritd/v2ray "Get your own image badge on microbadger.com") > 截至目前该镜像为 v2ray 3.29 版本 ### 打开姿势 ``` sh docker pull mritd/v2ray docker run -dt --name v2ray -p 10086:10086 mritd/v2ray ``` **Container 默认监听 10086 端口** **v2ray 默认 ID 为 `23ad6b10-8d1a-40f7-8ad0-e3e35cd38297`(不保证后期变动)** ### 自定义配置 **镜像支持写入自定义的 v2ray 配置,挂载覆盖 `/etc/v2ray/config.json` 或使用 `-c` 选项并跟上 JSON 字符串即可,如下所示** ``` sh docker run -dt --name v2ray mritd/v2ray -c "{\"log\" : { \"access\": \"/var/log/v2ray/access.log\", 
\"error\": \"/var/log/v2ray/error.log\", \"loglevel\": \"warning\" }, \"inbound\": { \"port\": 4500, \"protocol\": \"vmess\", \"settings\": { \"clients\": [ { \"id\": \"23ad6b10-8d1a-40f7-8ad0-e3e35cd38297\", \"level\": 1, \"alterId\": 64 } ] } }, \"outbound\": { \"protocol\": \"freedom\", \"settings\": {} }, \"outboundDetour\": [ { \"protocol\": \"blackhole\", \"settings\": {}, \"tag\": \"blocked\" } ], \"routing\": { \"strategy\": \"rules\", \"settings\": { \"rules\": [ { \"type\": \"field\", \"ip\": [ \"0.0.0.0/8\", \"10.0.0.0/8\", \"100.64.0.0/10\", \"127.0.0.0/8\", \"169.254.0.0/16\", \"172.16.0.0/12\", \"192.0.0.0/24\", \"192.0.2.0/24\", \"192.168.0.0/16\", \"198.18.0.0/15\", \"198.51.100.0/24\", \"203.0.113.0/24\", \"::1/128\", \"fc00::/7\", \"fe80::/10\" ], \"outboundTag\": \"blocked\" } ] } }, \"transport\": { \"kcpSettings\": { \"uplinkCapacity\": 10, \"downlinkCapacity\": 10 } } }" ``` **`-c` 选项后面的参数就是改好的配置文件中的 JSON 字符串** **实际上对于怎么处理那个 JSON 中引号懵逼的朋友可以借助 JSON 在线转换工具 [http://www.bejson.com/zhuanyi/](http://www.bejson.com/zhuanyi/) 完成 JSON 字符串转换** **也就是说先改好配置,然后将 JSON 复制到上面的网站,选择压缩并转义转换一下, 最后将压缩并转义后的内容拼接在 `-c` 选项后即可** **注意: 网站转换完的两边没有双引号,也就是说要 `-c "粘贴压缩并转义后的内容"`** ### 样例配置 **镜像使用官方的样例配置,来源于官方发布包,可执行以下命令获取样例配置** **具体修改设置请参考 [官方文档配置部分](https://www.v2ray.com/chapter_02/)** ``` sh docker run --rm mritd/v2ray "cat /etc/v2ray/config.json" > config.json ``` ================================================ FILE: v2ray/entrypoint.sh ================================================ #!/bin/bash CMD=$1 CONFIG=$2 if [ "$CONFIG" != "" ] && [ "$CMD" == "-c" ]; then echo "$CONFIG" > /etc/v2ray/config.json echo -e "\033[32mUse a custom configuration...\033[0m" fi if [ "$CMD" != "" ] && [ "$CMD" != "-c" ]; then $* else v2ray -config /etc/v2ray/config.json fi ================================================ FILE: videovip/Dockerfile ================================================ FROM nginx:1.13.6-alpine LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} RUN apk 
upgrade --update \ && apk add bash tzdata \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && rm -rf /var/cache/apk/* COPY vip /usr/share/nginx/html CMD ["nginx", "-g", "daemon off;"] ================================================ FILE: videovip/vip/index.html ================================================ 尊享VIP电影
================================================ FILE: yearning/Dockerfile ================================================ FROM alpine:3.12 LABEL maintainer="mritd " ARG TZ="Asia/Shanghai" ENV TZ ${TZ} ENV INSTALL_DIR /opt/yearning ENV DOWNLOAD_URL https://github.com/cookieY/Yearning/releases/download/v2.2.2/Yearning-2.2.2-4kstars.linux-amd64-patch-1.zip WORKDIR ${INSTALL_DIR} RUN set -ex \ && apk upgrade \ && apk add bash tzdata ca-certificates libc6-compat wget unzip \ && wget ${DOWNLOAD_URL} -O yearning.zip \ && unzip yearning.zip \ && mv Yearning-go/Yearning ./yearning \ && mv Yearning-go/dist ./dist \ && mv Yearning-go/conf.toml ./conf.toml \ && chmod +x ./yearning \ && ln -sf /usr/share/zoneinfo/${TZ} /etc/localtime \ && echo ${TZ} > /etc/timezone \ && apk del wget unzip \ && rm -rf Yearning-go yearning.zip __MACOSX /var/cache/apk/* EXPOSE 8000 ENTRYPOINT ["/opt/yearning/yearning"] CMD ["-m","-s"] ================================================ FILE: zeroclipboard/Dockerfile ================================================ FROM alpine:3.10 LABEL maintainer="mritd " COPY Gemfile Gemfile RUN apk upgrade --update \ && apk add bash build-base libffi zlib libxml2 \ libxslt ruby ruby-io-console ruby-json yaml \ nodejs git perl tzdata \ && apk add --no-cache --virtual .build-deps \ build-base libffi-dev zlib-dev libxml2-dev \ libxslt-dev ruby-dev \ && git clone -b gh-pages https://github.com/zeroclipboard/zeroclipboard.org.git /root/zeroclipboard.org \ && echo 'gem: --no-document' >> ~/.gemrc \ && cp ~/.gemrc /etc/gemrc \ && chmod uog+r /etc/gemrc \ && echo "gem 'ffi','1.9.18'" >> /root/zeroclipboard.org/Gemfile \ && echo "gem 'posix-spawn','0.3.13'" >> /root/zeroclipboard.org/Gemfile \ && gem install bundler \ && bundle config build.jekyll --no-rdoc \ && bundle install \ && cd /root/zeroclipboard.org \ && rm -f Gemfile.lock \ && bundle install \ && ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ && echo "Asia/Shanghai" > /etc/timezone \ && apk del 
.build-deps \ && rm -f /Gemfile* \ && rm -rf /var/cache/apk/* \ && rm -rf /usr/lib/lib/ruby/gems/*/cache/* \ && rm -rf ~/.gem WORKDIR /root/zeroclipboard.org CMD ["jekyll","serve","-H","0.0.0.0"] ================================================ FILE: zeroclipboard/Gemfile ================================================ source 'https://rubygems.org' require 'json' require 'open-uri' versions = JSON.parse(open('https://pages.github.com/versions.json').read) gem 'jekyll', versions['jekyll'] gem 'jekyll-sass-converter', versions['jekyll-sass-converter'] gem 'kramdown', versions['kramdown'] gem 'liquid', versions['liquid'] gem 'rouge', versions['rouge'] gem 'jemoji', versions['jemoji'] gem 'jekyll-mentions', versions['jekyll-mentions'] gem 'jekyll-redirect-from', versions['jekyll-redirect-from'] gem 'jekyll-sitemap', versions['jmekyll-sitemap'] gem 'jekyll-feed', versions['jekyll-feed'] gem 'jekyll-gist', versions['jekyll-gist'] gem 'jekyll-paginate', versions['jekyll-paginate'] gem 'github-pages-health-check', versions['github-pages-health-check'] gem 'jekyll-coffeescript', versions['jekyll-coffeescript'] gem 'jekyll-seo-tag', versions['jekyll-seo-tag'] gem 'github-pages', versions['github-pages'] gem 'jekyll-github-metadata', versions['jekyll-github-metadata'] gem 'html-pipeline', versions['html-pipeline'] gem 'listen', versions['listen'] gem 'sass', versions['sass'] gem 'safe_yaml', versions['safe_yaml'] gem 'html-proofer'