compose实例¶
Docker Compose examples: https://github.com/Haxxnet/Compose-Examples
ethereum实例¶
实例:
# Build Geth in a stock Go builder container.
# NOTE(review): golang:1.10 is ancient; bump the tag once the Makefile supports newer Go.
FROM golang:1.10-alpine AS builder

RUN apk add --no-cache make gcc musl-dev linux-headers

# COPY is preferred over ADD for plain local files (no auto-extract / URL magic).
COPY . /go-ethereum
# Use WORKDIR instead of `RUN cd ...` (hadolint DL3003).
WORKDIR /go-ethereum
RUN make geth

# Pull Geth into a second-stage deploy container; pin the tag instead of
# `latest` so rebuilds are reproducible (hadolint DL3007).
FROM alpine:3.18

RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

# 8545 HTTP-RPC, 8546 WS-RPC, 30303(/udp) and 30304/udp p2p (documentation only).
EXPOSE 8545 8546 30303 30303/udp 30304/udp
ENTRYPOINT ["geth"]
Docker工具-netshoot¶
A Docker + Kubernetes network troubleshooting swiss-army container: https://github.com/nicolaka/netshoot
# Stage 1: fetch static helper binaries (ctop, calicoctl, termshark, grpcurl, fortio).
FROM debian:stable-slim AS fetcher
COPY build/fetch_binaries.sh /tmp/fetch_binaries.sh

# Combine update + install and drop the apt lists in the same layer (DL3009),
# and skip Recommends (DL3015). ca-certificates must then be explicit, because
# it normally arrives only as a Recommends of libcurl and the fetch script
# downloads over HTTPS.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        wget \
    && rm -rf /var/lib/apt/lists/*

RUN /tmp/fetch_binaries.sh

# Stage 2: the actual netshoot toolbox image.
FROM alpine:3.18.0

# The edge repositories are appended because several tools below are only
# packaged there; packages sorted alphabetically for diffability.
RUN set -ex \
    && echo "http://dl-cdn.alpinelinux.org/alpine/edge/main" >> /etc/apk/repositories \
    && echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories \
    && echo "http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories \
    && apk update \
    && apk upgrade \
    && apk add --no-cache \
        apache2-utils \
        bash \
        bind-tools \
        bird \
        bridge-utils \
        busybox-extras \
        conntrack-tools \
        curl \
        dhcping \
        drill \
        ethtool \
        file \
        fping \
        git \
        httpie \
        iftop \
        iperf \
        iperf3 \
        iproute2 \
        ipset \
        iptables \
        iptraf-ng \
        iputils \
        ipvsadm \
        jq \
        libc6-compat \
        liboping \
        ltrace \
        mtr \
        net-snmp-tools \
        netcat-openbsd \
        nftables \
        ngrep \
        nmap \
        nmap-nping \
        nmap-scripts \
        oh-my-zsh \
        openssh \
        openssl \
        perl-crypt-ssleay \
        perl-net-ssleay \
        py3-pip \
        py3-setuptools \
        scapy \
        socat \
        speedtest-cli \
        strace \
        swaks \
        tcpdump \
        tcptraceroute \
        tshark \
        util-linux \
        vim \
        websocat \
        zsh

# Installing ctop - top-like container monitor
COPY --from=fetcher /tmp/ctop /usr/local/bin/ctop

# Installing calicoctl
COPY --from=fetcher /tmp/calicoctl /usr/local/bin/calicoctl

# Installing termshark
COPY --from=fetcher /tmp/termshark /usr/local/bin/termshark

# Installing grpcurl
COPY --from=fetcher /tmp/grpcurl /usr/local/bin/grpcurl

# Installing fortio
COPY --from=fetcher /tmp/fortio /usr/local/bin/fortio

# Setting User and Home. Root is deliberate here: the network tools
# (tcpdump, iptables, conntrack, ...) need raw-socket/NET_ADMIN privileges.
USER root
WORKDIR /root
# key=value form; the legacy space-separated ENV form is deprecated.
ENV HOSTNAME=netshoot

# ZSH Themes
# NOTE(review): unpinned `curl | sh` is not reproducible; consider pinning an
# oh-my-zsh installer release and verifying its checksum.
RUN curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh | sh
RUN git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
RUN git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k
COPY zshrc .zshrc
COPY motd motd

# Fix permissions for OpenShift and tshark
RUN chmod -R g=u /root
RUN chown root:root /usr/bin/dumpcap

# Running ZSH
CMD ["zsh"]
guoxudongdocker/kubectl¶
# Pin the base tag instead of the implicit `latest` (hadolint DL3006).
FROM alpine:3.18
LABEL maintainer="sunnydog0826@gmail.com"
ENV KUBE_LATEST_VERSION="v1.14.1"
# --no-cache never populates /var/cache/apk, so no manual rm is needed.
# curl -f fails on HTTP errors instead of saving an error page as the "binary".
RUN apk add --no-cache ca-certificates \
 && apk add --no-cache -t deps curl \
 && curl -fL https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
 && chmod +x /usr/local/bin/kubectl \
 && curl -fL https://github.com/kubernetes-sigs/kustomize/releases/download/v2.0.3/kustomize_2.0.3_linux_amd64 -o /usr/local/bin/kustomize \
 && chmod +x /usr/local/bin/kustomize \
 # NOTE(review): dl.bintray.com was shut down in 2021, so this URL almost
 # certainly 404s now; kubedog releases have moved to GitHub. Verify before use.
 && curl -fL https://dl.bintray.com/flant/kubedog/v0.2.0/kubedog-linux-amd64-v0.2.0 -o /usr/local/bin/kubedog \
 && chmod +x /usr/local/bin/kubedog \
 && apk del --purge deps
WORKDIR /root
# kubectl is the entrypoint; CMD supplies the default argument (`kubectl help`).
ENTRYPOINT ["kubectl"]
CMD ["help"]
# syntax = docker/dockerfile:experimental
#
# NOTE: To build this you will need a docker version > 18.06 with
# experimental enabled and DOCKER_BUILDKIT=1
#
# If you do not use buildkit you are not going to have a good time
#
# For reference:
# https://docs.docker.com/develop/develop-images/build_enhancements/
# From: https://github.com/pytorch/pytorch/blob/main/Dockerfile
ARG BASE_IMAGE=ubuntu:20.04
ARG PYTHON_VERSION=3.8

# Common build toolchain: compiler, cmake, ccache, git.
FROM ${BASE_IMAGE} AS dev-base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        ccache \
        cmake \
        curl \
        git \
        libjpeg-dev \
        libpng-dev && \
    rm -rf /var/lib/apt/lists/*
RUN /usr/sbin/update-ccache-symlinks
RUN mkdir /opt/ccache && ccache --set-config=cache_dir=/opt/ccache
# key=value ENV form; the legacy space-separated form is deprecated.
ENV PATH=/opt/conda/bin:$PATH

FROM dev-base AS conda
ARG PYTHON_VERSION=3.8
# Automatically set by buildx
ARG TARGETPLATFORM
# translating Docker's TARGETPLATFORM into miniconda arches
RUN case ${TARGETPLATFORM} in \
        "linux/arm64") MINICONDA_ARCH=aarch64 ;; \
        *)             MINICONDA_ARCH=x86_64 ;; \
    esac && \
    curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh"
COPY requirements.txt .
# Manually invoke bash on miniconda script per https://github.com/conda/conda/issues/10431
RUN chmod +x ~/miniconda.sh && \
    bash ~/miniconda.sh -b -p /opt/conda && \
    rm ~/miniconda.sh && \
    /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \
    /opt/conda/bin/python -mpip install -r requirements.txt && \
    /opt/conda/bin/conda clean -ya

FROM dev-base AS submodule-update
WORKDIR /opt/pytorch
COPY . .
RUN git submodule update --init --recursive

FROM conda AS build
ARG CMAKE_VARS
WORKDIR /opt/pytorch
COPY --from=conda /opt/conda /opt/conda
COPY --from=submodule-update /opt/pytorch /opt/pytorch
RUN make triton
# The /opt/ccache cache mount speeds rebuilds without bloating any layer.
# NOTE(review): `export eval ${CMAKE_VARS}` exports a literal `eval` name in
# addition to the CMAKE_VARS entries -- confirm intent against upstream.
RUN --mount=type=cache,target=/opt/ccache \
    export eval ${CMAKE_VARS} && \
    TORCH_CUDA_ARCH_LIST="3.5 5.2 6.0 6.1 7.0+PTX 8.0" TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \
    CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" \
    python setup.py install

FROM conda AS conda-installs
ARG PYTHON_VERSION=3.8
ARG CUDA_VERSION=12.1
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch-nightly
# Automatically set by buildx
# Note conda needs to be pinned to 23.5.2 see: https://github.com/pytorch/pytorch/issues/106470
RUN /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -y python=${PYTHON_VERSION} conda=23.5.2
ARG TARGETPLATFORM
# On arm64 we can only install wheel packages.
RUN case ${TARGETPLATFORM} in \
        "linux/arm64") pip install --extra-index-url https://download.pytorch.org/whl/cpu/ torch torchvision torchaudio ;; \
        *) /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" pytorch torchvision torchaudio "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
    esac && \
    /opt/conda/bin/conda clean -ya
RUN /opt/conda/bin/pip install torchelastic

FROM ${BASE_IMAGE} AS official
ARG PYTORCH_VERSION
ARG TRITON_VERSION
ARG TARGETPLATFORM
ARG CUDA_VERSION
LABEL com.nvidia.volumes.needed="nvidia_driver"
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        ca-certificates \
        libjpeg-dev \
        libpng-dev \
    && rm -rf /var/lib/apt/lists/*
COPY --from=conda-installs /opt/conda /opt/conda
# The apt lists were removed in the layer above, so they must be refreshed
# before installing gcc; use apt-get, not apt, in scripts (hadolint DL3027).
RUN if test -n "${TRITON_VERSION}" -a "${TARGETPLATFORM}" != "linux/arm64"; then \
        apt-get update; \
        DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gcc; \
        rm -rf /var/lib/apt/lists/*; \
    fi
ENV PATH=/opt/conda/bin:$PATH
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH
ENV PYTORCH_VERSION=${PYTORCH_VERSION}
WORKDIR /workspace

FROM official AS dev
# Should override the already installed version from the official-image stage
COPY --from=build /opt/conda /opt/conda
LLaMA-Factory¶
docker 命令:
docker build -f ./docker/docker-cuda/Dockerfile \
--build-arg INSTALL_BNB=false \
--build-arg INSTALL_VLLM=false \
--build-arg INSTALL_DEEPSPEED=false \
--build-arg INSTALL_FLASHATTN=false \
--build-arg PIP_INDEX=https://pypi.org/simple \
-t llamafactory:latest .
docker run -dit --gpus=all \
-v ./hf_cache:/root/.cache/huggingface \
-v ./ms_cache:/root/.cache/modelscope \
-v ./data:/app/data \
-v ./output:/app/output \
-p 7860:7860 \
-p 8000:8000 \
--shm-size 16G \
--name llamafactory \
llamafactory:latest
docker exec -it llamafactory bash
docker-compose 命令:
docker compose up -d
docker compose exec llamafactory bash
# Use the NVIDIA official image with PyTorch 2.3.0
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-02.html
FROM nvcr.io/nvidia/pytorch:24.02-py3

# Define environments (persist into the running container)
ENV MAX_JOBS=4
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn

# Define installation arguments (build-time only; not present at runtime)
ARG INSTALL_BNB=false
ARG INSTALL_VLLM=false
ARG INSTALL_DEEPSPEED=false
ARG INSTALL_FLASHATTN=false
ARG PIP_INDEX=https://mirrors.aliyun.com/pypi/simple

# Set the working directory
WORKDIR /app

# Install the requirements first, so this layer stays cached until
# requirements.txt itself changes.
COPY requirements.txt /app
RUN pip config set global.index-url "$PIP_INDEX" && \
    pip config set global.extra-index-url "$PIP_INDEX" && \
    python -m pip install --upgrade pip && \
    python -m pip install -r requirements.txt

# Copy the rest of the application into the image
COPY . /app

# Install the LLaMA Factory with the requested optional extras.
# Use POSIX `=` instead of bash-only `==` so the tests also work when
# /bin/sh is dash.
RUN EXTRA_PACKAGES="metrics"; \
    if [ "$INSTALL_BNB" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
    fi; \
    if [ "$INSTALL_VLLM" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
    fi; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    pip install -e ".[$EXTRA_PACKAGES]"

# Rebuild flash attention
RUN pip uninstall -y transformer-engine flash-attn && \
    if [ "$INSTALL_FLASHATTN" = "true" ]; then \
        pip uninstall -y ninja && pip install ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Set up volumes
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]

# Expose port 7860 for the LLaMA Board
# (key=value ENV form; the legacy space-separated form is deprecated)
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for the API service
ENV API_PORT=8000
EXPOSE 8000
# Compose service for LLaMA-Factory. The GPU device reservation below requires
# the NVIDIA Container Toolkit on the host.
services:
  llamafactory:
    build:
      # Context is the repo root; the Dockerfile path is resolved against it.
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      # Build args mirror the ARGs declared in the Dockerfile.
      args:
        INSTALL_BNB: false
        INSTALL_VLLM: false
        INSTALL_DEEPSPEED: false
        INSTALL_FLASHATTN: false
        PIP_INDEX: https://pypi.org/simple
    container_name: llamafactory
    # Persist model caches and training data/output on the host.
    volumes:
      - ../../hf_cache:/root/.cache/huggingface
      - ../../ms_cache:/root/.cache/modelscope
      - ../../data:/app/data
      - ../../output:/app/output
    ports:
      - "7860:7860"   # LLaMA Board web UI
      - "8000:8000"   # API service
    # Host IPC namespace -- presumably for shared-memory headroom instead of
    # an explicit --shm-size; NOTE(review): confirm, as it weakens isolation.
    ipc: host
    # Keep an interactive bash alive so `docker compose exec` sessions work.
    tty: true
    stdin_open: true
    command: bash
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    restart: unless-stopped