# Install Docker Engine on Ubuntu from Docker's official apt repository.
sudo apt-get update
sudo apt-get install -y \
    ca-certificates \
    curl \
    gnupg

# Add Docker's official GPG key.
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
    | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

# Add the Docker apt repository for this architecture and Ubuntu release.
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] \
https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \
    | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin

# Allow the ubuntu user to run docker without sudo (takes effect on next login).
sudo usermod -aG docker ubuntu



Docker run params

-d: Run as a daemonized process
--rm: When a container stops, it’s deleted
-i: Use STDIN
-t: Allocates a pseudo-TTY so you can interact with the container's terminal
--add-host 'hostname:ip address': Add hostname and ip address to /etc/hosts in running container


Restart stopped docker container

docker start -ai <container name>

Push an image to the Docker repo

docker push <your name>/<image name>

Pull an image from the Docker repo

docker pull <name of image>

Build docker image

Run this command from the same directory as your Dockerfile.

docker build . -t $NAME/$IMAGE_NAME

Rebuild all cached layers (useful when debugging):

docker build . -t $NAME/$IMAGE_NAME --no-cache



If you want to start debugging a Dockerfile you’re working on, do the following:

  1. Add this to the bottom of your Dockerfile:

    ENTRYPOINT tail -F /etc/passwd
  2. Build and run the file:

    docker build --squash -t <your name>/<image name> . && docker run -d --name=<container name> <your name>/<image name> watch "echo 'test' >> /var/log/test.log"
  3. Attach to it:

    docker exec -it <name of running container> bash ##/bin/sh for alpine

Makefile template

 docker build . -t <your name>/<image name>

 docker run -d --name=<container name> --rm -it <your name>/<image name> watch "echo 'test' >> /var/log/test.log"

 docker stop <container name>


Export image to tar

docker save <image name or id> > <name of tar>.tar

(docker save operates on images; to export a running container's filesystem instead, use docker export <container> > file.tar and restore it with docker import.)

Import docker tar

docker load < <name of tar>.tar

Stop & Remove docker containers

docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)

Remove docker image

docker rmi <image name or id>

Get shell on running docker container

docker exec -it <container name or id> /bin/bash

Run multiple commands

docker run -d --name=<container name> <your name>/<image name> /bin/bash -c "cd /path/to/somewhere; python"

Essentially, use /bin/bash -c "commands" and separate each command with a semicolon.


Start container and get shell (will stop container if you stop or exit terminal)

docker start -ai <container name or id>

Start container

docker start <container name or id>

List docker volumes

docker volume ls

Get mount point of a docker volume

docker volume inspect --format '{{ .Mountpoint }}' <volume name>

Delete unused volumes

docker volume prune

Copy file from running container to host

docker cp <container name>:<path to file> <location on host machine to copy file to>

Copy file from host to running container

docker cp <file> <container name>:<where you want file to go on container>

Pass proxy settings as params to docker run

-e http_proxy='<proxy settings>' \
-e https_proxy="<your proxy settings>" \
-e no_proxy="<your proxy settings>" \
-e HTTP_PROXY="<your proxy settings>" \
-e HTTPS_PROXY="<your proxy settings>" \
-e NO_PROXY="<your proxy settings>"


Use proxy settings with a Docker Image

If you need proxy action in your Docker containers, add the following to your Dockerfile:

ENV http_proxy="<your proxy settings>" \
    https_proxy="<your proxy settings>" \
    no_proxy="<your proxy settings>" \
    HTTP_PROXY="<your proxy settings>" \
    HTTPS_PROXY="<your proxy settings>" \
    NO_PROXY="<your proxy settings>"

Change directory in a docker image

WORKDIR /path/to/cd/to

Mount directory using relative path

For this example, let’s imagine we have a container which takes an argument, and the output from running the container should go into a shared folder:

docker run -v $(PWD)/<folder to share w/ container>:/<location of folder to share in container>/<folder to share> <your name>/<image name> <argument container takes>

Get bash shell to container as a specific user

docker exec -it -u <user> <container name> /bin/bash

For example:

docker exec -it -u root jovial_mclean /bin/bash

Docker shortcuts in zsh

Under your ~/.zshrc, look for this field:

plugins=(git)
Once you’ve located it, change it to look something like this:

plugins=(git docker docker-compose nmap)

Welcome to the world of tab completion for your docker commands. You’ve leveled up. Bonus points for nmap and git shortcuts, which (of course) are completely unrelated to docker.

Install Docker on Kali

Use entrypoint script

Create an entrypoint script file (e.g. entrypoint.sh) and make sure it has the following in it:

set -e

commands to run

exec "$@"

Be sure to run chmod +x on the script on the host system.

At the bottom of the Dockerfile:

COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]


Docker Compose

Installation on Ubuntu

# Download and install the newest released docker-compose binary, plus its
# bash completion script. Requires sudo, curl, and git.
install_compose() {
    # Resolve the newest version tag from the docker/compose repository.
    # Regex uses + quantifiers so multi-digit components (e.g. v2.24.1) match.
    COMPOSE_VERSION=$(git ls-remote https://github.com/docker/compose | \
        grep refs/tags | \
        grep -oE "v[0-9]+\.[0-9]+\.[0-9]+$" | sort --version-sort | tail -n 1)
    sudo curl -L \
        "https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" \
        -o /usr/local/bin/docker-compose
    sudo chmod +x /usr/local/bin/docker-compose
    # Add command completion
    sudo curl -L \
        "https://raw.githubusercontent.com/docker/compose/${COMPOSE_VERSION}/contrib/completion/bash/docker-compose" \
        -o /etc/bash_completion.d/docker-compose
}

# To test:
docker-compose --version


Run bash script in container

Add this line at end the of the bash script:

exec bash


Remove named containers

docker-compose down -v


Exec into container

docker-compose exec <container name> bash

Debug container

Add this to the docker-compose.yml file to keep a container running that exits due to an error:

command: tail -F anything

Port mapping to localhost

  - "127.0.0.1:<host port>:<container port>"

Watch logs

This is the equivalent of running tail -f on all of the containers in compose. Timestamps are included in the output by including the -t parameter.

docker-compose logs -t --tail="all" -f

Watch logs for specific service

You can get the service name from the docker-compose.yml file.

docker-compose logs -t --tail="all" -f $SERVICE_NAME


Static IP addresses

Add this to the bottom of the docker-compose.yml file:

networks:
  <network name>:
    ipam:
      driver: default
      config:
        - subnet: 172.28.0.0/16

For each container, add these lines and change the ipv4_address:

    networks:
      <network name>:
        ipv4_address: 172.28.0.10

Force recreate deployment

docker-compose up -d --force-recreate --build

Clean up containers daily


sudo touch /Library/LaunchDaemons/DockerSystemPrune.plist

Add this content:

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>DockerSystemPrune</string>
    <key>ProgramArguments</key>
    <array>
        <string>/bin/sh</string>
        <string>-c</string>
        <string>/usr/local/bin/docker system prune -f</string>
    </array>
    <key>StartCalendarInterval</key>
    <dict>
        <key>Hour</key>
        <integer>3</integer>
        <key>Minute</key>
        <integer>0</integer>
    </dict>
</dict>
</plist>

Run this:

launchctl load -w /Library/LaunchDaemons/DockerSystemPrune.plist


Create a cronjob:

0 3 * * * /usr/bin/docker system prune -f


Assign static ips to docker containers

docker network create --subnet=172.18.0.0/16 mynet123
docker run --net mynet123 --ip 172.18.0.22 -it ubuntu bash


Change password for container user

RUN echo "root:Docker!" | chpasswd


Connect to remote container via Docker Remote API

This will run docker ps against the API running on <host>:2375:

docker -H tcp://<host>:2375 ps

To get a shell to a container:

docker -H tcp://<host>:2375 exec -it container_name bash



List all containers

curl -i -s -X GET http://<host>:2375/containers/json

Prepare command to run

curl -i -s -X POST \
     -H "Content-Type: application/json" \
     --data-binary '{"AttachStdin": true,"AttachStdout": true,"AttachStderr": true,"Cmd": ["whoami"],"DetachKeys": "ctrl-p,ctrl-q","Privileged": true,"Tty": true}' \
     http://<host>:2375/containers/<container name>/exec

Take the id that is output and set it in the command line:

id=<id from output>
Run the command

curl -i -s -X POST \
     -H 'Content-Type: application/json' \
     --data-binary '{"Detach": false,"Tty": false}' \
     "http://<host>:2375/exec/$id/start"


Get Docker container’s IP address

docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' container_name_or_id


Get specific image from running container

  1. Use docker inspect on the specific image id, i.e.

    docker inspect 7f60f7bfc58a
  2. Pull the repo digest sha from the output and use that to do a docker pull:

    docker pull chef/chefdk@sha256:d65c2597802d4a7336cd43cb3ed706701fcc9bdde122e53eacdead8b21a13591

Attach running container to existing docker network

docker network connect your-network-name container-name


Detect and cleanup running container

# If no container named $NAME is currently running, remove any exited
# container with that name so it can be recreated cleanly.
if [ ! "$(docker ps -q -f name="$NAME")" ]; then
    if [ "$(docker ps -aq -f status=exited -f name="$NAME")" ]; then
        # cleanup
        docker rm "$NAME"
    fi
fi


Running commands in a container as part of a pipeline

Just use -i and skip the -t.


Remove all images that match a name

In this particular example, web_apps:

docker images |grep web_apps | cut -d' ' -f1 | xargs docker rmi

If you’re seeing weird discrepancies

This has bit me in the ass so many times. If functionality is not working properly in code that you’re running in a container and you’re using Docker for Mac or Windows, go ahead and increase the size of the VM before spending hours debugging your code that is working perfectly fine. Save yourself a lot of time and frustration.

Multiple authors in a Dockerfile

LABEL authors="first author,second author"


Multiple commands on start in a Dockerfile

This is an example of how to run multiple commands on start for a container specified in the Dockerfile:

CMD service nginx start; service php7.2-fpm start; /usr/sbin/sshd -D

Multiple commands on start via ENTRYPOINT

  1. Create an entrypoint script (e.g. entrypoint.sh) with the commands you want to run, for example:

    set -e
    service nginx start
    service php7.2-fpm start
    /bin/bash /opt/<your script>.sh &
    /usr/sbin/sshd -D
  2. Add the following to your Dockerfile:

    COPY files/entrypoint.sh /entrypoint.sh
    RUN chmod +x /entrypoint.sh
    ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]


Commit container

This command will get the container ID of a running container and create an image called yourname/saved_container_image:

docker commit $(docker ps -aqf "name=container_name") yourname/saved-container-image


Run SSH as a specific user

Use the Dockerfile found here as the base for doing this.

Add the following at the end of your Dockerfile:

USER youruser
WORKDIR /home/youruser


CMD ["/usr/bin/sudo", "/usr/sbin/sshd", "-D", "-o", "ListenAddress=0.0.0.0"]

Please note that this user will need to be in the sudoers file for it to work properly.



Do not under any circumstances expose /var/run/docker.sock to other containers. For docker-compose, look for an entry like this and remove it:

  - "/var/run/docker.sock:/var/run/docker.sock"


Run command as user

docker exec -u <username> -it <container name> <command to run>


Kill all containers

docker kill $(docker ps -q)


Remove all containers

docker system prune

Docker logs

See live logs with the log file

  1. Find the docker container log files location:

    docker inspect --format='{{.LogPath}}' container_id
  2. Run tail -f against that file

See live logs with docker cli

docker logs container_id --follow

Find logs since a particular date and time

docker logs container_id --since YYYY-MM-DDTHH:MM

Note that you can also just do the date by omitting the :MM.


Execute binary on start

Add this to the end of your Dockerfile:

ENTRYPOINT ["./vuln"]


Dockerfile run multiple processes

CMD nohup ./service -port=8080 2>&1 & nohup ./servicedos 2>&1 & sleep infinity



  1. Create a classic Personal Access Token with the following permissions:
delete:packages, repo, workflow, write:packages

and assign it to the $GITHUB_TOKEN env var.

  1. Build the container from Dockerfile and tag it for the GitHub Container Registry:
docker build . -t $IMG_NAME
docker tag $IMG_NAME "ghcr.io/<owner>/${IMG_NAME}:latest"
  1. Login to the registry and push the container image (pipe the token via --password-stdin so it doesn't leak into shell history or ps output):
echo "$GITHUB_TOKEN" | docker login ghcr.io -u CowDogMoo --password-stdin
docker push "ghcr.io/<owner>/${IMG_NAME}:latest"

Container Registry in Gitlab

Login to the repository:

docker login registry.gitlab.com

Create container image:

docker build -t registry.gitlab.com/<namespace>/<project> .
# To also have a latest tag:
docker build -t registry.gitlab.com/<namespace>/<project>:latest .

Upload container image:

docker push registry.gitlab.com/<namespace>/<project>
# To also have a latest tag:
docker push registry.gitlab.com/<namespace>/<project>:latest

Logout of the repository:

docker logout

Detect if process running in container

# Heuristic: PID 1's cgroup mentions docker/lxc when running inside a
# container. NOTE(review): this relies on cgroup v1 paths — on cgroup v2
# hosts /proc/1/cgroup may just contain "0::/", so confirm on your target.
if grep -sq 'docker\|lxc' /proc/1/cgroup; then
    echo "I am running on Docker."
fi


Get the name of a running container

docker run -dit -p 5901:5901 ubuntu && CONTAINER=$(docker ps -l --format '{{.Names}}') && echo $CONTAINER

Use x86_64 image on an arm64 system

if [[ "$(uname -a | awk '{ print $NF }')" == "arm64" ]]; then
    docker pull --platform linux/x86_64 <image name>
else
    docker pull <image name>
fi


Get cgroup version

docker info | grep -i cgroup

Switch to Docker Container Driver for Multi-Platform Build

To enable multi-platform builds using Docker Buildx, switch to the docker-container driver:

# Create a new builder instance with the docker-container driver
docker buildx create --name mybuilder --use --driver docker-container

# Inspect the new builder to ensure it's set up correctly
docker buildx inspect --bootstrap

# Build the image using the new builder
docker buildx build --platform linux/amd64,linux/arm64 -t <your name>/<image name> --push .

Pro-tip: If you’re having weird issues with the container between architectures, make sure you run these commands on an x86_64 machine.