# Compose stack: httpd test site, Gitea, a code-server instance, and an
# Immich photo stack (server + machine-learning + redis) on a shared network.
version: "3"  # NOTE: obsolete in Compose v2 (ignored with a warning); kept for legacy tooling

services:
  httpdtest:
    image: httpd:latest
    container_name: http-test
    restart: always
    ports:
      - "8080:80"
    volumes:
      # Host web root served by Apache.
      - "/var/www/html:/usr/local/apache2/htdocs"

  gitea:
    image: gitea/gitea:1.22.4
    container_name: gitea
    env_file: .gitea.env
    restart: always
    ports:
      - "8081:3000"
    volumes:
      - "/gitea:/data"
      # FIX: was "/etc/localtime:/etc/timezone:ro", which mounted the binary
      # tzdata file over the plain-text zone-name file. Each host file now
      # maps to its same-named path in the container.
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"

  noah-vscode:
    image: git.coldlightalchemist.com/bradley/code-server-wpilib-fedora:latest
    container_name: noah-vscode
    restart: always
    ports:
      - "8082:8080"

  immich-server:
    image: ghcr.io/immich-app/immich-server:v1.123.0
    # extends:
    #   file: hwaccel.transcoding.yml
    #   service: cpu  # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    volumes:
      # Do not edit the next line. If you want to change the media storage
      # location on your system, edit the value of UPLOAD_LOCATION in the .env file
      - "/immich:/usr/src/app/upload"
      - "/etc/localtime:/etc/localtime:ro"
    env_file:
      - .immich.env
    ports:
      - "2283:2283"
    depends_on:
      - redis
    restart: always
    healthcheck:
      disable: false
    networks:
      - immich

  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of [armnn, cuda, openvino] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:v1.123.0
    # extends:  # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu  # set to one of [armnn, cuda, openvino, openvino-wsl] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - .immich.env
    restart: always
    healthcheck:
      disable: false
    networks:
      - immich

  redis:
    container_name: immich_redis
    # Digest-pinned for reproducibility.
    image: docker.io/redis:6.2-alpine@sha256:eaba718fecd1196d88533de7ba49bf903ad33664a92debb24660a922ecd9cac8
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always
    networks:
      - immich

networks:
  immich:

volumes:
  model-cache: