# You should be at a node where you have docker access & an available GPU.
#
# Provisioning script: clones Fooocus, installs a custom watercolor preset,
# renders a docker-compose.yml pinned to one GPU, then starts the service.
import os
from pathlib import Path
from subprocess import run

PRESET_NAME = "ruiyan_watercolor"

# docker-compose.yml body.  {preset_name} and {gpu_id} are substituted via
# str.format() below.  NOTE(fix): the placeholder must match the keyword
# passed to .format() — the original template said {PRESET_NAME} while the
# call passed preset_name=..., which raised KeyError at runtime.
DOCKER_COMPOSE_TEMPLATE = """\
volumes:
  fooocus-data:

services:
  app:
    build: .
    image: ghcr.io/lllyasviel/fooocus
    ports:
      - "7865:7865"
    environment:
      - CMDARGS=--listen --preset {preset_name}  # Arguments for launch.py.
      - DATADIR=/content/data  # Directory which stores models, outputs dir
      - config_path=/content/data/config.txt
      - config_example_path=/content/data/config_modification_tutorial.txt
      - path_checkpoints=/content/data/models/checkpoints/
      - path_loras=/content/data/models/loras/
      - path_embeddings=/content/data/models/embeddings/
      - path_vae_approx=/content/data/models/vae_approx/
      - path_upscale_models=/content/data/models/upscale_models/
      - path_inpaint=/content/data/models/inpaint/
      - path_controlnet=/content/data/models/controlnet/
      - path_clip_vision=/content/data/models/clip_vision/
      - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/
      - path_outputs=/content/app/outputs/  # Warning: If it is not located under '/content/app', you can't see history log!
    volumes:
      - fooocus-data:/content/data
      - ./presets:/content/app/presets  # Bring in presets
      #- ./models:/import/models  # Once you import files, you don't need to mount again.
      #- ./outputs:/import/outputs  # Once you import files, you don't need to mount again.
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ['{gpu_id}']
              capabilities: [compute, utility]
"""

# Preset JSON written verbatim to presets/<PRESET_NAME>.json.
# NOTE(fix): raw string so the prompt's escaped parens reach the file as
# "\\(medium\\)" — JSON decodes that to "\(medium\)", the SD-prompt syntax
# for literal parentheses.  The original wrote a bare "\(", which is an
# invalid JSON escape and made json.load reject the preset file.
PRESET_TEMPLATE = r"""{
    "default_model": "juggernautXL_v8Rundiffusion.safetensors",
    "default_refiner": "None",
    "default_refiner_switch": 0.5,
    "default_loras": [
        [true, "sd_xl_offset_example-lora_1.0.safetensors", 0.1],
        [true, "ruiyan_watercolor_v0.safetensors", 1.0],
        [true, "None", 1.0],
        [true, "None", 1.0],
        [true, "None", 1.0]
    ],
    "default_cfg_scale": 4.0,
    "default_sample_sharpness": 2.0,
    "default_sampler": "dpmpp_2m_sde_gpu",
    "default_scheduler": "karras",
    "default_performance": "Quality",
    "default_prompt": "watercolor \\(medium\\) of",
    "default_prompt_negative": "",
    "default_styles": [
        "Fooocus V2",
        "Watercolor 2",
        "Artstyle Abstract"
    ],
    "default_aspect_ratio": "1152*896",
    "default_overwrite_step": -1,
    "checkpoint_downloads": {
        "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors"
    },
    "embeddings_downloads": {},
    "lora_downloads": {
        "sd_xl_offset_example-lora_1.0.safetensors": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors",
        "ruiyan_watercolor_v0.safetensors": "https://3r2.hya.moe/ruiyan_watercolor_v0.safetensors"
    },
    "previous_default_models": [
        "juggernautXL_version8Rundiffusion.safetensors",
        "juggernautXL_version7Rundiffusion.safetensors",
        "juggernautXL_v7Rundiffusion.safetensors",
        "juggernautXL_version6Rundiffusion.safetensors",
        "juggernautXL_v6Rundiffusion.safetensors"
    ]
}
"""

pwd = Path(os.getcwd())


def runcmd(cmd: str):
    """Run *cmd* through the shell in the current target directory ``pwd``.

    Best-effort: failures are not raised, so a re-run where the clone already
    exists still proceeds to the later steps.  Returns the CompletedProcess.
    """
    return run(cmd, cwd=pwd, shell=True)


runcmd("git clone https://github.com/lllyasviel/Fooocus")
pwd = pwd / "Fooocus"  # subsequent commands/paths operate inside the checkout

# Install the custom preset so CMDARGS=--preset <name> can find it.
(pwd / "presets" / f"{PRESET_NAME}.json").write_text(PRESET_TEMPLATE)

gpu_id = input("Which GPU to use? \nEnter the gpu id: ")
(pwd / "docker-compose.yml").write_text(
    DOCKER_COMPOSE_TEMPLATE.format(gpu_id=gpu_id, preset_name=PRESET_NAME)
)
runcmd("docker compose up")