# image experiments

## stable diffusion

### build

```bash=
git clone https://github.com/leejet/stable-diffusion.cpp.git ~/git/leejet/stable-diffusion.cpp
cd ~/git/leejet/stable-diffusion.cpp
git submodule init
git submodule update

podman run --rm --interactive --tty --userns=keep-id --volume $(pwd):/app:z --workdir /app oci.helexa.net/ci/builder:ubuntu22.04-cuda13.1.0 /bin/bash -c "\
  cmake -B cuda-modern \
    -DGGML_CUDA=ON \
    -DGGML_NATIVE=OFF \
    -DCMAKE_CUDA_ARCHITECTURES='89;90;100;120;120-virtual' \
    -DBUILD_SHARED_LIBS=OFF \
    -DCUDA_USE_STATIC_CUDA_RUNTIME=ON \
    -DCMAKE_BUILD_TYPE=Release && \
  cmake --build cuda-modern --config Release -j$(nproc)"

rsync -acv --rsync-path 'sudo rsync' ~/git/leejet/stable-diffusion.cpp/cuda-modern/bin/ beast:/usr/local/bin/
```

### get models

```bash=
pip install --user --upgrade huggingface_hub
```

#### diffusion

```bash=
hf \
  download \
  city96/FLUX.2-dev-gguf \
  --local-dir /models \
  --include flux2-dev-Q4_K_S.gguf \
  --token $(cat ~/.huggingface-reader)
```

#### vae

```bash=
hf \
  download \
  black-forest-labs/FLUX.2-dev \
  --local-dir /models \
  --include ae.safetensors \
  --token $(cat ~/.huggingface-reader)

hf \
  download \
  black-forest-labs/FLUX.2-dev \
  --local-dir /models \
  --include flux2-dev.safetensors \
  --token $(cat ~/.huggingface-reader)
```

#### llm

```bash=
hf \
  download \
  unsloth/Mistral-Small-3.2-24B-Instruct-2506-GGUF \
  --local-dir /models \
  --include Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf \
  --token $(cat ~/.huggingface-reader)
```

### generate a reference image

```bash=
pip install --user --upgrade huggingface_hub hf_transfer

curl -fsLo ~/Pictures/cat.png https://raw.githubusercontent.com/leejet/stable-diffusion.cpp/master/assets/flux2/example.png

sd-cli \
  --diffusion-model /models/flux2-dev-Q4_K_S.gguf \
  --vae /models/ae.safetensors \
  --llm /models/Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf \
  -r ~/Pictures/cat.png \
  -p "change 'flux2-dev.cpp' to 'howdy human'" \
  --cfg-scale 1.0 \
  --sampling-method euler \
  -v \
  --diffusion-fa \
  --offload-to-cpu
```
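
### generate from a text prompt

A minimal text-to-image sketch using the same models and flags as the reference-image run above. The `-W`/`-H`, `--steps`, and `-o` options are assumed from upstream stable-diffusion.cpp and may differ in this build; check `sd-cli --help` before relying on them. The prompt and output path are placeholders.

```bash=
# assumption: -W/-H (resolution), --steps, and -o (output path) match the
# upstream stable-diffusion.cpp CLI; verify with `sd-cli --help`
sd-cli \
  --diffusion-model /models/flux2-dev-Q4_K_S.gguf \
  --vae /models/ae.safetensors \
  --llm /models/Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf \
  -p "a tabby cat typing on a laptop, studio lighting" \
  -W 1024 \
  -H 1024 \
  --steps 20 \
  --cfg-scale 1.0 \
  --sampling-method euler \
  --diffusion-fa \
  --offload-to-cpu \
  -o ~/Pictures/cat-txt2img.png
```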