Instructions to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- llama-cpp-python
How to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with llama-cpp-python:
# !pip install llama-cpp-python from llama_cpp import Llama llm = Llama.from_pretrained( repo_id="isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp", filename="Wan2.2-TI2V-5B-Q2_K.gguf", )
output = llm( "Once upon a time,", max_tokens=512, echo=True ) print(output)
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- llama.cpp
How to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with llama.cpp:
Install from brew
brew install llama.cpp # Start a local OpenAI-compatible server with a web UI: llama-server -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K # Run inference directly in the terminal: llama-cli -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
Install from WinGet (Windows)
winget install llama.cpp # Start a local OpenAI-compatible server with a web UI: llama-server -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K # Run inference directly in the terminal: llama-cli -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
Use pre-built binary
# Download pre-built binary from: # https://github.com/ggerganov/llama.cpp/releases # Start a local OpenAI-compatible server with a web UI: ./llama-server -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K # Run inference directly in the terminal: ./llama-cli -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
Build from source code
git clone https://github.com/ggerganov/llama.cpp.git cd llama.cpp cmake -B build cmake --build build -j --target llama-server llama-cli # Start a local OpenAI-compatible server with a web UI: ./build/bin/llama-server -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K # Run inference directly in the terminal: ./build/bin/llama-cli -hf isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
Use Docker
docker model run hf.co/isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
- LM Studio
- Jan
- Ollama
How to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with Ollama:
ollama run hf.co/isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
- Unsloth Studio new
How to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with Unsloth Studio:
Install Unsloth Studio (macOS, Linux, WSL)
curl -fsSL https://unsloth.ai/install.sh | sh # Run unsloth studio unsloth studio -H 0.0.0.0 -p 8888 # Then open http://localhost:8888 in your browser # Search for isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp to start chatting
Install Unsloth Studio (Windows)
irm https://unsloth.ai/install.ps1 | iex # Run unsloth studio unsloth studio -H 0.0.0.0 -p 8888 # Then open http://localhost:8888 in your browser # Search for isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp to start chatting
Using HuggingFace Spaces for Unsloth
# No setup required # Open https://huggingface.co/spaces/unsloth/studio in your browser # Search for isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp to start chatting
- Docker Model Runner
How to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with Docker Model Runner:
docker model run hf.co/isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
- Lemonade
How to use isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp with Lemonade:
Pull the model
# Download Lemonade from https://lemonade-server.ai/ lemonade pull isfs/wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp:Q2_K
Run and chat with the model
lemonade run user.wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp-Q2_K
List all available models
lemonade list
wan-2.2-5b-ti2v-gguf-stable-diffusion-cpp / stable-diffusion-cpp-nvidia /CMakeFiles /3.31.10 /CMakeCCompiler.cmake
| set(CMAKE_C_COMPILER "/usr/bin/cc") | |
| set(CMAKE_C_COMPILER_ARG1 "") | |
| set(CMAKE_C_COMPILER_ID "GNU") | |
| set(CMAKE_C_COMPILER_VERSION "11.4.0") | |
| set(CMAKE_C_COMPILER_VERSION_INTERNAL "") | |
| set(CMAKE_C_COMPILER_WRAPPER "") | |
| set(CMAKE_C_STANDARD_COMPUTED_DEFAULT "17") | |
| set(CMAKE_C_EXTENSIONS_COMPUTED_DEFAULT "ON") | |
| set(CMAKE_C_STANDARD_LATEST "23") | |
| set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert;c_std_17;c_std_23") | |
| set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") | |
| set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") | |
| set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") | |
| set(CMAKE_C17_COMPILE_FEATURES "c_std_17") | |
| set(CMAKE_C23_COMPILE_FEATURES "c_std_23") | |
| set(CMAKE_C_PLATFORM_ID "Linux") | |
| set(CMAKE_C_SIMULATE_ID "") | |
| set(CMAKE_C_COMPILER_FRONTEND_VARIANT "GNU") | |
| set(CMAKE_C_SIMULATE_VERSION "") | |
| set(CMAKE_AR "/usr/bin/ar") | |
| set(CMAKE_C_COMPILER_AR "/usr/bin/gcc-ar-11") | |
| set(CMAKE_RANLIB "/usr/bin/ranlib") | |
| set(CMAKE_C_COMPILER_RANLIB "/usr/bin/gcc-ranlib-11") | |
| set(CMAKE_LINKER "/usr/bin/ld") | |
| set(CMAKE_LINKER_LINK "") | |
| set(CMAKE_LINKER_LLD "") | |
| set(CMAKE_C_COMPILER_LINKER "/usr/bin/ld") | |
| set(CMAKE_C_COMPILER_LINKER_ID "GNU") | |
| set(CMAKE_C_COMPILER_LINKER_VERSION 2.38) | |
| set(CMAKE_C_COMPILER_LINKER_FRONTEND_VARIANT GNU) | |
| set(CMAKE_MT "") | |
| set(CMAKE_TAPI "CMAKE_TAPI-NOTFOUND") | |
| set(CMAKE_COMPILER_IS_GNUCC 1) | |
| set(CMAKE_C_COMPILER_LOADED 1) | |
| set(CMAKE_C_COMPILER_WORKS TRUE) | |
| set(CMAKE_C_ABI_COMPILED TRUE) | |
| set(CMAKE_C_COMPILER_ENV_VAR "CC") | |
| set(CMAKE_C_COMPILER_ID_RUN 1) | |
| set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m) | |
| set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC) | |
| set(CMAKE_C_LINKER_PREFERENCE 10) | |
| set(CMAKE_C_LINKER_DEPFILE_SUPPORTED TRUE) | |
| set(CMAKE_LINKER_PUSHPOP_STATE_SUPPORTED TRUE) | |
| set(CMAKE_C_LINKER_PUSHPOP_STATE_SUPPORTED TRUE) | |
| # Save compiler ABI information. | |
| set(CMAKE_C_SIZEOF_DATA_PTR "8") | |
| set(CMAKE_C_COMPILER_ABI "ELF") | |
| set(CMAKE_C_BYTE_ORDER "LITTLE_ENDIAN") | |
| set(CMAKE_C_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") | |
| if(CMAKE_C_SIZEOF_DATA_PTR) | |
| set(CMAKE_SIZEOF_VOID_P "${CMAKE_C_SIZEOF_DATA_PTR}") | |
| endif() | |
| if(CMAKE_C_COMPILER_ABI) | |
| set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_C_COMPILER_ABI}") | |
| endif() | |
| if(CMAKE_C_LIBRARY_ARCHITECTURE) | |
| set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") | |
| endif() | |
| set(CMAKE_C_CL_SHOWINCLUDES_PREFIX "") | |
| if(CMAKE_C_CL_SHOWINCLUDES_PREFIX) | |
| set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_C_CL_SHOWINCLUDES_PREFIX}") | |
| endif() | |
| set(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/11/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include") | |
| set(CMAKE_C_IMPLICIT_LINK_LIBRARIES "gcc;gcc_s;c;gcc;gcc_s") | |
| set(CMAKE_C_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/11;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib;/usr/local/cuda/lib64/stubs") | |
| set(CMAKE_C_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") | |