AmdGoose's picture
Add INT8 weight-only quantized transformer for FLUX.2-dev
d98adcb verified
raw
history blame contribute delete
348 Bytes
"""Upload the INT8 weight-only quantized FLUX.2-dev transformer to the Hub.

Creates the target model repo (idempotently) and pushes the contents of the
current working directory to it.
"""
from huggingface_hub import HfApi

# Target repository and the commit message used for the upload.
REPO_ID = "AmdGoose/FLUX.2-dev-transformer-int8wo"
COMMIT_MESSAGE = "Add INT8 weight-only quantized transformer for FLUX.2-dev"


def main() -> None:
    """Create the model repo if needed and upload the current folder."""
    api = HfApi()
    # exist_ok=True makes the script safe to re-run against an existing repo.
    api.create_repo(repo_id=REPO_ID, repo_type="model", exist_ok=True)
    # NOTE: folder_path="." uploads everything in the working directory,
    # including this script — presumably intentional; confirm before reuse.
    api.upload_folder(
        repo_id=REPO_ID,
        repo_type="model",  # keep consistent with create_repo above
        folder_path=".",
        commit_message=COMMIT_MESSAGE,
    )
    print(f"Upload complete: {REPO_ID}")


if __name__ == "__main__":
    main()