{
"Id": "0B6D4B13-8BED-46B3-A149-99134497C8CC",
"FileVersion": "1",
"Created": "2025-03-07T00:00:00",
"IsProtected": false,
"Name": "FLUX.1-Dev",
"ImageIcon": "https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/Icon.png",
"Author": "BlackForestLabs",
"Description": "FLUX.1 [Dev] is a 12-billion parameter rectified flow transformer designed to generate detailed and high-quality images directly from text descriptions. This model offers impressive capabilities in transforming prompts into vivid and accurate visual representations.",
"Rank": 29,
"Group": "Online",
"Template": "FluxDev",
"Category": "StableDiffusion",
"StableDiffusionTemplate": {
"PipelineType": "Flux",
"ModelType": "Base",
"SampleSize": 1024,
"TokenizerLength": 768,
"Tokenizer2Limit": 512,
"Optimization": "None",
"DiffuserTypes": [
"TextToImage",
"ImageToImage"
],
"SchedulerDefaults": {
"SchedulerType": "FlowMatchEulerDiscrete",
"Steps": 28,
"StepsMin": 1,
"StepsMax": 100,
"Guidance": 1,
"GuidanceMin": 1,
"GuidanceMax": 2,
"Guidance2": 3.5,
"Guidance2Min": 0,
"Guidance2Max": 15,
"TimestepSpacing": "Linspace",
"BetaSchedule": "ScaledLinear",
"BetaStart": 0.00085,
"BetaEnd": 0.012
}
},
"MemoryMin": 26,
"MemoryMax": 36,
"DownloadSize": 34,
"Website": "https://blackforestlabs.ai",
"Licence": "https://github.com/black-forest-labs/flux/blob/main/model_licenses/LICENSE-FLUX1-dev",
"LicenceType": "NonCommercial",
"IsLicenceAccepted": false,
"Repository": "https://huggingface.co/TensorStack/FLUX.1-Dev-amuse",
"RepositoryFiles": [
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/text_encoder/model.onnx",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/text_encoder/model.onnx.data",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/text_encoder_2/model.onnx",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/text_encoder_2/model.onnx.data",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/tokenizer/merges.txt",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/tokenizer/special_tokens_map.json",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/tokenizer/vocab.json",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/tokenizer_2/special_tokens_map.json",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/tokenizer_2/spiece.model",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/tokenizer_2/tokenizer.json",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/transformer/model.onnx",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/transformer/model.onnx.data",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/vae_decoder/model.onnx",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/vae_decoder/model.onnx.data",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/vae_encoder/model.onnx",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/vae_encoder/model.onnx.data",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/amuse_template.json",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/README.md"
],
"PreviewImages": [
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/Sample.png",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/Sample2.png",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/Sample3.png",
"https://huggingface.co/TensorStack/FLUX.1-Dev-amuse/resolve/main/Sample4.png"
],
"Tags": []
}