Text-to-Image
Diffusers
VersatileDiffusionPipeline
image-to-text
image-to-image
text-to-text
image-editing
image-variation
generation
vision
Instructions to use shi-labs/versatile-diffusion with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Diffusers
How to use shi-labs/versatile-diffusion with Diffusers:
pip install -U diffusers transformers accelerate
import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", dtype=torch.bfloat16, device_map="cuda")

prompt = "A high tech solarpunk utopia in the Amazon rainforest"
image = pipe(prompt).images[0]
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- Draw Things
- DiffusionBee
{
  "_class_name": "UNet2DConditionModel",
  "_diffusers_version": "0.8.0.dev0",
  "act_fn": "silu",
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "center_input_sample": false,
  "cross_attention_dim": 768,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "out_channels": 4,
  "sample_size": 64,
  "up_block_types": [
    "UpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D"
  ]
}