Instructions for using ayan4m1/trinart_diffusers_v2 with libraries, inference providers, notebooks, and local apps.
How to use ayan4m1/trinart_diffusers_v2 with Diffusers:
```sh
pip install -U diffusers transformers accelerate
```
```python
import torch
from diffusers import DiffusionPipeline

# switch device_map to "mps" for Apple devices;
# on older diffusers versions, use torch_dtype instead of dtype
pipe = DiffusionPipeline.from_pretrained(
    "ayan4m1/trinart_diffusers_v2",
    dtype=torch.bfloat16,
    device_map="cuda",
)

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
```
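For reproducible outputs, a seeded generator can be passed to the pipeline call. A minimal sketch continuing from the snippet above (`pipe` and `prompt` already defined); the step count and filename are illustrative choices, not model requirements:

```python
# seed the sampler so repeated runs produce the same image
generator = torch.Generator("cuda").manual_seed(42)

# num_inference_steps trades speed for quality; 30 is an arbitrary example
image = pipe(prompt, num_inference_steps=30, generator=generator).images[0]
image.save("astronaut.png")  # filename is arbitrary
```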
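The configuration below appears to be the pipeline's model_index.json, which maps each component of the pipeline to the library and class that implement it: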
```json
{
  "_class_name": "LDMTextToImagePipeline",
  "_diffusers_version": "0.4.0.dev0",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "DDIMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
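Because each component is declared individually, the loaded pipeline can be inspected or have parts swapped after loading. A minimal sketch, assuming the `pipe` object from the snippet above; replacing the default DDIMScheduler with DPMSolverMultistepScheduler is an illustrative example, not a recommendation from the model card:

```python
from diffusers import DPMSolverMultistepScheduler

# the attributes on the pipeline correspond to the keys in the config
print(pipe.scheduler.__class__.__name__)  # DDIMScheduler
print(pipe.unet.__class__.__name__)       # UNet2DConditionModel

# swap the scheduler while reusing the existing scheduler configuration
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```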