Instructions for using ayushtues/blipdiffusion with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Diffusers
How to use ayushtues/blipdiffusion with Diffusers:
```bash
pip install -U diffusers transformers accelerate
```

```python
import torch
from diffusers import DiffusionPipeline

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained(
    "ayushtues/blipdiffusion", dtype=torch.bfloat16, device_map="cuda"
)

prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt).images[0]
```

A subject-driven sketch with `BlipDiffusionPipeline` is shown after the notebook links below.
- Notebooks
- Google Colab
- Kaggle
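Since BLIP-Diffusion is a subject-driven generation model, the prompt-only call above may not exercise its main capability. Below is a minimal sketch of subject-driven generation with Diffusers' `BlipDiffusionPipeline`, assuming this checkpoint follows the standard BLIP-Diffusion layout; the reference image path and the subject categories are placeholders.

```python
import torch
from diffusers.pipelines import BlipDiffusionPipeline
from diffusers.utils import load_image

# Assumption: this repo loads as a BLIP-Diffusion pipeline; use "mps" or "cpu" if CUDA is unavailable.
pipe = BlipDiffusionPipeline.from_pretrained(
    "ayushtues/blipdiffusion", torch_dtype=torch.float16
).to("cuda")

reference_image = load_image("dog.jpg")  # placeholder reference image of the subject
source_subject = "dog"                   # category of the subject in the reference image
target_subject = "dog"                   # category of the subject to generate

output = pipe(
    "swimming underwater",               # scene description for the target subject
    reference_image,
    source_subject,
    target_subject,
    guidance_scale=7.5,
    num_inference_steps=25,
    height=512,
    width=512,
).images
output[0].save("blipdiffusion_output.png")
```

The pipeline re-contextualizes the subject from the reference image into the prompted scene, so the output keeps the subject's appearance while following the text prompt.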
Vision encoder configuration (`Blip2VisionModel`, config.json):

```json
{
  "_name_or_path": "E:/diffusers/cache/vit",
  "architectures": [
    "Blip2VisionModel"
  ],
  "attention_dropout": 0.0,
  "hidden_act": "quick_gelu",
  "hidden_size": 1024,
  "image_size": 224,
  "initializer_range": 1e-10,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "model_type": "blip_2_vision_model",
  "num_attention_heads": 16,
  "num_hidden_layers": 23,
  "patch_size": 14,
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.31.0"
}
```
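For reference, the sketch below shows how these values map onto the `Blip2VisionConfig` / `Blip2VisionModel` classes in transformers. It builds a randomly initialized module purely to illustrate the geometry the config describes; it does not load this repository's weights, and the field values are copied directly from the JSON above.

```python
from transformers import Blip2VisionConfig, Blip2VisionModel

# Re-create the vision encoder config shown above (illustrative only).
vision_config = Blip2VisionConfig(
    hidden_size=1024,
    intermediate_size=4096,
    num_hidden_layers=23,
    num_attention_heads=16,
    image_size=224,
    patch_size=14,
    hidden_act="quick_gelu",
    layer_norm_eps=1e-05,
    attention_dropout=0.0,
    initializer_range=1e-10,
    qkv_bias=True,
)

# A 224x224 input split into 14x14 patches gives (224 / 14)**2 = 256 patch tokens,
# each projected into a 1024-dimensional hidden state across 23 transformer layers.
model = Blip2VisionModel(vision_config)  # randomly initialized, no pretrained weights
print(model.config.num_hidden_layers)    # 23
```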