# Install runtime dependencies (IPython/Colab shell magic — only valid inside a notebook).
!pip install fastapi uvicorn diffusers transformers torch accelerate safetensors --quiet
!pip install nest_asyncio pyngrok --quiet
|
|
import base64
import os
from io import BytesIO

import nest_asyncio
import torch
import uvicorn
from diffusers import DiffusionPipeline
from fastapi import FastAPI
from pyngrok import ngrok
|
|
# NOTE(security): an ngrok auth token was hard-coded here. A secret committed
# to source control must be treated as compromised — rotate it in the ngrok
# dashboard. Prefer supplying the token via the NGROK_AUTH_TOKEN environment
# variable; the old literal remains only as a backward-compatible fallback.
authtoken = os.environ.get(
    "NGROK_AUTH_TOKEN",
    "2ufMyhX6auLeWfr87bkTdZHjTvj_5VijMy2cXE411GbRnJ13n",
)
ngrok.set_auth_token(authtoken)
|
|
# FastAPI application instance; route handlers below register themselves
# against it via decorators.
app = FastAPI()
|
|
# Load the Animagine XL 3.1 diffusion pipeline in half precision (fp16) and
# move it to the GPU. Model weights are downloaded on first run.
model_id = "cagliostrolab/animagine-xl-3.1"
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
|
|
@app.get("/")
def home():
    """Health-check endpoint: confirms the API is up and serving."""
    payload = {"message": "API Animagine-XL-3.1 is running on GPU"}
    return payload
|
|
@app.get("/generate")
def generate_image(prompt: str = "1girl, anime style, masterpiece, best quality"):
    """Run the diffusion pipeline on *prompt* and return the image.

    Returns a JSON object with a ``status`` field and the generated picture
    as a base64-encoded PNG under ``base64_image``.
    """
    # .images[0] is the first (and only) PIL image produced by the pipeline.
    result = pipe(
        prompt,
        num_inference_steps=30,
        guidance_scale=7.0,
    )
    picture = result.images[0]

    # Serialize to PNG in memory, then base64-encode for JSON transport.
    with BytesIO() as buffer:
        picture.save(buffer, format="PNG")
        encoded = base64.b64encode(buffer.getvalue()).decode()

    return {
        "status": "success",
        "base64_image": encoded,
    }
|
|
if __name__ == "__main__":
    # Patch the event loop so uvicorn can run inside an already-running loop
    # (e.g. a Jupyter/Colab kernel).
    nest_asyncio.apply()

    # Expose local port 8000 through an ngrok tunnel and report the public URL.
    tunnel = ngrok.connect(8000)
    print(f"API đang chạy tại: {tunnel.public_url}")

    uvicorn.run(app, host="0.0.0.0", port=8000)
|
|