Instructions for using MiniMaxAI/MiniMax-Text-01-hf with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use MiniMaxAI/MiniMax-Text-01-hf with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-Text-01-hf")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
```

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-Text-01-hf")
model = AutoModelForCausalLM.from_pretrained("MiniMaxAI/MiniMax-Text-01-hf")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use MiniMaxAI/MiniMax-Text-01-hf with vLLM:
Install from pip and serve the model:
```bash
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "MiniMaxAI/MiniMax-Text-01-hf"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "MiniMaxAI/MiniMax-Text-01-hf",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
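Because the vLLM server exposes an OpenAI-compatible API, you can also call it from Python instead of curl. The snippet below is a minimal sketch, assuming the server started above is listening on localhost:8000 and the `openai` Python package is installed; the `api_key` value is a placeholder, since vLLM does not check it by default.

```python
# Minimal Python client for the OpenAI-compatible vLLM server started above.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",
    api_key="EMPTY",  # placeholder; the key is not validated unless the server is configured to require one
)

response = client.chat.completions.create(
    model="MiniMaxAI/MiniMax-Text-01-hf",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)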
Use Docker

```bash
docker model run hf.co/MiniMaxAI/MiniMax-Text-01-hf
```
- SGLang
How to use MiniMaxAI/MiniMax-Text-01-hf with SGLang:
Install from pip and serve the model:
```bash
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "MiniMaxAI/MiniMax-Text-01-hf" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "MiniMaxAI/MiniMax-Text-01-hf",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
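The SGLang server is OpenAI-compatible as well, so the same Python client pattern works against port 30000. The sketch below additionally streams tokens as they are produced; it assumes the `openai` package is installed and that the server supports streaming responses (the `api_key` is again a placeholder).

```python
# Stream a chat completion from the OpenAI-compatible SGLang server started above.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:30000/v1", api_key="EMPTY")  # key is a placeholder

stream = client.chat.completions.create(
    model="MiniMaxAI/MiniMax-Text-01-hf",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    stream=True,  # yield partial deltas instead of waiting for the full reply
)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```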
Use Docker images

```bash
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "MiniMaxAI/MiniMax-Text-01-hf" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "MiniMaxAI/MiniMax-Text-01-hf",
    "messages": [
      {
        "role": "user",
        "content": "What is the capital of France?"
      }
    ]
  }'
```
- Docker Model Runner
How to use MiniMaxAI/MiniMax-Text-01-hf with Docker Model Runner:
```bash
docker model run hf.co/MiniMaxAI/MiniMax-Text-01-hf
```
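The following standalone script runs the model across multiple GPUs with optional int8 weight-only quantization (via transformers' QuantoConfig) and a manually constructed device_map that spreads the decoder layers evenly over the available devices: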
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, QuantoConfig, GenerationConfig
import torch
import argparse

"""
usage:
    export SAFETENSORS_FAST_GPU=1
    python main.py --quant_type int8 --world_size 8 --model_id <model_path>
"""


def generate_quanto_config(hf_config: AutoConfig, quant_type: str):
    # Build the quantization config. For int8 weight-only quantization, keep the
    # lm_head, the embeddings, and each layer's coefficient and MoE gate in full precision.
    QUANT_TYPE_MAP = {
        "default": None,
        "int8": QuantoConfig(
            weights="int8",
            modules_to_not_convert=[
                "lm_head",
                "embed_tokens",
            ] + [f"model.layers.{i}.coefficient" for i in range(hf_config.num_hidden_layers)]
            + [f"model.layers.{i}.block_sparse_moe.gate" for i in range(hf_config.num_hidden_layers)]
        ),
    }
    return QUANT_TYPE_MAP[quant_type]


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--quant_type", type=str, default="default", choices=["default", "int8"])
    parser.add_argument("--model_id", type=str, required=True)
    parser.add_argument("--world_size", type=int, required=True)
    return parser.parse_args()


def check_params(args, hf_config: AutoConfig):
    # Sanity-check the requested parallelism against the model configuration.
    if args.quant_type == "int8":
        assert args.world_size >= 8, "int8 weight-only quantization requires at least 8 GPUs"
    assert hf_config.num_hidden_layers % args.world_size == 0, \
        f"num_hidden_layers({hf_config.num_hidden_layers}) must be divisible by world_size({args.world_size})"


def main():
    args = parse_args()
    print("\n=============== Argument ===============")
    for key in vars(args):
        print(f"{key}: {vars(args)[key]}")
    print("========================================")

    model_id = args.model_id

    hf_config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
    check_params(args, hf_config)
    quantization_config = generate_quanto_config(hf_config, args.quant_type)

    # Manual device map: embeddings on the first GPU, final norm and lm_head on the last,
    # and an equal contiguous slice of decoder layers on each GPU.
    device_map = {
        'model.embed_tokens': 'cuda:0',
        'model.norm': f'cuda:{args.world_size - 1}',
        'lm_head': f'cuda:{args.world_size - 1}',
    }
    layers_per_device = hf_config.num_hidden_layers // args.world_size
    for i in range(args.world_size):
        for j in range(layers_per_device):
            device_map[f'model.layers.{i * layers_per_device + j}'] = f'cuda:{i}'

    # Build the prompt with the model's chat template.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    prompt = "Hello!"
    messages = [
        {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant created by Minimax based on MiniMax-Text-01 model."}]},
        {"role": "user", "content": [{"type": "text", "text": prompt}]},
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer(text, return_tensors="pt").to("cuda")

    # Load the model with the manual device map; weights are quantized on the fly
    # when a quantization_config is provided.
    quantized_model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype="bfloat16",
        device_map=device_map,
        quantization_config=quantization_config,
        trust_remote_code=True,
        offload_buffers=True,
    )
    generation_config = GenerationConfig(
        max_new_tokens=20,
        eos_token_id=200020,  # model-specific end-of-sequence token id
        use_cache=True,
    )

    generated_ids = quantized_model.generate(**model_inputs, generation_config=generation_config)
    print(f"generated_ids: {generated_ids}")
    # Strip the prompt tokens so only the newly generated tokens are decoded.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print(response)


if __name__ == "__main__":
    main()
```
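A note on the layout: embed_tokens is pinned to the first GPU and the final norm and lm_head to the last, while the decoder layers are split into equal contiguous blocks, which is why check_params requires num_hidden_layers to be divisible by world_size. The int8 config also excludes lm_head, embed_tokens, and each layer's coefficient and MoE gate from conversion, keeping those numerically sensitive modules in bfloat16.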