---
library_name: transformers
base_model:
- zai-org/GLM-OCR
---
| |
This tiny model is intended for debugging. It is randomly initialized, using a configuration adapted from [zai-org/GLM-OCR](https://huggingface.co/zai-org/GLM-OCR).
|
|
| File path | Size |
|------|------|
| model.safetensors | 3.8MB |
|
|
|
|
### Example usage:
|
|
```python
import torch
from transformers import AutoModelForImageTextToText, AutoProcessor

REPO_ID = "tiny-random/glm-ocr"

# Load the tiny debug checkpoint and its processor.
model = AutoModelForImageTextToText.from_pretrained(
    REPO_ID,
    dtype=torch.bfloat16,
    device_map="cuda",
)
processor = AutoProcessor.from_pretrained(REPO_ID)

# A single user turn: an image URL followed by a text instruction.
image_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": image_url},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Render the chat template to tensors and move them to the model's device.
model_inputs = processor.apply_chat_template(
    conversation,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)
model_inputs.pop("token_type_ids", None)  # generate() does not consume these

token_ids = model.generate(**model_inputs, max_new_tokens=16)
# Keep special tokens visible: useful when eyeballing a debug model's output.
print(processor.decode(token_ids[0], skip_special_tokens=False))
```
|
|
### Code used to create this repo:
|
|
<details>
<summary>Click to expand</summary>
|
|
```python
"""Build the tiny-random GLM-OCR debug checkpoint.

Downloads the upstream zai-org/GLM-OCR config, shrinks the text and vision
towers to a handful of tiny layers, randomly initializes the weights, appends
an MTP (multi-token prediction) layer, and saves the result — together with
the unmodified upstream processor — into `save_folder`.
"""
import json
from copy import deepcopy

import torch
import torch.nn as nn
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoProcessor,
    GenerationConfig,
    GlmOcrForConditionalGeneration,
    set_seed,
)

source_model_id = "zai-org/GLM-OCR"
save_folder = "/tmp/tiny-random/glm-ocr"

# The processor (tokenizer + image processor) is reused as-is.
processor = AutoProcessor.from_pretrained(
    source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

# Start from the upstream config and shrink every size-relevant field.
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json: dict = json.load(f)

config_json['text_config'].update({
    "head_dim": 32,
    "hidden_size": 8,
    "intermediate_size": 64,
    "num_attention_heads": 8,
    "num_hidden_layers": 2,
    "num_key_value_heads": 4,
    "rope_parameters": {
        "rope_type": "default",
        "mrope_section": [4, 4, 8],
        "partial_rotary_factor": 1.0,
        "rope_theta": 10000,
    },
})
config_json['vision_config'].update({
    "hidden_size": 32,
    "depth": 2,
    "num_heads": 1,
    "intermediate_size": 64,
    "out_hidden_size": 8,
})
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)

# Instantiate directly in bfloat16 so the saved checkpoint stays small.
torch.set_default_dtype(torch.bfloat16)
model = GlmOcrForConditionalGeneration(config)
torch.set_default_dtype(torch.float32)

# Carry over the upstream generation config when one exists; force sampling
# so repeated runs of the debug model produce varied outputs.
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
    model.generation_config.do_sample = True
    print(model.generation_config)

model = model.cpu()
set_seed(42)
n_params = sum(p.numel() for p in model.parameters())
with torch.no_grad():
    # Deterministic random init; log each tensor's share of the parameters.
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, p.numel() / n_params * 100, '%')

# Append an extra MTP (multi-token prediction) layer: a regular decoder
# layer's attention/MLP (copied from layer 1) plus its own embedding,
# shared head, projection, and norms.
set_seed(42)
text_config = config.get_text_config()
model.model.language_model.layers.append(nn.ModuleDict(dict(
    shared_head=nn.ModuleDict(dict(
        norm=nn.RMSNorm(text_config.hidden_size),
        head=deepcopy(model.model.language_model.embed_tokens),
    )),
    embed_tokens=deepcopy(model.model.language_model.embed_tokens),
    eh_proj=nn.Linear(text_config.hidden_size * 2,
                      text_config.hidden_size, bias=False),
    enorm=nn.RMSNorm(text_config.hidden_size),
    hnorm=nn.RMSNorm(text_config.hidden_size),
    input_layernorm=nn.RMSNorm(text_config.hidden_size),
    post_mlp_layernorm=nn.RMSNorm(text_config.hidden_size),
    post_attention_layernorm=nn.RMSNorm(text_config.hidden_size),
    post_self_attn_layernorm=nn.RMSNorm(text_config.hidden_size),
    self_attn=deepcopy(model.model.language_model.layers[1].self_attn),
    mlp=deepcopy(model.model.language_model.layers[1].mlp),
)))
model.save_pretrained(save_folder)
print(model)
```
|
|
</details>
|
|
### Printing the model:
|
|
<details><summary>Click to expand</summary>
|
|
```text
GlmOcrForConditionalGeneration(
  (model): GlmOcrModel(
    (visual): GlmOcrVisionModel(
      (patch_embed): GlmOcrVisionPatchEmbed(
        (proj): Conv3d(3, 32, kernel_size=(2, 14, 14), stride=(2, 14, 14))
      )
      (rotary_pos_emb): GlmOcrVisionRotaryEmbedding()
      (blocks): ModuleList(
        (0-1): 2 x GlmOcrVisionBlock(
          (norm1): GlmOcrRMSNorm((32,), eps=1e-05)
          (norm2): GlmOcrRMSNorm((32,), eps=1e-05)
          (attn): GlmOcrVisionAttention(
            (qkv): Linear(in_features=32, out_features=96, bias=True)
            (proj): Linear(in_features=32, out_features=32, bias=True)
            (q_norm): GlmOcrRMSNorm((32,), eps=1e-05)
            (k_norm): GlmOcrRMSNorm((32,), eps=1e-05)
          )
          (mlp): GlmOcrVisionMlp(
            (gate_proj): Linear(in_features=32, out_features=64, bias=True)
            (up_proj): Linear(in_features=32, out_features=64, bias=True)
            (down_proj): Linear(in_features=64, out_features=32, bias=True)
            (act_fn): SiLUActivation()
          )
        )
      )
      (merger): GlmOcrVisionPatchMerger(
        (proj): Linear(in_features=8, out_features=8, bias=False)
        (post_projection_norm): LayerNorm((8,), eps=1e-05, elementwise_affine=True)
        (gate_proj): Linear(in_features=8, out_features=24, bias=False)
        (up_proj): Linear(in_features=8, out_features=24, bias=False)
        (down_proj): Linear(in_features=24, out_features=8, bias=False)
        (act1): GELU(approximate='none')
        (act_fn): SiLUActivation()
      )
      (downsample): Conv2d(32, 8, kernel_size=(2, 2), stride=(2, 2))
      (post_layernorm): GlmOcrRMSNorm((32,), eps=1e-05)
    )
    (language_model): GlmOcrTextModel(
      (embed_tokens): Embedding(59392, 8, padding_idx=59246)
      (layers): ModuleList(
        (0-1): 2 x GlmOcrTextDecoderLayer(
          (self_attn): GlmOcrTextAttention(
            (q_proj): Linear(in_features=8, out_features=256, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (mlp): GlmOcrTextMLP(
            (gate_up_proj): Linear(in_features=8, out_features=128, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (activation_fn): SiLUActivation()
          )
          (input_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
          (post_attention_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
          (post_self_attn_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
          (post_mlp_layernorm): GlmOcrRMSNorm((8,), eps=1e-05)
        )
        (2): ModuleDict(
          (shared_head): ModuleDict(
            (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
            (head): Embedding(59392, 8, padding_idx=59246)
          )
          (embed_tokens): Embedding(59392, 8, padding_idx=59246)
          (eh_proj): Linear(in_features=16, out_features=8, bias=False)
          (enorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (hnorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (input_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_mlp_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_attention_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_self_attn_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (self_attn): GlmOcrTextAttention(
            (q_proj): Linear(in_features=8, out_features=256, bias=False)
            (k_proj): Linear(in_features=8, out_features=128, bias=False)
            (v_proj): Linear(in_features=8, out_features=128, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (mlp): GlmOcrTextMLP(
            (gate_up_proj): Linear(in_features=8, out_features=128, bias=False)
            (down_proj): Linear(in_features=64, out_features=8, bias=False)
            (activation_fn): SiLUActivation()
          )
        )
      )
      (norm): GlmOcrRMSNorm((8,), eps=1e-05)
      (rotary_emb): GlmOcrTextRotaryEmbedding()
    )
  )
  (lm_head): Linear(in_features=8, out_features=59392, bias=False)
)
```
|
|
</details>