model_name | checkpoint | original_modeling_code | original_source | current_modeling_code | current_modular_code | bases |
|---|---|---|---|---|---|---|
arcee | arcee-ai/model-id | null | null | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/arcee/modular_arcee.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_arcee.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional
import torch
from torch import nn
from transformers.utils import auto_docstring
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import (
GenericForQuestionAnswering,
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, can_return_tuple
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from .configuration_arcee import ArceeConfig
class ArceeMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.up_proj(x)))
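# Illustrative sketch (not part of the generated file): unlike LlamaMLP, ArceeMLP has no
# gate projection -- the dataflow is up_proj -> activation -> down_proj. Sizes below are
# made up, and the default "relu2" activation is assumed to behave like relu(x) ** 2.
def _arcee_mlp_shape_sketch():
    import torch
    from torch import nn

    hidden_size, intermediate_size = 8, 32
    up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
    down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
    x = torch.randn(2, 5, hidden_size)  # (batch, seq_len, hidden_size)
    y = down_proj(torch.relu(up_proj(x)) ** 2)  # same dataflow as ArceeMLP.forward
    assert y.shape == x.shape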
@use_kernel_forward_from_hub("RMSNorm")
class ArceeRMSNorm(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
ArceeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class ArceeRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: ArceeConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: ArceeConfig | None = None,
device: Optional["torch.device"] = None,
seq_len: int | None = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
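# Illustrative sketch (not part of the generated file): the inverse frequencies follow the
# original RoPE formula 1 / theta**(2i/d), and the forward pass above turns them into
# per-position cos/sin tables of shape (batch, seq_len, head_dim). The theta, head_dim and
# seq_len values below are made up for the example.
def _rope_table_sketch():
    import torch

    theta, head_dim, seq_len = 10000.0, 8, 6
    inv_freq = 1.0 / (theta ** (torch.arange(0, head_dim, 2, dtype=torch.float) / head_dim))
    position_ids = torch.arange(seq_len, dtype=torch.float)[None, :]  # (batch=1, seq_len)
    freqs = position_ids[:, :, None] * inv_freq[None, None, :]        # (1, seq_len, head_dim/2)
    emb = torch.cat((freqs, freqs), dim=-1)                           # (1, seq_len, head_dim)
    cos, sin = emb.cos(), emb.sin()
    assert cos.shape == (1, seq_len, head_dim) and sin.shape == cos.shape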
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
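# Illustrative sketch (not part of the generated file): with q/k shaped
# (batch, num_heads, seq_len, head_dim) and cos/sin shaped (batch, seq_len, head_dim),
# unsqueeze_dim=1 inserts the missing head axis so the tables broadcast. Because each
# half-dim pair is rotated by the same angle, per-position norms are preserved.
# All sizes and angles below are made up.
def _apply_rope_broadcast_sketch():
    import torch

    batch, heads, seq_len, head_dim = 2, 4, 6, 8
    q = torch.randn(batch, heads, seq_len, head_dim)
    k = torch.randn(batch, heads, seq_len, head_dim)
    half_angles = torch.randn(batch, seq_len, head_dim // 2)
    angles = torch.cat((half_angles, half_angles), dim=-1)  # same layout as the rotary embedding
    cos, sin = angles.cos(), angles.sin()
    q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1)
    assert q_rot.shape == q.shape and k_rot.shape == k.shape
    torch.testing.assert_close(q_rot.norm(dim=-1), q.norm(dim=-1))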
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
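# Illustrative sketch (not part of the generated file): repeat_kv above matches
# torch.repeat_interleave along the head dimension, as the docstring states. Shapes are made up.
def _repeat_kv_equivalence_sketch():
    import torch

    kv = torch.randn(2, 3, 5, 8)  # (batch, num_key_value_heads, seq_len, head_dim)
    n_rep = 4
    torch.testing.assert_close(repeat_kv(kv, n_rep), torch.repeat_interleave(kv, repeats=n_rep, dim=1))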
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
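# Illustrative sketch (not part of the generated file): eager_attention_forward above is
# plain softmax(Q K^T * scaling + mask) V with GQA key/value repetition. The tiny shapes
# below are made up; the attention mask is an additive float mask with -inf above the diagonal.
def _eager_attention_reference_sketch():
    import torch

    batch, heads, kv_heads, q_len, head_dim = 1, 4, 2, 5, 8
    q = torch.randn(batch, heads, q_len, head_dim)
    k = torch.randn(batch, kv_heads, q_len, head_dim)
    v = torch.randn(batch, kv_heads, q_len, head_dim)
    mask = torch.triu(torch.full((q_len, q_len), float("-inf")), diagonal=1)[None, None]
    k_rep, v_rep = repeat_kv(k, heads // kv_heads), repeat_kv(v, heads // kv_heads)
    scores = q @ k_rep.transpose(2, 3) * head_dim**-0.5 + mask
    out = torch.softmax(scores, dim=-1) @ v_rep  # (batch, heads, q_len, head_dim)
    assert out.shape == (batch, heads, q_len, head_dim)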
@use_kernelized_func(apply_rotary_pos_emb)
class ArceeAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: ArceeConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
attention_mask: torch.Tensor | None = None,
past_key_values: Cache | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
self.config._attn_implementation, eager_attention_forward
)
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class ArceeDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: ArceeConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = ArceeAttention(config=config, layer_idx=layer_idx)
self.mlp = ArceeMLP(config)
self.input_layernorm = ArceeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = ArceeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool | None = False,
cache_position: torch.LongTensor | None = None,
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
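# Illustrative sketch (not part of the generated file): the decoder layer above is a
# standard pre-norm block -- each sublayer sees a normalized input and its output is added
# to the un-normalized residual. Plain Linear/LayerNorm modules stand in for the attention,
# MLP and RMSNorm sublayers; all sizes are made up.
def _pre_norm_residual_sketch():
    import torch
    from torch import nn

    hidden_size = 16
    input_norm, post_attn_norm = nn.LayerNorm(hidden_size), nn.LayerNorm(hidden_size)
    attn_like, mlp_like = nn.Linear(hidden_size, hidden_size), nn.Linear(hidden_size, hidden_size)
    hidden_states = torch.randn(2, 5, hidden_size)
    hidden_states = hidden_states + attn_like(input_norm(hidden_states))      # self-attention sublayer
    hidden_states = hidden_states + mlp_like(post_attn_norm(hidden_states))   # feed-forward sublayer
    assert hidden_states.shape == (2, 5, hidden_size)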
@auto_docstring
class ArceePreTrainedModel(PreTrainedModel):
config: ArceeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["ArceeDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": ArceeDecoderLayer,
"attentions": ArceeAttention,
}
@auto_docstring
class ArceeModel(ArceePreTrainedModel):
def __init__(self, config: ArceeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[ArceeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = ArceeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = ArceeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@merge_with_config_defaults
@capture_outputs
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
cache_position: torch.LongTensor | None = None,
use_cache: bool | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = (
torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForCausalLM(ArceePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_gather_output"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = ArceeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
logits_to_keep: int | torch.Tensor = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, ArceeForCausalLM
>>> model = ArceeForCausalLM.from_pretrained("arcee-ai/AFM-4.5B")
>>> tokenizer = AutoTokenizer.from_pretrained("arcee-ai/AFM-4.5B")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForSequenceClassification(GenericForSequenceClassification, ArceePreTrainedModel):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForQuestionAnswering(GenericForQuestionAnswering, ArceePreTrainedModel):
base_model_prefix = "transformer" # For BC, where `transformer` was used instead of `model`
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForTokenClassification(GenericForTokenClassification, ArceePreTrainedModel):
pass
__all__ = [
"ArceeForCausalLM",
"ArceeForQuestionAnswering",
"ArceeForSequenceClassification",
"ArceeForTokenClassification",
"ArceeModel",
"ArceePreTrainedModel",
]
| # Copyright 2025 Arcee AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Arcee model."""
from transformers.utils import auto_docstring, logging
from ...modeling_rope_utils import RopeParameters
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
LlamaForCausalLM,
LlamaForQuestionAnswering,
LlamaForSequenceClassification,
LlamaForTokenClassification,
)
from ..nemotron.modeling_nemotron import NemotronMLP
logger = logging.get_logger(__name__)
class ArceeConfig(LlamaConfig):
r"""
This is the configuration class to store the configuration of a [`ArceeModel`]. It is used to instantiate an Arcee
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the AFM-4.5B-Base.
Pre-trained weights are available at
[arcee-ai/AFM-4.5B](https://huggingface.co/arcee-ai/AFM-4.5B)
and were used to build the examples below.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Arcee model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ArceeModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 18432):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. AFM-4.5B-Base supports up to 16384 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 128000):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 128001):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the up_proj and down_proj layers of the MLP.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import ArceeModel, ArceeConfig
>>> # Initializing an Arcee AFM-4.5B-Base style configuration
>>> configuration = ArceeConfig()
>>> # Initializing a model from the AFM-4.5B-Base style configuration
>>> model = ArceeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "arcee"
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
def __init__(
self,
vocab_size: int | None = 32000,
hidden_size: int | None = 2560,
intermediate_size: int | None = 18432,
num_hidden_layers: int | None = 32,
num_attention_heads: int | None = 32,
num_key_value_heads: int | None = None,
hidden_act: str | None = "relu2",
max_position_embeddings: int | None = 4096,
initializer_range: float | None = 0.02,
rms_norm_eps: float | None = 1e-5,
use_cache: bool | None = True,
pad_token_id: int | None = None,
bos_token_id: int | None = 128000,
eos_token_id: int | None = 128001,
tie_word_embeddings: bool | None = False,
rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
attention_bias: bool | None = False,
attention_dropout: float | None = 0.0,
mlp_bias: bool | None = False,
head_dim: int | None = None,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
intermediate_size=intermediate_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_key_value_heads=num_key_value_heads,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
initializer_range=initializer_range,
rms_norm_eps=rms_norm_eps,
use_cache=use_cache,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
rope_parameters=rope_parameters,
attention_bias=attention_bias,
attention_dropout=attention_dropout,
mlp_bias=mlp_bias,
head_dim=head_dim,
**kwargs,
)
del self.pretraining_tp
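# Illustrative sketch (not part of the generated file): a tiny, randomly initialized Arcee
# model built from a down-sized config, e.g. for a quick smoke test. All sizes here are
# made up and far smaller than the AFM-4.5B defaults documented above.
def _tiny_arcee_smoke_test_sketch():
    import torch

    config = ArceeConfig(
        vocab_size=128,
        hidden_size=32,
        intermediate_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=64,
    )
    model = ArceeForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    logits = model(input_ids=input_ids).logits
    assert logits.shape == (1, 8, config.vocab_size)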
class ArceeMLP(NemotronMLP):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForCausalLM(LlamaForCausalLM):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForSequenceClassification(LlamaForSequenceClassification):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForQuestionAnswering(LlamaForQuestionAnswering):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
class ArceeForTokenClassification(LlamaForTokenClassification):
pass
__all__ = [
"ArceeConfig",
"ArceeForCausalLM",
"ArceeForQuestionAnswering",
"ArceeForSequenceClassification",
"ArceeForTokenClassification",
"ArceeModel", # noqa: F822
"ArceePreTrainedModel", # noqa: F822
]
| [
"llama",
"nemotron"
] |
aria | rhymes-ai/Aria | null | null | # π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# This file was automatically generated from src/transformers/models/aria/modular_aria.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_aria.py file directly. One of our CI enforces this.
# π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨π¨
# Copyright 2024 The Rhymes-AI Teams Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutputWithPast,
BaseModelOutputWithPooling,
CausalLMOutputWithPast,
ModelOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, torch_compilable_check
from ...utils.generic import maybe_autocast, merge_with_config_defaults
from ...utils.output_capturing import capture_outputs
from ..auto import AutoModel
from .configuration_aria import AriaConfig, AriaTextConfig
@use_kernel_forward_from_hub("RMSNorm")
class AriaTextRMSNorm(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
AriaTextRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class AriaProjectorMLP(nn.Module):
"""
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
"""
def __init__(self, in_features, hidden_features, output_dim):
super().__init__()
self.linear_in = nn.Linear(in_features, hidden_features, bias=False)
self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)
self.act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_states = self.act(self.linear_in(hidden_states))
hidden_states = self.linear_out(hidden_states)
return hidden_states
class AriaCrossAttention(nn.Module):
"""
Aria Cross-Attention module.
Args:
config (`AriaConfig`):
The configuration to use.
"""
def __init__(self, config: AriaConfig, dropout_rate: float = 0):
super().__init__()
hidden_size = config.vision_config.hidden_size
num_heads = config.vision_config.num_attention_heads
self.num_heads = num_heads
self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False)
# Original code here: https://github.com/rhymes-ai/Aria/blob/719ff4e52b727443cba3793b0e27fe64e0244fe1/aria/model/projector.py#L48
self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
self.linear = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout_rate)
self.layer_norm = nn.LayerNorm(hidden_size)
self.layer_norm_kv = nn.LayerNorm(hidden_size)
def forward(self, key_value_states, hidden_states, attn_mask=None):
"""
Forward pass of the AriaCrossAttention module.
Args:
key_value_states (`torch.Tensor`):
Input tensor for key and value.
hidden_states (`torch.Tensor`):
Input tensor for query.
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
torch.Tensor:
Output tensor after cross-attention.
"""
query = self.q_proj(self.layer_norm(hidden_states))
key_value_states = self.layer_norm_kv(key_value_states)
key = self.k_proj(key_value_states)
value = self.v_proj(key_value_states)
attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)
attn_output = self.dropout(self.linear(attn_output))
return attn_output
class AriaProjector(nn.Module):
"""
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
"""
def __init__(
self,
config: AriaConfig,
):
super().__init__()
self.patch_to_query_dict = config.projector_patch_to_query_dict
self.in_features = config.vision_config.hidden_size
self.num_heads = config.vision_config.num_attention_heads
self.kv_dim = config.vision_config.hidden_size
self.hidden_features = config.text_config.hidden_size
self.output_dim = config.text_config.hidden_size
self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))
self.cross_attn = AriaCrossAttention(config)
self.layer_norm = nn.LayerNorm(self.in_features)
self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)
def forward(self, key_value_states: torch.Tensor, attn_mask: torch.Tensor | None = None):
"""
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
"""
batch_size, num_patches = key_value_states.shape[0], key_value_states.shape[1]
if num_patches not in self.patch_to_query_dict:
raise KeyError(
f"Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}."
)
query_num = self.patch_to_query_dict[num_patches]
queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)
if attn_mask is not None:
attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)
attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)
attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)
out = self.feed_forward(self.layer_norm(attention_out))
return out
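# Illustrative sketch (not part of the generated file): the projector above resamples a
# variable number of vision patches into a fixed set of learnable queries via cross-attention,
# then maps the result to the text hidden size. A minimal standalone version with made-up
# sizes (the real sizes come from the vision and text configs):
def _query_resampler_sketch():
    import torch
    from torch import nn

    vision_dim, text_dim, num_patches, query_num, batch = 16, 24, 10, 4, 2
    patches = torch.randn(batch, num_patches, vision_dim)
    queries = torch.zeros(query_num, vision_dim).unsqueeze(0).repeat(batch, 1, 1)
    cross_attn = nn.MultiheadAttention(vision_dim, num_heads=4, batch_first=True)
    attended, _ = cross_attn(queries, patches, patches)          # queries attend to patch features
    projected = nn.Linear(vision_dim, text_dim, bias=False)(attended)
    assert projected.shape == (batch, query_num, text_dim)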
class AriaSharedExpertsMLP(nn.Module):
"""
Shared Expert MLP for shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
"""
Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally inefficient, especially when dealing with a large number of experts.
Args:
token_states (torch.Tensor): Input tensor of shape (num_tokens, in_features).
expert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features).
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
num_tokens = token_states.shape[0]
out_features = expert_weights.shape[-1]
output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)
cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)
# Insert zero at the beginning for offset index's convenience
zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)
cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))
for expert_num in range(expert_weights.shape[0]):
start = cumsum_num_tokens[expert_num]
end = cumsum_num_tokens[expert_num + 1]
tokens = token_states[start:end]
out = torch.matmul(tokens, expert_weights[expert_num])
output[start:end] = out
return output
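# Illustrative sketch (not part of the generated file): sequential_experts_gemm above assumes
# tokens are already sorted by expert, so each expert multiplies a contiguous slice of the
# input by its own weight matrix. Toy sizes below are made up.
def _sequential_experts_gemm_sketch():
    import torch

    num_experts, in_features, out_features = 3, 4, 5
    tokens_per_expert = torch.tensor([2, 0, 3])              # expert 1 receives no tokens
    token_states = torch.randn(int(tokens_per_expert.sum()), in_features)
    expert_weights = torch.randn(num_experts, in_features, out_features)
    output = sequential_experts_gemm(token_states, expert_weights, tokens_per_expert)
    # The first two rows went through expert 0, the last three through expert 2.
    torch.testing.assert_close(output[:2], token_states[:2] @ expert_weights[0])
    torch.testing.assert_close(output[2:], token_states[2:] @ expert_weights[2])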
class AriaGroupedExpertsGemm(nn.Module):
"""
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
groups (`int`):
Number of expert groups.
"""
def __init__(self, in_features, out_features, groups):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.groups = groups
self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))
def forward(self, input, tokens_per_expert):
"""
Perform grouped matrix multiplication.
Args:
input (`torch.Tensor`):
Input tensor of shape (num_tokens, in_features).
tokens_per_expert (`torch.Tensor`):
Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
return sequential_experts_gemm(
input,
self.weight,
tokens_per_expert.cpu(),
)
class AriaExperts(nn.Module):
def __init__(self, config: AriaTextConfig) -> None:
super().__init__()
self.config = config
self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)
self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)
def route_tokens_to_experts(self, router_logits):
top_logits, top_indices = torch.topk(router_logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
return top_indices, scores
def forward(self, hidden_states, router_logits) -> torch.Tensor:
top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
original_dtype = top_k_index.dtype
tokens_per_expert = torch.histc(
top_k_index.flatten().to(torch.float32),
bins=self.config.moe_num_experts,
min=0,
max=self.config.moe_num_experts - 1,
).to(original_dtype)
indices = top_k_index
flatten_indices = indices.view(-1)
sorted_indices = torch.argsort(flatten_indices)
permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk)
fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
projection, gate = torch.chunk(fc1_output, 2, dim=-1)
fc1_output = nn.functional.silu(projection) * gate
expert_output = self.fc2(fc1_output, tokens_per_expert)
unpermuted_tokens = torch.zeros(
(top_k_weights.shape[0] * self.config.moe_topk, expert_output.size(1)),
dtype=expert_output.dtype,
device=expert_output.device,
)
unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
output = (unpermuted_tokens * top_k_weights.unsqueeze(-1)).sum(dim=1)
return output
class AriaTextMoELayer(nn.Module):
def __init__(self, config: AriaTextConfig):
super().__init__()
self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False)
self.experts = AriaExperts(config)
self.shared_experts = AriaSharedExpertsMLP(config)
self.config = config
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
original_shape = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
router_logits = self.router(hidden_states)
expert_output = self.experts(hidden_states, router_logits).view(original_shape)
shared_expert_output = self.shared_experts(hidden_states.view(original_shape))
return expert_output + shared_expert_output
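# Illustrative sketch (not part of the generated file): the MoE layer above routes each token
# to its top-k experts (softmax taken over the top-k router logits only) and adds a shared-expert
# path that sees every token. A dense toy version with made-up sizes and stand-in experts that
# simply scale their input:
def _moe_routing_sketch():
    import torch

    num_tokens, hidden, num_experts, top_k = 6, 8, 4, 2
    hidden_states = torch.randn(num_tokens, hidden)
    router_logits = torch.randn(num_tokens, num_experts)
    top_logits, top_idx = torch.topk(router_logits, k=top_k, dim=1)
    top_weights = torch.softmax(top_logits, dim=-1)          # weights sum to 1 per token
    expert_outputs = torch.stack([hidden_states * (e + 1) for e in range(num_experts)])
    routed = (expert_outputs[top_idx, torch.arange(num_tokens)[:, None]] * top_weights[..., None]).sum(1)
    shared = hidden_states                                   # stands in for the shared experts MLP
    output = routed + shared
    assert output.shape == (num_tokens, hidden)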
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
@use_kernelized_func(apply_rotary_pos_emb)
class AriaTextAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
attention_mask: torch.Tensor | None = None,
past_key_values: Cache | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
self.config._attn_implementation, eager_attention_forward
)
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class AriaTextDecoderLayer(GradientCheckpointingLayer):
"""
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and a Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = AriaTextAttention(config=config, layer_idx=layer_idx)
self.mlp = AriaTextMoELayer(config)
self.input_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
use_cache: bool | None = False,
cache_position: torch.LongTensor | None = None,
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
class AriaTextPreTrainedModel(PreTrainedModel):
config: AriaTextConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
_no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"]
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": AriaTextDecoderLayer,
"attentions": AriaTextAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaGroupedExpertsGemm):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class AriaPreTrainedModel(PreTrainedModel):
config: AriaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["AriaDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (dynamic slicing)
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": AriaTextDecoderLayer,
"attentions": AriaTextAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, AriaProjector):
init.trunc_normal_(module.query, std=self.config.initializer_range)
class AriaTextRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: AriaTextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: AriaTextConfig | None = None,
device: Optional["torch.device"] = None,
seq_len: int | None = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
class AriaTextModel(AriaTextPreTrainedModel):
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = AriaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = AriaTextRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@merge_with_config_defaults
@capture_outputs
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
cache_position: torch.LongTensor | None = None,
use_cache: bool | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = (
torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
class AriaTextForCausalLM(AriaTextPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_gather_output"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.model = AriaTextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
logits_to_keep: int | torch.Tensor = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, AriaTextForCausalLM
>>> model = AriaTextForCausalLM.from_pretrained("meta-aria_text/AriaText-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-aria_text/AriaText-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for Aria causal language model (or autoregressive) outputs.
"""
)
class AriaCausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        Image hidden states produced by the vision encoder, after projecting the last hidden state.
"""
loss: torch.FloatTensor | None = None
logits: torch.FloatTensor | None = None
past_key_values: Cache | None = None
hidden_states: tuple[torch.FloatTensor] | None = None
attentions: tuple[torch.FloatTensor] | None = None
image_hidden_states: torch.FloatTensor | None = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Aria outputs, with hidden states and attentions.
"""
)
class AriaModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        Image hidden states produced by the vision encoder, after projecting the last hidden state.
"""
image_hidden_states: torch.FloatTensor | None = None
@auto_docstring(
custom_intro="""
The Aria model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
class AriaModel(AriaPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
def __init__(self, config: AriaConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = AriaProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
@can_return_tuple
@merge_with_config_defaults
@auto_docstring(
custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection."
)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
pixel_mask: torch.FloatTensor | None = None,
vision_feature_layer: int = -1,
output_hidden_states: bool | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | BaseModelOutputWithPooling:
patch_attention_mask = self._create_patch_attention_mask(pixel_mask)
image_outputs = self.vision_tower(
pixel_values,
patch_attention_mask=patch_attention_mask,
output_hidden_states=True, # Ignore arg on purpose
return_dict=True,
**kwargs,
)
image_attn_mask = None
if patch_attention_mask is not None:
flattened_mask = patch_attention_mask.flatten(1)
image_attn_mask = torch.logical_not(flattened_mask)
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
image_outputs.pooler_output = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)
return image_outputs
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
n_image_features = image_features.shape[0] * image_features.shape[1]
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
torch_compilable_check(
inputs_embeds[special_image_mask].numel() == image_features.numel(),
f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {n_image_features}",
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
pixel_values: torch.FloatTensor | None = None,
pixel_mask: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple | AriaModelOutputWithPast:
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
        # Merge text and images
if pixel_values is not None and inputs_embeds.shape[1] != 1:
image_features = self.get_image_features(
pixel_values=pixel_values,
pixel_mask=pixel_mask,
vision_feature_layer=self.config.vision_feature_layer,
return_dict=True,
).pooler_output
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return AriaModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
def _create_patch_attention_mask(self, pixel_mask):
if pixel_mask is None:
return None
patches_subgrid = pixel_mask.unfold(
dimension=1,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
patches_subgrid = patches_subgrid.unfold(
dimension=2,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
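# Illustrative sketch (hypothetical tiny shapes, not used by the model): how the `unfold`
# calls in `AriaModel._create_patch_attention_mask` above reduce a (batch, height, width)
# pixel mask to a boolean mask over patches.
def _patch_attention_mask_example(patch_size: int = 2):
    pixel_mask = torch.zeros(1, 4, 4, dtype=torch.long)
    pixel_mask[:, :2, :] = 1  # top half is real image content, bottom half is padding
    patches = pixel_mask.unfold(1, patch_size, patch_size).unfold(2, patch_size, patch_size)
    patch_mask = patches.sum(dim=(-1, -2)) > 0  # a patch is kept if it overlaps any real pixel
    assert patch_mask.shape == (1, 2, 2) and patch_mask.tolist() == [[[True, True], [False, False]]]
    return patch_mask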
@auto_docstring(
custom_intro="""
Aria model for conditional generation tasks.
This model combines a vision tower, a multi-modal projector, and a language model
to perform tasks that involve both image and text inputs.
"""
)
class AriaForConditionalGeneration(AriaPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_tower": "model.vision_tower",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: AriaConfig):
super().__init__(config)
self.model = AriaModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
@auto_docstring
def get_image_features(
self,
pixel_values: torch.FloatTensor,
pixel_mask: torch.FloatTensor | None = None,
vision_feature_layer: int = -1,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | BaseModelOutputWithPooling:
return self.model.get_image_features(
pixel_values=pixel_values,
pixel_mask=pixel_mask,
vision_feature_layer=vision_feature_layer,
**kwargs,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
pixel_values: torch.FloatTensor | None = None,
pixel_mask: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
logits_to_keep: int | torch.Tensor = 0,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | AriaCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> import httpx
>>> from io import BytesIO
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO
>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return AriaCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
pixel_mask=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
is_first_iteration=False,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
is_first_iteration=is_first_iteration,
**kwargs,
)
if is_first_iteration or not kwargs.get("use_cache", True):
# Pixel values are used only in the first iteration if available
            # In subsequent iterations, they are already merged with text and cached
# NOTE: first iteration doesn't have to be prefill, it can be the first
# iteration with a question and cached system prompt (continue generate from cache)
model_inputs["pixel_values"] = pixel_values
model_inputs["pixel_mask"] = pixel_mask
return model_inputs
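# Illustrative sketch (hypothetical ids and shapes): how the placeholder mask produced by
# `AriaModel.get_placeholder_mask` is consumed by `masked_scatter` in `AriaModel.forward`
# above to splice projected image features into the text embedding sequence.
def _placeholder_scatter_example():
    image_token_id = 9  # assumed id, matching the default `image_token_index` of `AriaConfig`
    input_ids = torch.tensor([[1, 9, 9, 2]])
    inputs_embeds = torch.zeros(1, 4, 3)  # (batch, seq, hidden)
    image_features = torch.ones(1, 2, 3)  # projector output for the two image placeholders
    special_image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
    merged = inputs_embeds.masked_scatter(special_image_mask, image_features)
    assert torch.equal(merged[0, 1:3], torch.ones(2, 3)) and torch.equal(merged[0, 0], torch.zeros(3))
    return merged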
__all__ = [
"AriaForConditionalGeneration",
"AriaPreTrainedModel",
"AriaTextPreTrainedModel",
"AriaTextModel",
"AriaModel",
"AriaTextForCausalLM",
]
| # Copyright 2024 The Rhymes-AI Teams Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Iterable
import numpy as np
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...configuration_utils import PreTrainedConfig
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, select_best_resolution
from ...image_transforms import PaddingMode, convert_to_rgb, pad, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_python import PreTokenizedInput, TextInput
from ...utils import TensorType, TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..auto import CONFIG_MAPPING, AutoConfig, AutoTokenizer
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaMLP,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
)
from ..llava.modeling_llava import (
LlavaCausalLMOutputWithPast,
LlavaForConditionalGeneration,
LlavaModel,
LlavaModelOutputWithPast,
)
from ..llava_next.image_processing_llava_next import divide_to_patches
logger = logging.get_logger(__name__)
def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
"""
Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally inefficient, especially when dealing with a large number of experts.
Args:
token_states (torch.Tensor): Input tensor of shape (num_tokens, in_features).
expert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features).
tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
num_tokens = token_states.shape[0]
out_features = expert_weights.shape[-1]
output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)
cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)
# Insert zero at the beginning for offset index's convenience
zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)
cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))
for expert_num in range(expert_weights.shape[0]):
start = cumsum_num_tokens[expert_num]
end = cumsum_num_tokens[expert_num + 1]
tokens = token_states[start:end]
out = torch.matmul(tokens, expert_weights[expert_num])
output[start:end] = out
return output
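# Illustrative sketch (hypothetical shapes): a minimal call to `sequential_experts_gemm`
# above, showing the expected layout of routed tokens and per-expert weights.
def _sequential_experts_gemm_example():
    token_states = torch.randn(6, 4)  # (num_tokens, in_features), already sorted by expert
    expert_weights = torch.randn(3, 4, 8)  # (num_experts, in_features, out_features)
    tokens_per_expert = torch.tensor([2, 1, 3])  # token counts per expert, summing to num_tokens
    output = sequential_experts_gemm(token_states, expert_weights, tokens_per_expert)
    assert output.shape == (6, 8)
    return output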
class AriaTextConfig(LlamaConfig):
r"""
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`LlamaModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 4096):
The size of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
Llama 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 2):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_heads
moe_num_experts (`int`, *optional*, defaults to 8):
The number of experts in the MoE layer.
moe_topk (`int`, *optional*, defaults to 2):
The number of top experts to route to for each token.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of shared experts.
"""
model_type = "aria_text"
base_config_key = "text_config"
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.shared_experts.gate_proj": "colwise",
"layers.*.mlp.shared_experts.up_proj": "colwise",
"layers.*.mlp.shared_experts.down_proj": "rowwise",
}
def __init__(
self,
intermediate_size: int = 4096,
moe_num_experts: int = 8,
moe_topk: int = 2,
moe_num_shared_experts: int = 2,
pad_token_id=2,
**super_kwargs,
):
self.intermediate_size = intermediate_size
self.moe_num_experts = moe_num_experts
self.moe_topk = moe_topk
self.moe_num_shared_experts = moe_num_shared_experts
super().__init__(pad_token_id=pad_token_id, **super_kwargs)
class AriaConfig(PreTrainedConfig):
r"""
This class handles the configuration for both vision and text components of the Aria model,
as well as additional parameters for image token handling and projector mapping.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`AriaVisionConfig` or `dict`, *optional*):
Configuration for the vision component.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
text_config (`AriaTextConfig` or `dict`, *optional*):
Configuration for the text component.
projector_patch_to_query_dict (`dict`, *optional*):
Mapping of patch sizes to query dimensions.
image_token_index (`int`, *optional*, defaults to 9):
Index used to represent image tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal initializer for initializing all weight matrices.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
"""
model_type = "aria"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AriaTextConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
vision_feature_layer: int = -1,
        text_config: AriaTextConfig | None = None,
projector_patch_to_query_dict: dict | None = None,
image_token_index: int | None = 9,
initializer_range: float | None = 0.02,
tie_word_embeddings: bool | None = False,
**kwargs,
):
self.image_token_index = image_token_index
# Convert the keys and values of projector_patch_to_query_dict to integers
# This ensures consistency even if they were provided as strings
if projector_patch_to_query_dict is None:
projector_patch_to_query_dict = {
1225: 128,
4900: 256,
}
self.projector_patch_to_query_dict = {int(k): int(v) for k, v in projector_patch_to_query_dict.items()}
self.max_value_projector_patch_to_query_dict = max(self.projector_patch_to_query_dict.values())
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config["model_type"] = "idefics3_vision"
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["idefics3_vision"]()
self.vision_config = vision_config
self.initializer_range = initializer_range
if isinstance(text_config, dict) and "model_type" in text_config:
text_config = AriaTextConfig(**text_config)
elif text_config is None:
text_config = AriaTextConfig()
self.text_config = text_config
self.tie_word_embeddings = tie_word_embeddings
super().__init__(**kwargs)
class AriaTextRMSNorm(LlamaRMSNorm):
pass
class AriaProjectorMLP(nn.Module):
"""
Feed-Forward Network module for the Aria Projector.
Args:
in_features (`int`):
Input embedding dimension.
hidden_features (`int`):
Hidden dimension of the feed-forward network.
output_dim (`int`):
Output dimension.
"""
def __init__(self, in_features, hidden_features, output_dim):
super().__init__()
self.linear_in = nn.Linear(in_features, hidden_features, bias=False)
self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)
self.act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_states = self.act(self.linear_in(hidden_states))
hidden_states = self.linear_out(hidden_states)
return hidden_states
class AriaCrossAttention(nn.Module):
"""
Aria Cross-Attention module.
Args:
config (`AriaConfig`):
The configuration to use.
"""
def __init__(self, config: AriaConfig, dropout_rate: float = 0):
super().__init__()
hidden_size = config.vision_config.hidden_size
num_heads = config.vision_config.num_attention_heads
self.num_heads = num_heads
self.q_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.k_proj = nn.Linear(hidden_size, hidden_size, bias=False)
self.v_proj = nn.Linear(hidden_size, hidden_size, bias=False)
# Original code here: https://github.com/rhymes-ai/Aria/blob/719ff4e52b727443cba3793b0e27fe64e0244fe1/aria/model/projector.py#L48
self.multihead_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
self.linear = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout_rate)
self.layer_norm = nn.LayerNorm(hidden_size)
self.layer_norm_kv = nn.LayerNorm(hidden_size)
def forward(self, key_value_states, hidden_states, attn_mask=None):
"""
Forward pass of the AriaCrossAttention module.
Args:
key_value_states (`torch.Tensor`):
Input tensor for key and value.
hidden_states (`torch.Tensor`):
Input tensor for query.
attn_mask (`torch.Tensor`, *optional*, defaults to None):
Attention mask.
Returns:
torch.Tensor:
Output tensor after cross-attention.
"""
query = self.q_proj(self.layer_norm(hidden_states))
key_value_states = self.layer_norm_kv(key_value_states)
key = self.k_proj(key_value_states)
value = self.v_proj(key_value_states)
attn_output, _ = self.multihead_attn(query, key, value, attn_mask=attn_mask)
attn_output = self.dropout(self.linear(attn_output))
return attn_output
class AriaProjector(nn.Module):
"""
Aria Projector module.
This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.
Args:
config (`AriaConfig`):
Configuration object for the model.
"""
def __init__(
self,
config: AriaConfig,
):
super().__init__()
self.patch_to_query_dict = config.projector_patch_to_query_dict
self.in_features = config.vision_config.hidden_size
self.num_heads = config.vision_config.num_attention_heads
self.kv_dim = config.vision_config.hidden_size
self.hidden_features = config.text_config.hidden_size
self.output_dim = config.text_config.hidden_size
self.query = nn.Parameter(torch.zeros(config.max_value_projector_patch_to_query_dict, self.in_features))
self.cross_attn = AriaCrossAttention(config)
self.layer_norm = nn.LayerNorm(self.in_features)
self.feed_forward = AriaProjectorMLP(self.in_features, self.hidden_features, self.output_dim)
def forward(self, key_value_states: torch.Tensor, attn_mask: torch.Tensor | None = None):
"""
Forward pass of the Projector module.
Args:
key_value_states (`torch.Tensor`):
Input tensor of shape (batch_size, num_patches, kv_dim).
            attn_mask (`torch.Tensor`, *optional*, defaults to `None`):
Attention mask.
Returns:
`torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
"""
batch_size, num_patches = key_value_states.shape[0], key_value_states.shape[1]
if num_patches not in self.patch_to_query_dict:
raise KeyError(
f"Number of patches {num_patches} not found in patch_to_query_dict amongst possible values {self.patch_to_query_dict.keys()}."
)
query_num = self.patch_to_query_dict[num_patches]
queries = self.query[:query_num].unsqueeze(0).repeat(batch_size, 1, 1)
if attn_mask is not None:
attn_mask = attn_mask.repeat_interleave(self.num_heads, 0)
attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1)
attention_out = self.cross_attn(key_value_states, queries, attn_mask=attn_mask)
out = self.feed_forward(self.layer_norm(attention_out))
return out
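# Illustrative sketch (hypothetical dimensions): how `AriaProjector.forward` above selects the
# number of learnable queries from `patch_to_query_dict` based on the incoming patch count.
def _aria_projector_query_selection_example():
    patch_to_query_dict = {1225: 128, 4900: 256}  # default mapping from `AriaConfig`
    query = torch.zeros(max(patch_to_query_dict.values()), 32)  # (max_query_num, in_features)
    key_value_states = torch.randn(2, 1225, 32)  # (batch, num_patches, kv_dim)
    query_num = patch_to_query_dict[key_value_states.shape[1]]
    queries = query[:query_num].unsqueeze(0).repeat(key_value_states.shape[0], 1, 1)
    assert queries.shape == (2, 128, 32)
    return queries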
class AriaImageProcessor(BaseImageProcessor):
"""
A vision processor for the Aria model that handles image preprocessing.
Initialize the AriaImageProcessor.
Args:
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to 980):
Maximum image size.
min_image_size (`int`, *optional*, defaults to 336):
Minimum image size.
        split_resolutions (`list`, *optional*, defaults to a list of optimal resolutions as tuples):
The optimal resolutions for splitting the image.
split_image (`bool`, *optional*, defaults to `False`):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
The resampling filter to use if resizing the image.
"""
model_input_names = ["pixel_values", "pixel_mask", "num_crops"]
def __init__(
self,
image_mean: list[float] | None = None,
image_std: list[float] | None = None,
max_image_size: int = 980,
min_image_size: int = 336,
split_resolutions: list[tuple[int, int]] | None = None,
split_image: bool | None = False,
do_convert_rgb: bool | None = True,
do_rescale: bool = True,
rescale_factor: int | float = 1 / 255,
do_normalize: bool | None = True,
resample: PILImageResampling = PILImageResampling.BICUBIC,
**kwargs,
):
super().__init__(**kwargs)
if image_mean is None:
image_mean = [0.5, 0.5, 0.5]
if image_std is None:
image_std = [0.5, 0.5, 0.5]
self.max_image_size = max_image_size
self.min_image_size = min_image_size
self.image_mean = image_mean
self.image_std = image_std
self.split_image = split_image
if split_resolutions is None:
split_resolutions = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4), (2, 3), (2, 2), (2, 1), (3, 1), (3, 2), (4, 1), (4, 2), (5, 1), (6, 1), (7, 1), (8, 1)] # fmt: skip
split_resolutions = [(el[0] * 490, el[1] * 490) for el in split_resolutions]
self.split_resolutions = split_resolutions
self.do_convert_rgb = do_convert_rgb
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.resample = resample
def preprocess(
self,
images: ImageInput | list[ImageInput],
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
max_image_size: int | None = None,
min_image_size: int | None = None,
split_image: bool | None = None,
do_convert_rgb: bool | None = None,
do_rescale: bool | None = None,
rescale_factor: float | None = None,
do_normalize: bool | None = None,
resample: PILImageResampling | None = None,
return_tensors: str | TensorType | None = "pt",
data_format: ChannelDimension | None = ChannelDimension.FIRST,
input_data_format: str | ChannelDimension | None = None,
):
"""
Process a list of images.
Args:
images (ImageInput or list of ImageInput):
The input image or a list of images.
image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Mean values for normalization.
image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
Standard deviation values for normalization.
max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
Maximum image size.
min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
Minimum image size.
split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
Whether to split the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
Whether to convert the image to RGB.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
Whether to normalize the image.
resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
The resampling filter to use if resizing the image.
return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
The type of tensor to return.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
                If unset, will use the same format as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`:
image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`:
image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
BatchFeature:
A BatchFeature object containing:
- 'pixel_values':
Tensor of processed image pixel values.
- 'pixel_mask':
Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
- True (1) values indicate pixels that belong to the original resized image.
- False (0) values indicate pixels that are part of the padding.
The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
- 'num_crops':
The maximum number of crops across all images.
"""
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
max_image_size = max_image_size if max_image_size is not None else self.max_image_size
min_image_size = min_image_size if min_image_size is not None else self.min_image_size
split_image = split_image if split_image is not None else self.split_image
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
resample = resample if resample is not None else self.resample
if max_image_size not in [490, 980]:
raise ValueError("max_image_size must be either 490 or 980")
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
pixel_values = []
pixel_masks = []
num_crops = None
for image in images:
if split_image:
crop_images = self.get_image_patches(
image,
self.split_resolutions,
max_image_size,
resample,
data_format=input_data_format,
input_data_format=input_data_format,
)
else:
crop_images = [image]
if num_crops is None or len(crop_images) > num_crops:
num_crops = len(crop_images)
for crop_image in crop_images:
# At this point the scale is the rescaling factor that would bring the image to max_size in its larger dimension
h, w = get_image_size(crop_image)
scale = max_image_size / max(h, w)
if w >= h:
new_size = (max(int(h * scale), min_image_size), max_image_size) # h, w
else:
new_size = (max_image_size, max(int(w * scale), min_image_size)) # h, w
crop_image_resized = resize(
crop_image,
new_size,
resample=resample,
data_format=input_data_format,
input_data_format=input_data_format,
)
padding_bottom, padding_right = max_image_size - new_size[0], max_image_size - new_size[1]
crop_image_padded = pad(
crop_image_resized,
((0, padding_bottom), (0, padding_right)),
data_format=input_data_format,
input_data_format=input_data_format,
)
# Create a pixel mask
pixel_mask = np.zeros((max_image_size, max_image_size), dtype=bool)
pixel_mask[: new_size[0], : new_size[1]] = 1
pixel_masks.append(pixel_mask)
if do_rescale:
crop_image_padded = self.rescale(
image=crop_image_padded, scale=rescale_factor, input_data_format=input_data_format
)
if do_normalize:
crop_image_padded = self.normalize(
crop_image_padded,
self.image_mean,
self.image_std,
data_format=input_data_format,
input_data_format=input_data_format,
)
crop_image_padded = (
to_channel_dimension_format(crop_image_padded, data_format, input_data_format)
if data_format is not None
else crop_image_padded
)
pixel_values.append(crop_image_padded)
return BatchFeature(
data={
"pixel_values": np.stack(pixel_values, axis=0),
"pixel_mask": np.stack(pixel_masks, axis=0),
"num_crops": num_crops,
},
tensor_type=return_tensors,
)
def _resize_for_patching(
self, image: np.ndarray, target_resolution: tuple, resample, input_data_format: ChannelDimension
) -> np.ndarray:
"""
Resizes an image to a target resolution while maintaining aspect ratio.
Args:
image (np.ndarray):
The input image.
target_resolution (tuple):
The target resolution (height, width) of the image.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
np.ndarray: The resized and padded image.
"""
new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)
# Resize the image
resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)
return resized_image
def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple):
original_height, original_width = original_resolution
target_height, target_width = target_resolution
paste_x, r_x = divmod(target_width - original_width, 2)
paste_y, r_y = divmod(target_height - original_height, 2)
return (paste_y, paste_y + r_y), (paste_x, paste_x + r_x)
def _pad_for_patching(
self, image: np.ndarray, target_resolution: tuple, input_data_format: ChannelDimension
) -> np.ndarray:
"""
Pad an image to a target resolution while maintaining aspect ratio.
"""
new_resolution = get_patch_output_size(image, target_resolution, input_data_format)
padding = self._get_padding_size(new_resolution, target_resolution)
padded_image = self.pad(image, padding=padding)
return padded_image
def pad(
self,
image: np.ndarray,
padding: int | tuple[int, int] | Iterable[tuple[int, int]],
mode: PaddingMode = PaddingMode.CONSTANT,
constant_values: float | Iterable[float] = 0.0,
data_format: str | ChannelDimension | None = None,
input_data_format: str | ChannelDimension | None = None,
) -> np.ndarray:
"""
Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
        dimension or in the (`num_patches`) dimension. In the second case an iterable of tuples is expected
as input.
Args:
image (`np.ndarray`):
The image to pad.
padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`):
Padding to apply to the edges of the height, width axes. Can be one of three formats:
- `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
- `((before, after),)` yields same before and after pad for height and width.
- `(pad,)` or int is a shortcut for before = after = pad width for all axes.
mode (`PaddingMode`):
The padding mode to use. Can be one of:
- `"constant"`: pads with a constant value.
- `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
vector along each axis.
- `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
- `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                If unset, will use the same format as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
`np.ndarray`: The padded image.
"""
        # call the general `pad` if padding on `height/width`, otherwise it's the `num_patches` dim
if isinstance(padding, int) or len(padding) != 4:
return pad(image, padding, mode, constant_values, data_format, input_data_format)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
padding_mode_mapping = {
PaddingMode.CONSTANT: "constant",
PaddingMode.REFLECT: "reflect",
PaddingMode.REPLICATE: "edge",
PaddingMode.SYMMETRIC: "symmetric",
}
image = np.pad(image, padding, mode=padding_mode_mapping[mode], constant_values=constant_values)
image = (
to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image
)
return image
def get_image_patches(
self,
image: np.ndarray,
grid_pinpoints: list[tuple[int, int]],
patch_size: int,
resample: PILImageResampling,
data_format: ChannelDimension,
input_data_format: ChannelDimension,
) -> list[np.ndarray]:
"""
Process an image with variable resolutions by dividing it into patches.
Args:
image (`np.ndarray`):
The input image to be processed.
grid_pinpoints (list[tuple[int, int]]):
A list of possible resolutions as tuples.
patch_size (`int`):
Size of the patches to divide the image into.
resample (`PILImageResampling`):
Resampling filter to use if resizing the image.
data_format (`ChannelDimension` or `str`):
The channel dimension format for the output image.
input_data_format (`ChannelDimension` or `str`):
The channel dimension format of the input image.
Returns:
`list[np.ndarray]`: A list of NumPy arrays containing the processed image patches.
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints must be a list of possible resolutions.")
possible_resolutions = grid_pinpoints
image_size = get_image_size(image, channel_dim=input_data_format)
best_resolution = select_best_resolution(image_size, possible_resolutions)
resized_image = self._resize_for_patching(
image, best_resolution, resample=resample, input_data_format=input_data_format
)
padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)
patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)
# make sure that all patches are in the input data format
patches = [
to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)
for patch in patches
]
return patches
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
            images_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of patches per image.
"""
        images_kwargs = images_kwargs if images_kwargs is not None else {}
        split_image = images_kwargs.get("split_image", self.split_image)
max_image_size = images_kwargs.get("max_image_size", self.max_image_size)
resized_height, resized_width = select_best_resolution((height, width), self.split_resolutions)
num_patches = 1 if not split_image else resized_height // max_image_size * resized_width // max_image_size
return num_patches
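# Illustrative sketch (hypothetical image size): how `get_number_of_image_patches` above is
# typically used when `split_image=True`, e.g. by the processor to expand image placeholders.
def _aria_patch_count_example():
    image_processor = AriaImageProcessor(split_image=True, max_image_size=490)
    # An 800x1200 image is matched against `split_resolutions` and tiled into 490x490 crops.
    return image_processor.get_number_of_image_patches(800, 1200, {"split_image": True})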
class AriaImagesKwargs(ImagesKwargs, total=False):
"""
split_image (`bool`, *optional*, defaults to `False`):
Whether to split large images into multiple crops. When enabled, images exceeding the maximum size are
divided into overlapping crops that are processed separately and then combined. This allows processing
of very high-resolution images that exceed the model's input size limits.
max_image_size (`int`, *optional*, defaults to `980`):
Maximum image size (in pixels) for a single image crop. Images larger than this will be split into
multiple crops when `split_image=True`, or resized if splitting is disabled. This parameter controls
the maximum resolution of individual image patches processed by the model.
min_image_size (`int`, *optional*):
Minimum image size (in pixels) for a single image crop. Images smaller than this will be upscaled to
meet the minimum requirement. If not specified, images are processed at their original size (subject
to the maximum size constraint).
"""
split_image: bool
max_image_size: int
min_image_size: int
class AriaProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: AriaImagesKwargs
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"max_image_size": 980,
"split_image": False,
},
"return_tensors": TensorType.PYTORCH,
}
@auto_docstring
class AriaProcessor(ProcessorMixin):
def __init__(
self,
image_processor=None,
tokenizer: AutoTokenizer | str = None,
chat_template: str | None = None,
size_conversion: dict[float | int, int] | None = None,
):
r"""
size_conversion (`Dict`, *optional*):
A dictionary indicating size conversions for images.
"""
if size_conversion is None:
size_conversion = {490: 128, 980: 256}
self.size_conversion = {int(k): v for k, v in size_conversion.items()}
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
if tokenizer is not None and tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.unk_token
super().__init__(image_processor, tokenizer, chat_template=chat_template)
@auto_docstring
def __call__(
self,
text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput],
images: ImageInput | None = None,
**kwargs: Unpack[AriaProcessorKwargs],
) -> BatchFeature:
r"""
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
AriaProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
# expand the image_token according to the num_crops and tokens per image
tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]]
prompt_strings = []
num_crops = image_inputs.pop("num_crops") * tokens_per_image
for sample in text:
sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops)
prompt_strings.append(sample)
else:
image_inputs = {}
prompt_strings = text
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
            images_kwargs = AriaProcessorKwargs._defaults.get("images_kwargs", {}).copy()  # copy to avoid mutating the shared defaults
images_kwargs.update(kwargs)
max_size = images_kwargs.get("max_image_size", None) or self.image_processor.max_image_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# Remove `num_crops`, it is popped and used only when processing. Make a copy of list when removing
# otherwise `self.image_processor.model_input_names` is also modified
image_processor_input_names = [name for name in image_processor_input_names if name != "num_crops"]
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
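# Illustrative sketch (hypothetical placeholder string): how `AriaProcessor.__call__` above
# expands a single image placeholder into `num_crops * tokens_per_image` copies before
# tokenization. The token string and crop count are assumptions for demonstration only.
def _aria_prompt_expansion_example():
    image_token = "<|img|>"
    size_conversion = {490: 128, 980: 256}  # default `size_conversion` of `AriaProcessor`
    num_crops, max_image_size = 2, 980
    prompt = f"Describe this image: {image_token}"
    expanded = prompt.replace(image_token, image_token * (num_crops * size_conversion[max_image_size]))
    assert expanded.count(image_token) == 512
    return expanded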
class AriaSharedExpertsMLP(LlamaMLP):
"""
Shared Expert MLP for shared experts.
Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.
Args:
config (`AriaTextConfig`): Configuration object for the Aria language model.
"""
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts
class AriaGroupedExpertsGemm(nn.Module):
"""
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
groups (`int`):
Number of expert groups.
"""
def __init__(self, in_features, out_features, groups):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.groups = groups
self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))
def forward(self, input, tokens_per_expert):
"""
Perform grouped matrix multiplication.
Args:
input (`torch.Tensor`):
Input tensor of shape (num_tokens, in_features).
tokens_per_expert (`torch.Tensor`):
Number of tokens assigned to each expert.
Returns:
torch.Tensor: Output tensor of shape (num_tokens, out_features).
"""
return sequential_experts_gemm(
input,
self.weight,
tokens_per_expert.cpu(),
)
class AriaExperts(nn.Module):
def __init__(self, config: AriaTextConfig) -> None:
super().__init__()
self.config = config
self.fc1 = AriaGroupedExpertsGemm(config.hidden_size, config.intermediate_size * 2, config.moe_num_experts)
self.fc2 = AriaGroupedExpertsGemm(config.intermediate_size, config.hidden_size, config.moe_num_experts)
def route_tokens_to_experts(self, router_logits):
top_logits, top_indices = torch.topk(router_logits, k=self.config.moe_topk, dim=1)
scores = nn.functional.softmax(top_logits, dim=-1)
return top_indices, scores
    def forward(self, hidden_states, router_logits) -> torch.Tensor:
        # Select the top-k experts per token and their softmax routing weights.
        top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
        original_dtype = top_k_index.dtype
        # Count how many tokens are routed to each expert (histogram over expert indices).
        tokens_per_expert = torch.histc(
            top_k_index.flatten().to(torch.float32),
            bins=self.config.moe_num_experts,
            min=0,
            max=self.config.moe_num_experts - 1,
        ).to(original_dtype)
        indices = top_k_index
        # Sort the flattened (token, expert) assignments by expert id so that tokens destined
        # for the same expert are contiguous, which is the layout the grouped GEMM expects.
        flatten_indices = indices.view(-1)
        sorted_indices = torch.argsort(flatten_indices)
        permuted_tokens = hidden_states.index_select(0, sorted_indices // self.config.moe_topk)
        # Gated feed-forward: fc1 produces both the projection and the gate, combined with SiLU.
        fc1_output = self.fc1(permuted_tokens, tokens_per_expert)
        projection, gate = torch.chunk(fc1_output, 2, dim=-1)
        fc1_output = nn.functional.silu(projection) * gate
        expert_output = self.fc2(fc1_output, tokens_per_expert)
        # Scatter the expert outputs back to the original (token, expert) order, then combine
        # the top-k expert outputs per token weighted by their routing scores.
        unpermuted_tokens = torch.zeros(
            (top_k_weights.shape[0] * self.config.moe_topk, expert_output.size(1)),
            dtype=expert_output.dtype,
            device=expert_output.device,
        )
        unpermuted_tokens.index_copy_(0, sorted_indices, expert_output)
        unpermuted_tokens = unpermuted_tokens.view(-1, self.config.moe_topk, expert_output.size(1))
        output = (unpermuted_tokens * top_k_weights.unsqueeze(-1)).sum(dim=1)
        return output
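# Illustrative sketch (hypothetical routing): the permutation trick used in `AriaExperts.forward`
# above. Sorting the flattened top-k expert indices groups token slots by expert, and integer
# division by `moe_topk` recovers which original token fed each grouped slot.
def _expert_permutation_example():
    moe_topk = 2
    top_k_index = torch.tensor([[1, 0], [3, 2]])  # (num_tokens, moe_topk) expert assignments
    sorted_indices = torch.argsort(top_k_index.view(-1))  # groups slots by expert id: [1, 0, 3, 2]
    token_order = sorted_indices // moe_topk  # original token feeding each grouped slot
    assert token_order.tolist() == [0, 0, 1, 1]
    return token_order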
class AriaTextMoELayer(nn.Module):
def __init__(self, config: AriaTextConfig):
super().__init__()
self.router = nn.Linear(config.hidden_size, config.moe_num_experts, bias=False)
self.experts = AriaExperts(config)
self.shared_experts = AriaSharedExpertsMLP(config)
self.config = config
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
original_shape = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_states.size(-1))
router_logits = self.router(hidden_states)
expert_output = self.experts(hidden_states, router_logits).view(original_shape)
shared_expert_output = self.shared_experts(hidden_states.view(original_shape))
return expert_output + shared_expert_output
class AriaTextAttention(LlamaAttention):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
class AriaTextDecoderLayer(LlamaDecoderLayer):
"""
Aria Text Decoder Layer.
This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.
Args:
config (`AriaTextConfig`):
Configuration object for the text component of the model.
layer_idx (`int`):
Index of the layer.
"""
def __init__(self, config: AriaTextConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.mlp = AriaTextMoELayer(config)
@auto_docstring
class AriaTextPreTrainedModel(PreTrainedModel):
config: AriaTextConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
_no_split_modules = ["AriaTextDecoderLayer", "AriaGroupedExpertsGemm"]
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": AriaTextDecoderLayer,
"attentions": AriaTextAttention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
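        # The grouped expert weight is a plain parameter rather than an nn.Linear, so the
        # generic initialization above does not cover it and it is initialized explicitly here.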
if isinstance(module, AriaGroupedExpertsGemm):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
class AriaPreTrainedModel(LlamaPreTrainedModel):
config: AriaConfig
base_model_prefix = "model"
_can_compile_fullgraph = False # MoE models don't work with torch.compile (dynamic slicing)
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, AriaProjector):
init.trunc_normal_(module.query, std=self.config.initializer_range)
class AriaTextModel(LlamaModel):
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.layers = nn.ModuleList(
[AriaTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
self.post_init()
class AriaTextForCausalLM(AriaTextPreTrainedModel, LlamaForCausalLM):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config: AriaTextConfig):
super().__init__(config)
self.model = AriaTextModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(self, **super_kwargs):
        return super().forward(**super_kwargs)
class AriaCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
class AriaModelOutputWithPast(LlavaModelOutputWithPast):
pass
class AriaModel(LlavaModel):
def __init__(self, config: AriaConfig):
super().__init__(config)
self.multi_modal_projector = AriaProjector(config)
def _create_patch_attention_mask(self, pixel_mask):
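        # Unfold the pixel-level mask into non-overlapping patch_size x patch_size windows
        # and mark a patch as valid when at least one pixel inside it is valid.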
if pixel_mask is None:
return None
patches_subgrid = pixel_mask.unfold(
dimension=1,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
patches_subgrid = patches_subgrid.unfold(
dimension=2,
size=self.vision_tower.config.patch_size,
step=self.vision_tower.config.patch_size,
)
return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
def get_image_features(
self,
pixel_values: torch.FloatTensor,
pixel_mask: torch.FloatTensor | None = None,
vision_feature_layer: int = -1,
output_hidden_states: bool | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | BaseModelOutputWithPooling:
patch_attention_mask = self._create_patch_attention_mask(pixel_mask)
image_outputs = self.vision_tower(
pixel_values,
patch_attention_mask=patch_attention_mask,
output_hidden_states=True, # Ignore arg on purpose
return_dict=True,
**kwargs,
)
image_attn_mask = None
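        # The patch mask is inverted before being handed to the projector, so that True
        # marks padded patches (positions to ignore) in the flattened patch sequence.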
if patch_attention_mask is not None:
flattened_mask = patch_attention_mask.flatten(1)
image_attn_mask = torch.logical_not(flattened_mask)
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
image_outputs.pooler_output = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)
return image_outputs
def forward(
self,
input_ids: torch.LongTensor | None = None,
pixel_values: torch.FloatTensor | None = None,
pixel_mask: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
use_cache: bool | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple | AriaModelOutputWithPast:
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
        # Merge the text embeddings and the image features into a single input sequence
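        # Image features are merged only when more than one position is processed (prefill);
        # during cached single-token decoding they are already part of the key/value cache.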
if pixel_values is not None and inputs_embeds.shape[1] != 1:
image_features = self.get_image_features(
pixel_values=pixel_values,
pixel_mask=pixel_mask,
vision_feature_layer=self.config.vision_feature_layer,
return_dict=True,
).pooler_output
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
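            # Locate the placeholder image-token positions in the text sequence and overwrite
            # their embeddings in place with the projected image features.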
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return AriaModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values if use_cache else None,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
Aria model for conditional generation tasks.
This model combines a vision tower, a multi-modal projector, and a language model
to perform tasks that involve both image and text inputs.
"""
)
class AriaForConditionalGeneration(LlavaForConditionalGeneration):
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
@auto_docstring
def get_image_features(
self,
pixel_values: torch.FloatTensor,
pixel_mask: torch.FloatTensor | None = None,
vision_feature_layer: int = -1,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | BaseModelOutputWithPooling:
return self.model.get_image_features(
pixel_values=pixel_values,
pixel_mask=pixel_mask,
vision_feature_layer=vision_feature_layer,
**kwargs,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor | None = None,
pixel_values: torch.FloatTensor | None = None,
pixel_mask: torch.LongTensor | None = None,
attention_mask: torch.Tensor | None = None,
position_ids: torch.LongTensor | None = None,
past_key_values: Cache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
labels: torch.LongTensor | None = None,
use_cache: bool | None = None,
logits_to_keep: int | torch.Tensor = 0,
cache_position: torch.LongTensor | None = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple | AriaCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
        >>> import torch
        >>> from transformers import AutoProcessor, AutoModel
        >>> from transformers.image_utils import load_image
>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")
>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", dtype=torch.bfloat16, device_map="auto")
>>> # Create inputs
>>> messages = [
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
... {"type": "image"},
... {"type": "text", "text": "What can we see in this image?"},
... ]
... },
... {
... "role": "user",
... "content": [
... {"type": "image"},
... {"type": "text", "text": "In which city is that bridge located?"},
... ]
... }
... ]
>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)
>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.
>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```"""
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return AriaCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
pixel_mask=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
is_first_iteration=False,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
is_first_iteration=is_first_iteration,
**kwargs,
)
if is_first_iteration or not kwargs.get("use_cache", True):
            # Pixel values are used only in the first iteration, if available.
            # In subsequent iterations they are already merged with the text embeddings and cached.
            # NOTE: the first iteration does not have to be prefill; it can be the first
            # iteration with a new question and a cached system prompt (continuing generation from the cache).
model_inputs["pixel_values"] = pixel_values
model_inputs["pixel_mask"] = pixel_mask
return model_inputs
__all__ = [
"AriaConfig",
"AriaTextConfig",
"AriaImageProcessor",
"AriaProcessor",
"AriaForConditionalGeneration",
"AriaPreTrainedModel",
"AriaTextPreTrainedModel",
"AriaTextModel",
"AriaModel",
"AriaTextForCausalLM",
]
| ["llama", "llava"] |
bert_generation | google/bert_for_seq_generation_L-24_bbc_encoder | (code, truncated) | https://github.com/huggingface/transformers/blob/c89bdfbe72/src/transformers/models/bert_generation/modeling_bert_generation.py | (code, truncated) | null | [] |
biogpt | microsoft/biogpt | (code, truncated) | https://github.com/huggingface/transformers/blob/13e736685a/src/transformers/models/biogpt/modeling_biogpt.py | (code, truncated) | (code, truncated) | ["bart", "opt"] |
bitnet | microsoft/bitnet-b1.58-2B-4T | null | null | (code, truncated) | (code, truncated) | ["gemma", "llama"] |
blt | itazap/blt-1b-hf | null | null | (code, truncated) | (code, truncated) | ["cohere2", "llama", "mllama"] |
camembert | almanach/camembert-base | (code, truncated) | https://github.com/huggingface/transformers/blob/c89bdfbe72/src/transformers/models/camembert/modeling_camembert.py | (code, truncated) | (code, truncated) | ["roberta"] |
chameleon | facebook/chameleon-7b | (code, truncated) | https://github.com/huggingface/transformers/blob/24cfcc2114/src/transformers/models/chameleon/modeling_chameleon.py | (code, truncated) | null | [] |
codegen | Salesforce/codegen-350M-mono | (code, truncated) | https://github.com/huggingface/transformers/blob/d6b6fb9963/src/transformers/models/codegen/modeling_codegen.py | (code, truncated) | null | [] |
conditional_detr | microsoft/conditional-detr-resnet-50 | (code, truncated) | https://github.com/huggingface/transformers/blob/126a739058/src/transformers/models/conditional_detr/modeling_conditional_detr.py | (code, truncated) | (code, truncated) | ["deformable_detr", "detr"] |