1#![allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)]
2
3use candle_core::{DType, Device, Module, Result, Tensor, D};
7use candle_nn::{Activation, Embedding, Linear};
8use mistralrs_quant::ShardedVarBuilder;
9use serde::Deserialize;
10use std::sync::Arc;
11
12use crate::layers::{clamp_for_f16, embedding, linear_no_bias, MatMul};
13
/// Serde default for `Config::relative_attention_max_distance`.
fn default_relative_attention_max_distance() -> usize {
    128
}
17
/// Serde default for `Config::is_decoder`: encoder by default.
fn default_is_decoder() -> bool {
    false
}
21
/// Serde default for `Config::use_cache`.
fn default_use_cache() -> bool {
    true
}
25
/// Serde default for `Config::tie_word_embeddings`.
fn default_tie_word_embeddings() -> bool {
    true
}
29
30fn get_mask(size: usize, device: &Device) -> Result<Tensor> {
31 let mask: Vec<_> = (0..size)
32 .flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
33 .collect();
34 Tensor::from_slice(&mask, (size, size), device)
35}
36
37fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
38 let shape = mask.shape();
39 let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
40 let m = mask.where_cond(&on_true, on_false)?;
41 Ok(m)
42}
43
/// Feed-forward activation together with whether the FFN is gated
/// (`gated-gelu` / `gated-silu` in the HF `feed_forward_proj` field).
/// See `deserialize_feed_forward_proj_activation` for the parsing rules.
#[derive(Debug, Deserialize, Default, Clone, PartialEq)]
pub struct ActivationWithOptionalGating {
    // True when the config string carried a "gated-" prefix; selects
    // `T5DenseGatedActDense` over `T5DenseActDense` in `T5LayerFF::load`.
    pub gated: bool,
    pub activation: candle_nn::Activation,
}
49
50pub fn deserialize_feed_forward_proj_activation<'de, D>(
51 deserializer: D,
52) -> std::result::Result<ActivationWithOptionalGating, D::Error>
53where
54 D: serde::de::Deserializer<'de>,
55{
56 match String::deserialize(deserializer)?.as_str() {
57 "gated-gelu" => Ok(ActivationWithOptionalGating {
58 gated: true,
59 activation: candle_nn::Activation::NewGelu,
60 }),
61 "gated-silu" => Ok(ActivationWithOptionalGating {
62 gated: true,
63 activation: candle_nn::Activation::Silu,
64 }),
65 buf => {
66 let activation = serde_plain::from_str(buf).map_err(serde::de::Error::custom)?;
67 Ok(ActivationWithOptionalGating {
68 gated: false,
69 activation,
70 })
71 }
72 }
73}
74
/// T5 model configuration, mirroring the HF `T5Config` JSON schema.
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    /// Token vocabulary size (rows of the shared embedding table).
    pub vocab_size: usize,
    /// Model/residual-stream hidden size.
    pub d_model: usize,
    /// Per-head key/value projection width.
    pub d_kv: usize,
    /// Inner width of the feed-forward blocks.
    pub d_ff: usize,
    /// Number of layers in the (encoder) stack.
    pub num_layers: usize,
    /// Optional decoder layer count (not read by the encoder-only model here).
    pub num_decoder_layers: Option<usize>,
    /// Number of attention heads; `num_heads * d_kv` is the attention inner dim.
    pub num_heads: usize,
    /// Number of buckets in the relative-position bias embedding.
    pub relative_attention_num_buckets: usize,
    /// Distances beyond this all map to the last log-spaced bucket.
    #[serde(default = "default_relative_attention_max_distance")]
    pub relative_attention_max_distance: usize,
    // Dropout/initializer fields are carried for config compatibility; this
    // inference-only implementation does not apply dropout.
    pub dropout_rate: f64,
    /// Epsilon added to the variance in `T5LayerNorm`.
    pub layer_norm_epsilon: f64,
    pub initializer_factor: f64,
    /// Feed-forward activation and gating, parsed from the HF string form.
    #[serde(default, deserialize_with = "deserialize_feed_forward_proj_activation")]
    pub feed_forward_proj: ActivationWithOptionalGating,
    #[serde(default = "default_tie_word_embeddings")]
    pub tie_word_embeddings: bool,
    /// Whether blocks carry a cross-attention sub-layer (see `T5Block::load`).
    #[serde(default = "default_is_decoder")]
    pub is_decoder: bool,
    pub is_encoder_decoder: bool,
    /// Combined with the per-stack `decoder` flag in `T5Attention::load`.
    #[serde(default = "default_use_cache")]
    pub use_cache: bool,
    pub pad_token_id: usize,
    pub eos_token_id: usize,
    pub decoder_start_token_id: Option<usize>,
}
103
impl Default for Config {
    /// Default configuration; the values correspond to the small T5
    /// checkpoint family (d_model 512, 6 layers, 8 heads — t5-small-like).
    fn default() -> Self {
        Self {
            vocab_size: 32128,
            d_model: 512,
            d_kv: 64,
            d_ff: 2048,
            num_layers: 6,
            num_decoder_layers: None,
            num_heads: 8,
            relative_attention_num_buckets: 32,
            relative_attention_max_distance: 128,
            dropout_rate: 0.1,
            layer_norm_epsilon: 1e-6,
            initializer_factor: 1.0,
            // Plain (non-gated) ReLU feed-forward, as in original T5.
            feed_forward_proj: ActivationWithOptionalGating {
                gated: false,
                activation: Activation::Relu,
            },
            tie_word_embeddings: true,
            is_decoder: false,
            is_encoder_decoder: true,
            use_cache: true,
            pad_token_id: 0,
            eos_token_id: 1,
            decoder_start_token_id: Some(0),
        }
    }
}
133
/// T5-style layer norm: a learned per-channel scale with no bias and no
/// mean subtraction (RMS normalization — see the `Module` impl).
#[derive(Debug, Clone)]
struct T5LayerNorm {
    weight: Tensor,
    variance_epsilon: f64,
}
139
140impl T5LayerNorm {
141 fn load(h: usize, eps: f64, vb: ShardedVarBuilder) -> Result<Self> {
142 let weight = vb.get(h, "weight")?;
143 Ok(Self {
144 weight,
145 variance_epsilon: eps,
146 })
147 }
148}
149
150impl Module for T5LayerNorm {
151 fn forward(&self, xs: &Tensor) -> Result<Tensor> {
152 let dtype = xs.dtype();
153 let xs_f32 = xs.to_dtype(DType::F32)?;
154 let variance = xs_f32.sqr()?.mean_keepdim(D::Minus1)?;
156 let xs = xs_f32.broadcast_div(&(variance + self.variance_epsilon)?.sqrt()?)?;
157 let xs = xs.to_dtype(dtype)?;
158 let xs = xs.broadcast_mul(&self.weight)?;
159 Ok(xs)
160 }
161}
162
/// Non-gated feed-forward block: `wo(act(wi(x)))`.
#[derive(Debug, Clone)]
struct T5DenseActDense {
    wi: Linear,
    wo: Linear,
    act: Activation,
}
169
170impl T5DenseActDense {
171 fn load(vb: ShardedVarBuilder, cfg: &Config) -> Result<Self> {
172 let wi = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi"))?;
173 let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
174 Ok(Self {
175 wi,
176 wo,
177 act: Activation::Relu,
178 })
179 }
180}
181
182impl Module for T5DenseActDense {
183 fn forward(&self, xs: &Tensor) -> Result<Tensor> {
184 let xs = self.wi.forward(xs)?;
185 let xs = self.act.forward(&xs)?;
186 let xs = self.wo.forward(&xs)?;
187 Ok(xs)
188 }
189}
190
/// Gated feed-forward block: `wo(act(wi_0(x)) * wi_1(x))` (GEGLU/SwiGLU-style).
#[derive(Debug, Clone)]
struct T5DenseGatedActDense {
    wi_0: Linear,
    wi_1: Linear,
    wo: Linear,
    act: Activation,
}
198
199impl T5DenseGatedActDense {
200 fn load(vb: ShardedVarBuilder, cfg: &Config) -> Result<Self> {
201 let wi_0 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_0"))?;
202 let wi_1 = linear_no_bias(cfg.d_model, cfg.d_ff, vb.pp("wi_1"))?;
203 let wo = linear_no_bias(cfg.d_ff, cfg.d_model, vb.pp("wo"))?;
204 Ok(Self {
205 wi_0,
206 wi_1,
207 wo,
208 act: cfg.feed_forward_proj.activation,
209 })
210 }
211}
212
213impl Module for T5DenseGatedActDense {
214 fn forward(&self, xs: &Tensor) -> Result<Tensor> {
215 let hidden_gelu = self.act.forward(&self.wi_0.forward(xs)?)?;
216 let hidden_linear = self.wi_1.forward(xs)?;
217 let xs = hidden_gelu.broadcast_mul(&hidden_linear)?;
218 let xs = self.wo.forward(&xs)?;
219 Ok(xs)
220 }
221}
222
/// Feed-forward sub-layer: pre-norm + (gated or non-gated) FFN + residual.
/// Exactly one of `dense_act` / `gated_dense_act` is `Some`, chosen by
/// `cfg.feed_forward_proj.gated` in `load`.
#[derive(Debug, Clone)]
struct T5LayerFF {
    dense_act: Option<T5DenseActDense>,
    gated_dense_act: Option<T5DenseGatedActDense>,
    layer_norm: T5LayerNorm,
}
229
230impl T5LayerFF {
231 fn load(vb: ShardedVarBuilder, cfg: &Config) -> Result<Self> {
232 let layer_norm =
233 T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
234 let (dense_act, gated_dense_act) = if cfg.feed_forward_proj.gated {
235 (
236 None,
237 Some(T5DenseGatedActDense::load(vb.pp("DenseReluDense"), cfg)?),
238 )
239 } else {
240 (
241 Some(T5DenseActDense::load(vb.pp("DenseReluDense"), cfg)?),
242 None,
243 )
244 };
245 Ok(Self {
246 dense_act,
247 gated_dense_act,
248 layer_norm,
249 })
250 }
251
252 fn cast_to(&mut self, device: &Device) -> Result<()> {
253 self.layer_norm = T5LayerNorm {
254 weight: self.layer_norm.weight.to_device(device)?,
255 variance_epsilon: self.layer_norm.variance_epsilon,
256 };
257 if let Some(dense) = &mut self.dense_act {
258 dense.wi = Linear::new(
259 dense.wi.weight().to_device(device)?,
260 dense.wi.bias().map(|x| x.to_device(device).unwrap()),
261 );
262 dense.wo = Linear::new(
263 dense.wo.weight().to_device(device)?,
264 dense.wo.bias().map(|x| x.to_device(device).unwrap()),
265 );
266 }
267 if let Some(dense) = &mut self.gated_dense_act {
268 dense.wi_0 = Linear::new(
269 dense.wi_0.weight().to_device(device)?,
270 dense.wi_0.bias().map(|x| x.to_device(device).unwrap()),
271 );
272 dense.wi_1 = Linear::new(
273 dense.wi_1.weight().to_device(device)?,
274 dense.wi_1.bias().map(|x| x.to_device(device).unwrap()),
275 );
276 dense.wo = Linear::new(
277 dense.wo.weight().to_device(device)?,
278 dense.wo.bias().map(|x| x.to_device(device).unwrap()),
279 );
280 }
281 Ok(())
282 }
283}
284
285impl Module for T5LayerFF {
286 fn forward(&self, xs: &Tensor) -> Result<Tensor> {
287 let ys = self.layer_norm.forward(xs)?;
288 let ys = match &self.dense_act {
289 Some(dense_act) => dense_act.forward(&ys)?,
290 None => self.gated_dense_act.as_ref().unwrap().forward(&ys)?,
291 };
292 let xs = (xs + ys)?;
293 Ok(xs)
294 }
295}
296
/// Multi-head attention used for both self-attention and cross-attention.
/// `relative_attention_bias` is only present when loaded with
/// `has_relative_attention_bias = true` (the first block of a stack — see
/// `T5Stack::load`); later layers reuse the bias returned from `forward`.
#[derive(Debug, Clone)]
struct T5Attention {
    q: Linear,
    k: Linear,
    v: Linear,
    o: Linear,
    n_heads: usize,
    d_kv: usize,
    relative_attention_bias: Option<Embedding>,
    relative_attention_num_buckets: usize,
    relative_attention_max_distance: usize,
    // n_heads * d_kv — the concatenated-heads width.
    inner_dim: usize,
    // True only for decoder attention with cfg.use_cache; changes how the
    // query window for the relative-position bias is computed.
    use_cache: bool,
}
311
impl T5Attention {
    /// Build the bias-free Q/K/V/O projections (`inner_dim = num_heads * d_kv`)
    /// and, when `has_relative_attention_bias`, the bucket-to-per-head bias
    /// embedding table of shape `(num_buckets, num_heads)`.
    fn load(
        has_relative_attention_bias: bool,
        decoder: bool,
        vb: ShardedVarBuilder,
        cfg: &Config,
    ) -> Result<Self> {
        let inner_dim = cfg.num_heads * cfg.d_kv;
        let q = linear_no_bias(cfg.d_model, inner_dim, vb.pp("q"))?;
        let k = linear_no_bias(cfg.d_model, inner_dim, vb.pp("k"))?;
        let v = linear_no_bias(cfg.d_model, inner_dim, vb.pp("v"))?;
        let o = linear_no_bias(inner_dim, cfg.d_model, vb.pp("o"))?;
        let relative_attention_bias = if has_relative_attention_bias {
            let emb = embedding(
                cfg.relative_attention_num_buckets,
                cfg.num_heads,
                vb.pp("relative_attention_bias"),
            )?;
            Some(emb)
        } else {
            None
        };
        Ok(Self {
            q,
            k,
            v,
            o,
            n_heads: cfg.num_heads,
            d_kv: cfg.d_kv,
            relative_attention_bias,
            relative_attention_num_buckets: cfg.relative_attention_num_buckets,
            relative_attention_max_distance: cfg.relative_attention_max_distance,
            inner_dim,
            // Caching semantics only apply to decoder attention.
            use_cache: cfg.use_cache && decoder,
        })
    }

    /// Attention forward pass.
    ///
    /// * `xs` — query input, `(batch, q_len, d_model)` (rank fixed by the
    ///   `dim(0)`/`dim(1)` reads and the reshape below).
    /// * `position_bias` — precomputed relative-position bias added to the
    ///   scores; when `None` and this layer owns a bias table, the bias is
    ///   computed here and returned so later layers can reuse it.
    /// * `key_value_states` — when `Some`, keys/values are projected from
    ///   these states (cross-attention); otherwise from `xs` (self-attention).
    /// * `mask` — optional `(q_len, kv_len)` u8 mask; non-zero entries are
    ///   overwritten with `-inf` before the softmax.
    ///
    /// Note: scores are NOT scaled by 1/sqrt(d_kv), matching the T5
    /// formulation (the scale is folded into the trained weights).
    fn forward(
        &self,
        xs: &Tensor,
        position_bias: Option<&Tensor>,
        key_value_states: Option<&Tensor>,
        mask: Option<&Tensor>,
    ) -> Result<(Tensor, Option<Tensor>)> {
        // Self-attention reads K/V from the query input itself.
        let kv_input = match key_value_states {
            None => xs,
            Some(key_value_states) => key_value_states,
        };
        let (b_sz, q_len) = (xs.dim(0)?, xs.dim(1)?);
        let kv_len = kv_input.dim(1)?;
        let q = self.q.forward(xs)?;
        let k = self.k.forward(kv_input)?;
        let v = self.v.forward(kv_input)?;
        // Split heads: (b, len, inner_dim) -> (b, n_heads, len, d_kv).
        let q = q
            .reshape((b_sz, q_len, self.n_heads, self.d_kv))?
            .transpose(1, 2)?
            .contiguous()?;
        let k = k
            .reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
            .transpose(1, 2)?;
        let v = v
            .reshape((b_sz, kv_len, self.n_heads, self.d_kv))?
            .transpose(1, 2)?;

        let k = k.contiguous()?;
        let v = v.contiguous()?;
        // Raw attention logits: (b, n_heads, q_len, kv_len).
        let scores = { MatMul.matmul(&q, &k.t()?)? };
        let scores = match mask {
            None => scores,
            // Expand the (q_len, kv_len) mask across batch and heads, then
            // write -inf where the mask is set.
            Some(mask) => masked_fill(
                &scores,
                &mask
                    .unsqueeze(0)?
                    .unsqueeze(0)?
                    .repeat((b_sz, self.n_heads))?,
                f32::NEG_INFINITY,
            )?,
        };

        let (scores, position_bias) = match position_bias {
            // Bias already computed by an earlier layer: just add and pass on.
            Some(position_bias) => (
                scores.broadcast_add(position_bias)?,
                Some(position_bias.clone()),
            ),
            None => match &self.relative_attention_bias {
                None => (scores, None),
                Some(relative_attention_bias) => {
                    let kv_len = k.dim(2)?;
                    // With caching, only the trailing q_len query positions
                    // are present; without it, queries span the full kv range.
                    let (q_start, q_end) = match self.use_cache {
                        true => ((kv_len - q_len) as u32, kv_len as u32),
                        false => (0_u32, kv_len as u32),
                    };
                    // Bidirectional bucketing: half the buckets for future
                    // positions (j > i), half for past, with exact buckets up
                    // to `max_exact` and log-spaced ones beyond, mirroring HF
                    // `_relative_position_bucket`.
                    // NOTE(review): the halving is unconditional here, which
                    // matches encoder (bidirectional) usage — confirm this is
                    // intended for decoder (`use_cache`) paths too.
                    let num_buckets = self.relative_attention_num_buckets as u32 / 2;
                    let max_exact = num_buckets / 2;
                    let relative_position = (q_start..q_end)
                        .map(|i| {
                            (0..kv_len as u32)
                                .map(|j| {
                                    if i < j {
                                        // Future position: offset into the
                                        // upper half of the bucket range.
                                        if j - i < max_exact {
                                            j - i + num_buckets
                                        } else {
                                            // Logarithmic bucket for large
                                            // distances, clamped to the last
                                            // bucket index.
                                            let b = f32::log(
                                                (j - i) as f32 / max_exact as f32,
                                                self.relative_attention_max_distance as f32
                                                    / max_exact as f32,
                                            ) * (num_buckets - max_exact) as f32;
                                            u32::min(
                                                max_exact + num_buckets + b as u32,
                                                self.relative_attention_num_buckets as u32 - 1,
                                            )
                                        }
                                    } else if i - j < max_exact {
                                        // Small past distance: exact bucket.
                                        i - j
                                    } else {
                                        // Large past distance: log-spaced
                                        // bucket, clamped to the lower half.
                                        let b = f32::log(
                                            (i - j) as f32 / max_exact as f32,
                                            self.relative_attention_max_distance as f32
                                                / max_exact as f32,
                                        ) * (num_buckets - max_exact) as f32;
                                        u32::min(max_exact + b as u32, num_buckets - 1)
                                    }
                                })
                                .collect::<Vec<u32>>()
                        })
                        .collect::<Vec<Vec<_>>>();
                    let relative_buckets = Tensor::new(relative_position, q.device())?;
                    // Look up per-head biases and move heads to the front:
                    // (q, kv, n_heads) -> (1, n_heads, q, kv).
                    let position_bias = relative_attention_bias
                        .forward(&relative_buckets)?
                        .permute((2, 0, 1))?
                        .unsqueeze(0)?;
                    (scores.broadcast_add(&position_bias)?, Some(position_bias))
                }
            },
        };

        let attn_weights = { candle_nn::ops::softmax_last_dim(&scores)? };
        let attn_output = MatMul.matmul(&attn_weights, &v)?;
        // Merge heads back: (b, n_heads, q_len, d_kv) -> (b, q_len, inner_dim).
        let attn_output = attn_output
            .transpose(1, 2)?
            .reshape((b_sz, q_len, self.inner_dim))?;
        let attn_output = self.o.forward(&attn_output)?;
        Ok((attn_output, position_bias))
    }
}
462
/// Self-attention sub-layer: pre-norm + attention + residual.
#[derive(Debug, Clone)]
struct T5LayerSelfAttention {
    self_attention: T5Attention,
    layer_norm: T5LayerNorm,
}
468
469impl T5LayerSelfAttention {
470 fn load(h: bool, d: bool, vb: ShardedVarBuilder, cfg: &Config) -> Result<Self> {
471 let self_attention = T5Attention::load(h, d, vb.pp("SelfAttention"), cfg)?;
472 let layer_norm =
473 T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
474 Ok(Self {
475 self_attention,
476 layer_norm,
477 })
478 }
479
480 fn forward(
481 &self,
482 xs: &Tensor,
483 position_bias: Option<&Tensor>,
484 mask: Option<&Tensor>,
485 ) -> Result<(Tensor, Option<Tensor>)> {
486 let normed_xs = self.layer_norm.forward(xs)?;
487 let (ys, position_bias) =
488 self.self_attention
489 .forward(&normed_xs, position_bias, None, mask)?;
490 let ys = (xs + ys)?;
491 Ok((ys, position_bias))
492 }
493
494 fn cast_to(&mut self, device: &Device) -> Result<()> {
495 self.self_attention.q = Linear::new(
496 self.self_attention.q.weight().to_device(device)?,
497 self.self_attention
498 .q
499 .bias()
500 .map(|x| x.to_device(device).unwrap()),
501 );
502 self.self_attention.k = Linear::new(
503 self.self_attention.k.weight().to_device(device)?,
504 self.self_attention
505 .k
506 .bias()
507 .map(|x| x.to_device(device).unwrap()),
508 );
509 self.self_attention.v = Linear::new(
510 self.self_attention.v.weight().to_device(device)?,
511 self.self_attention
512 .v
513 .bias()
514 .map(|x| x.to_device(device).unwrap()),
515 );
516 self.self_attention.o = Linear::new(
517 self.self_attention.o.weight().to_device(device)?,
518 self.self_attention
519 .o
520 .bias()
521 .map(|x| x.to_device(device).unwrap()),
522 );
523 if let Some(embed) = &mut self.self_attention.relative_attention_bias {
524 *embed = Embedding::new(embed.embeddings().to_device(device)?, embed.hidden_size());
525 }
526 self.layer_norm = T5LayerNorm {
527 weight: self.layer_norm.weight.to_device(device)?,
528 variance_epsilon: self.layer_norm.variance_epsilon,
529 };
530 Ok(())
531 }
532}
533
/// Cross-attention sub-layer (decoder blocks): pre-norm + attention over
/// encoder hidden states + residual.
#[derive(Debug, Clone)]
struct T5LayerCrossAttention {
    cross_attention: T5Attention,
    layer_norm: T5LayerNorm,
}
539
540impl T5LayerCrossAttention {
541 fn load(decoder: bool, vb: ShardedVarBuilder, cfg: &Config) -> Result<Self> {
542 let cross_attention = T5Attention::load(false, decoder, vb.pp("EncDecAttention"), cfg)?;
543 let layer_norm =
544 T5LayerNorm::load(cfg.d_model, cfg.layer_norm_epsilon, vb.pp("layer_norm"))?;
545 Ok(Self {
546 cross_attention,
547 layer_norm,
548 })
549 }
550
551 fn forward(
552 &self,
553 hidden_states: &Tensor,
554 position_bias: Option<&Tensor>,
555 key_value_states: &Tensor,
556 ) -> Result<(Tensor, Option<Tensor>)> {
557 let normed_hidden_states = self.layer_norm.forward(hidden_states)?;
558 let (ys, position_bias) = self.cross_attention.forward(
559 &normed_hidden_states,
560 position_bias,
561 Some(key_value_states),
562 None,
563 )?;
564 let ys = (hidden_states + ys)?;
565 Ok((ys, position_bias))
566 }
567
568 fn cast_to(&mut self, device: &Device) -> Result<()> {
569 self.cross_attention.q = Linear::new(
570 self.cross_attention.q.weight().to_device(device)?,
571 self.cross_attention
572 .q
573 .bias()
574 .map(|x| x.to_device(device).unwrap()),
575 );
576 self.cross_attention.k = Linear::new(
577 self.cross_attention.k.weight().to_device(device)?,
578 self.cross_attention
579 .k
580 .bias()
581 .map(|x| x.to_device(device).unwrap()),
582 );
583 self.cross_attention.v = Linear::new(
584 self.cross_attention.v.weight().to_device(device)?,
585 self.cross_attention
586 .v
587 .bias()
588 .map(|x| x.to_device(device).unwrap()),
589 );
590 self.cross_attention.o = Linear::new(
591 self.cross_attention.o.weight().to_device(device)?,
592 self.cross_attention
593 .o
594 .bias()
595 .map(|x| x.to_device(device).unwrap()),
596 );
597 if let Some(embed) = &mut self.cross_attention.relative_attention_bias {
598 *embed = Embedding::new(embed.embeddings().to_device(device)?, embed.hidden_size());
599 }
600 self.layer_norm = T5LayerNorm {
601 weight: self.layer_norm.weight.to_device(device)?,
602 variance_epsilon: self.layer_norm.variance_epsilon,
603 };
604 Ok(())
605 }
606}
607
/// One transformer block: self-attention, optional cross-attention
/// (decoder configs only — see `load`), and a feed-forward sub-layer.
#[derive(Debug, Clone)]
struct T5Block {
    self_attn: T5LayerSelfAttention,
    cross_attn: Option<T5LayerCrossAttention>,
    ff: T5LayerFF,
}
614
615impl T5Block {
616 fn load(
617 has_relative_attention_bias: bool,
618 decoder: bool,
619 vb: ShardedVarBuilder,
620 cfg: &Config,
621 ) -> Result<Self> {
622 let vb = vb.pp("layer");
623 let self_attn =
624 T5LayerSelfAttention::load(has_relative_attention_bias, decoder, vb.pp("0"), cfg)?;
625 let cross_attn = if cfg.is_decoder {
626 Some(T5LayerCrossAttention::load(decoder, vb.pp("1"), cfg)?)
627 } else {
628 None
629 };
630 let ff_i = if cross_attn.is_some() { 2 } else { 1 };
631 let ff = T5LayerFF::load(vb.pp(ff_i.to_string()), cfg)?;
632 Ok(Self {
633 self_attn,
634 cross_attn,
635 ff,
636 })
637 }
638
639 fn forward(
640 &self,
641 xs: &Tensor,
642 position_bias: Option<&Tensor>,
643 encoder_hidden_states: Option<&Tensor>,
644 ) -> Result<(Tensor, Option<Tensor>)> {
645 let mask = match self.cross_attn.is_some() {
647 true => {
648 let mask_len = xs.dim(1)?;
649 if mask_len <= 1 {
652 None
653 } else {
654 Some(get_mask(mask_len, xs.device())?)
655 }
656 }
657 false => None,
658 };
659 let (mut xs, position_bias) = self.self_attn.forward(xs, position_bias, mask.as_ref())?;
660 if xs.dtype() == DType::F16 {
662 xs = clamp_for_f16(&xs)?;
663 }
664 if let Some(cross_attn) = &self.cross_attn {
665 (xs, _) = cross_attn.forward(&xs, None, encoder_hidden_states.unwrap())?;
666 if xs.dtype() == DType::F16 {
668 xs = clamp_for_f16(&xs)?;
669 }
670 }
671 let mut xs = self.ff.forward(&xs)?;
672 if xs.dtype() == DType::F16 {
674 xs = clamp_for_f16(&xs)?;
675 }
676 Ok((xs, position_bias))
677 }
678
679 fn cast_to(&mut self, device: &Device) -> Result<()> {
680 self.self_attn.cast_to(device)?;
681 if let Some(cross_attn) = &mut self.cross_attn {
682 cross_attn.cast_to(device)?;
683 }
684 self.ff.cast_to(device)?;
685 Ok(())
686 }
687}
688
/// A stack of `T5Block`s plus the final layer norm.
#[derive(Debug, Clone)]
struct T5Stack {
    block: Vec<T5Block>,
    // Token-embedding table shared across the model.
    shared: Arc<Embedding>,
    final_layer_norm: T5LayerNorm,
    // Compute device used when offloading: blocks are moved here right
    // before running and back to CPU afterwards (see `forward`).
    device: Device,
    // Enables the per-block device shuffle above.
    offloaded: bool,
}
697
698impl T5Stack {
699 fn load(
700 decoder: bool,
701 vb: ShardedVarBuilder,
702 shared: &Arc<Embedding>,
703 cfg: &Config,
704 device: &Device,
705 offloaded: bool,
706 ) -> Result<Self> {
707 let block = (0..cfg.num_layers)
708 .map(|i| T5Block::load(i == 0, decoder, vb.pp(format!("block.{i}")), cfg))
709 .collect::<Result<Vec<_>>>()?;
710 let final_layer_norm = T5LayerNorm::load(
711 cfg.d_model,
712 cfg.layer_norm_epsilon,
713 vb.pp("final_layer_norm").set_device(device.clone()),
714 )?;
715 Ok(Self {
716 block,
717 shared: shared.clone(),
718 final_layer_norm,
719 device: device.clone(),
720 offloaded,
721 })
722 }
723
724 fn forward(
725 &mut self,
726 input_ids: &Tensor,
727 encoder_hidden_states: Option<&Tensor>,
728 ) -> Result<Tensor> {
729 let input_embeds = self.shared.as_ref().forward(input_ids)?;
730 let mut hidden_states = input_embeds;
731 let mut position_bias = None;
732 for block in self.block.iter_mut() {
733 if self.offloaded {
734 block.cast_to(&self.device)?;
735 }
736 (hidden_states, position_bias) = block.forward(
737 &hidden_states,
738 position_bias.as_ref(),
739 encoder_hidden_states,
740 )?;
741 if self.offloaded {
742 block.cast_to(&Device::Cpu)?;
743 }
744 }
745 self.final_layer_norm.forward(&hidden_states)
746 }
747}
748
/// Encoder-only T5 model (e.g. as a text encoder for downstream pipelines).
#[derive(Debug, Clone)]
pub struct T5EncoderModel {
    encoder: T5Stack,
}
753
754impl T5EncoderModel {
755 pub fn load(
756 vb: ShardedVarBuilder,
757 cfg: &Config,
758 device: &Device,
759 offloaded: bool,
760 ) -> Result<Self> {
761 let shared_vb = if vb.contains_tensor("shared.weight") {
762 vb.pp("shared")
763 } else if vb.contains_tensor("decoder.embed_tokens") {
764 vb.pp("decoder").pp("embed_tokens")
765 } else {
766 vb.pp("encoder").pp("embed_tokens")
767 };
768 let shared = embedding(
769 cfg.vocab_size,
770 cfg.d_model,
771 shared_vb.set_device(device.clone()),
772 )?;
773 let shared = Arc::new(shared);
774 let encoder = T5Stack::load(false, vb.pp("encoder"), &shared, cfg, device, offloaded)?;
775 Ok(Self { encoder })
776 }
777
778 pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> {
779 self.encoder.forward(input_ids, None)
780 }
781}