use super::llg::build_llg_factory;
use super::{
    get_model_paths, get_xlora_paths, text_models_inputs_processor::ModelInputs, AdapterKind,
    CacheManager, GeneralMetadata, Loader, ModelKind, ModelPaths, QuantizationKind, TokenSource,
};
use super::{
    AnyMoePipelineMixin, CacheManagerMixin, EitherCache, ForwardInputsResult, IsqPipelineMixin,
    MetadataMixin, ModelCategory, PreProcessingMixin,
};
use crate::device_map::DeviceMapper;
use crate::kv_cache::FullCacheManager;
use crate::lora::Ordering;
use crate::pipeline::chat_template::{calculate_eos_tokens, GenerationConfig};
use crate::pipeline::inputs_processor::DEFAULT_PROMPT_CHUNK_SIZE;
use crate::pipeline::sampling::sample_and_add_toks;
use crate::pipeline::{get_chat_template, Modalities, SupportedModality};
use crate::pipeline::{ChatTemplate, LocalModelPaths};
use crate::prefix_cacher::PrefixCacheManagerV2;
use crate::sequence::Sequence;
use crate::utils::debug::DeviceRepr;
use crate::utils::model_config as ModelConfig;
use crate::utils::tokenizer::get_tokenizer;
use crate::xlora_models::NonGranularState;
use crate::{
    get_mut_arcmutex, get_paths, DeviceMapSetting, PagedAttentionConfig, Pipeline, Topology,
    TryIntoDType, DEBUG,
};
use crate::{
    models::quantized_llama::ModelWeights as QLlama, utils::tokens::get_token,
    xlora_models::XLoraQLlama,
};
use anyhow::Result;
use candle_core::quantized::ggml_file;
use candle_core::{Device, Tensor};
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use mistralrs_quant::IsqType;
use rand_isaac::Isaac64Rng;
use std::any::Any;
use std::fs;
use std::num::{NonZero, NonZeroUsize};
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use tokenizers::Tokenizer;
use tokio::sync::Mutex;
use tracing::{info, warn};

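/// The concrete quantized Llama weights backing a [`GGMLPipeline`]: either the plain
/// GGML-quantized model or the X-LoRA/LoRA wrapped variant.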
enum Model {
    Llama(QLlama),
    XLoraLlama(Box<XLoraQLlama>),
}

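/// A text [`Pipeline`] backed by a GGML-quantized Llama model, optionally with
/// X-LoRA/LoRA adapters applied.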
pub struct GGMLPipeline {
    model: Model,
    tokenizer: Arc<Tokenizer>,
    no_kv_cache: bool,
    chat_template: Arc<ChatTemplate>,
    model_id: String,
    non_granular_state: Option<NonGranularState>,
    metadata: Arc<GeneralMetadata>,
}

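/// A [`Loader`] which loads GGML model files into a [`GGMLPipeline`], either from a local
/// path or from the Hugging Face Hub.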
pub struct GGMLLoader {
    model_id: String,
    config: GGMLSpecificConfig,
    quantized_model_id: Option<String>,
    quantized_filename: Option<String>,
    xlora_model_id: Option<String>,
    xlora_order: Option<Ordering>,
    no_kv_cache: bool,
    chat_template: Option<String>,
    tokenizer_json: Option<String>,
    kind: ModelKind,
    tgt_non_granular_index: Option<usize>,
    jinja_explicit: Option<String>,
    lora_adapter_ids: Option<Vec<String>>,
}

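/// Configuration options specific to GGML models: the GQA value, the prompt chunk size,
/// and an optional device topology.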
#[derive(Clone, Default)]
pub struct GGMLSpecificConfig {
    pub gqa: usize,
    pub prompt_chunksize: Option<NonZeroUsize>,
    pub topology: Option<Topology>,
}

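/// Builder for a [`GGMLLoader`]: configure it via [`GGMLLoaderBuilder::new`] and the
/// `with_*` methods, then call [`GGMLLoaderBuilder::build`].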
#[derive(Default)]
pub struct GGMLLoaderBuilder {
    model_id: Option<String>,
    config: GGMLSpecificConfig,
    quantized_model_id: String,
    quantized_filename: String,
    xlora_model_id: Option<String>,
    kind: ModelKind,
    xlora_order: Option<Ordering>,
    no_kv_cache: bool,
    chat_template: Option<String>,
    tokenizer_json: Option<String>,
    tgt_non_granular_index: Option<usize>,
    jinja_explicit: Option<String>,
}

impl GGMLLoaderBuilder {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        config: GGMLSpecificConfig,
        chat_template: Option<String>,
        tokenizer_json: Option<String>,
        model_id: Option<String>,
        quantized_model_id: String,
        quantized_filename: String,
        no_kv_cache: bool,
        jinja_explicit: Option<String>,
    ) -> Self {
        let kind = ModelKind::GgufQuantized {
            quant: QuantizationKind::Ggml,
        };

        Self {
            config,
            chat_template,
            tokenizer_json,
            model_id,
            kind,
            quantized_filename,
            quantized_model_id,
            no_kv_cache,
            jinja_explicit,
            ..Default::default()
        }
    }

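    /// Shared helper for [`Self::with_xlora`] and [`Self::with_lora`]: records the adapter
    /// model ID and ordering, and falls back to the adapter's base model ID when no model
    /// ID was set.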
    fn with_adapter(
        mut self,
        xlora_model_id: String,
        xlora_order: Ordering,
        no_kv_cache: bool,
        tgt_non_granular_index: Option<usize>,
    ) -> Self {
        self.xlora_model_id = Some(xlora_model_id);
        self.xlora_order = Some(xlora_order);
        self.no_kv_cache = no_kv_cache;
        self.tgt_non_granular_index = tgt_non_granular_index;
        self.model_id = if let Some(id) = self.model_id {
            Some(id)
        } else {
            info!(
                "Using adapter base model ID: `{}`",
                self.xlora_order.as_ref().unwrap().base_model_id
            );
            Some(self.xlora_order.as_ref().unwrap().base_model_id.clone())
        };
        self
    }

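    /// Configure the loader to apply X-LoRA adapters from `xlora_model_id`.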
    pub fn with_xlora(
        mut self,
        xlora_model_id: String,
        xlora_order: Ordering,
        no_kv_cache: bool,
        tgt_non_granular_index: Option<usize>,
    ) -> Self {
        self.kind = (AdapterKind::XLora, QuantizationKind::Ggml).into();

        self.with_adapter(
            xlora_model_id,
            xlora_order,
            no_kv_cache,
            tgt_non_granular_index,
        )
    }

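    /// Configure the loader to apply LoRA adapters from `lora_model_id`.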
    pub fn with_lora(mut self, lora_model_id: String, lora_order: Ordering) -> Self {
        self.kind = (AdapterKind::Lora, QuantizationKind::Ggml).into();

        self.with_adapter(lora_model_id, lora_order, false, None)
    }

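    /// Consume the builder and produce a [`GGMLLoader`].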
    pub fn build(self) -> Box<dyn Loader> {
        Box::new(GGMLLoader {
            model_id: self.model_id.unwrap(),
            config: self.config,
            xlora_model_id: self.xlora_model_id,
            kind: self.kind,
            xlora_order: self.xlora_order,
            no_kv_cache: self.no_kv_cache,
            chat_template: self.chat_template,
            tokenizer_json: self.tokenizer_json,
            tgt_non_granular_index: self.tgt_non_granular_index,
            quantized_filename: Some(self.quantized_filename),
            quantized_model_id: Some(self.quantized_model_id),
            jinja_explicit: self.jinja_explicit,
            lora_adapter_ids: None,
        })
    }
}

impl GGMLLoader {
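    /// Create a new [`GGMLLoader`]. If `model_id` is `None`, the base model ID from the
    /// adapter ordering is used instead.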
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        model_id: Option<String>,
        config: GGMLSpecificConfig,
        quantized_model_id: Option<String>,
        quantized_filename: Option<String>,
        xlora_model_id: Option<String>,
        kind: ModelKind,
        xlora_order: Option<Ordering>,
        no_kv_cache: bool,
        chat_template: Option<String>,
        tokenizer_json: Option<String>,
        tgt_non_granular_index: Option<usize>,
        jinja_explicit: Option<String>,
    ) -> Self {
        let model_id = if let Some(id) = model_id {
            id
        } else {
            info!(
                "Using adapter base model ID: `{}`",
                xlora_order.as_ref().unwrap().base_model_id
            );
            xlora_order.as_ref().unwrap().base_model_id.clone()
        };
        Self {
            model_id,
            config,
            quantized_model_id,
            quantized_filename,
            xlora_model_id,
            xlora_order,
            no_kv_cache,
            chat_template,
            tokenizer_json,
            kind,
            tgt_non_granular_index,
            jinja_explicit,
            lora_adapter_ids: None,
        }
    }
}

impl Loader for GGMLLoader {
    #[allow(clippy::type_complexity, clippy::too_many_arguments)]
    fn load_model_from_path(
        &self,
        paths: &Box<dyn ModelPaths>,
        dtype: &dyn TryIntoDType,
        device: &Device,
        silent: bool,
        mapper: DeviceMapSetting,
        in_situ_quant: Option<IsqType>,
        mut paged_attn_config: Option<PagedAttentionConfig>,
    ) -> Result<Arc<Mutex<dyn Pipeline + Send + Sync>>> {
        if in_situ_quant.is_some() {
            anyhow::bail!(
                "You are trying to in-situ quantize a GGML model. This will not do anything."
            );
        }

        if matches!(mapper, DeviceMapSetting::Map(_)) {
            anyhow::bail!("Device mapping is not supported for GGML models.")
        }

        if paged_attn_config.is_some() {
            warn!("PagedAttention is not supported for GGML models, disabling it.");

            paged_attn_config = None;
        }

        let prompt_chunksize = self
            .config
            .prompt_chunksize
            .unwrap_or(DEFAULT_PROMPT_CHUNK_SIZE.try_into().unwrap())
            .get();

        info!("Prompt chunk size is {prompt_chunksize}.");

        info!(
            "Loading model `{}` on {}.",
            self.get_id(),
            device.device_pretty_repr()
        );

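        // Read the GGML container and its hyperparameters from the first weight file.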
        let mut file = std::fs::File::open(paths.get_weight_filenames().first().unwrap())?;
        let model = ggml_file::Content::read(&mut file, device)
            .map_err(|e| e.with_path(paths.get_weight_filenames().first().unwrap()))?;

        info!("Model config: {:?}", model.hparams);

        if DEBUG.load(std::sync::atomic::Ordering::Relaxed) {
            let mut tensors = Vec::new();
            for (name, t) in &model.tensors {
                tensors.push(format!(
                    "name = `{name}`, shape = {:?}, dtype = {:?}",
                    t.shape().clone(),
                    t.dtype(),
                ));
            }
            fs::write(
                "mistralrs_ggml_tensors.txt",
                serde_json::to_string_pretty(&tensors).expect("Serialization failed."),
            )?;

            info!("Debug is enabled, wrote the names and information about each tensor to `mistralrs_ggml_tensors.txt`.");
        }

        let _ = if paged_attn_config.is_none() {
            warn!("GGML does not currently support PagedAttention, running without");
            None
        } else {
            paged_attn_config
        };

        let has_adapter = self.kind.is_adapted();
        let is_xlora = self.kind.is_adapted_and(|a| a.is_x_lora());
        let internal_dtype = dtype.try_into_dtype(&[device]).unwrap();

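        // Combine the quantized GGML weights with an optional (X-)LoRA adapter configuration.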
        let model_config = {
            let quant = ModelConfig::ParamsGGML((model, self.config.gqa, internal_dtype).into());

            let mut adapter = None;
            if has_adapter {
                adapter.replace(ModelConfig::Adapter::try_new(
                    paths, device, silent, is_xlora,
                )?);
            }

            ModelConfig::ModelParams::new(quant, adapter)
        };

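        // Instantiate the concrete model: plain quantized Llama, or the X-LoRA/LoRA variant
        // when adapters are present.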
        let model = match self.kind {
            ModelKind::GgufQuantized { .. } => Model::Llama(QLlama::try_from(model_config)?),
            ModelKind::GgufAdapter { .. } => {
                Model::XLoraLlama(Box::new(XLoraQLlama::try_from(model_config)?))
            }
            _ => unreachable!(),
        };

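        // Load the tokenizer, generation config, and chat template that accompany the model.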
        let tokenizer = get_tokenizer(paths.get_tokenizer_filename(), None)?;
        let gen_conf: Option<GenerationConfig> = paths
            .get_gen_conf_filename()
            .map(|f| serde_json::from_str(&fs::read_to_string(f).unwrap()).unwrap());
        let chat_template_explicit = paths
            .get_chat_template_explicit()
            .as_ref()
            .map(|x| x.to_string_lossy().to_string());
        let chat_template = get_chat_template(
            paths,
            self.jinja_explicit.as_ref(),
            chat_template_explicit.as_ref(),
            self.chat_template.as_ref(),
            None,
        );

        let max_seq_len = match model {
            Model::Llama(ref l) => l.max_seq_len,
            Model::XLoraLlama(ref xl) => xl.max_seq_len,
        };
        let llg_factory = build_llg_factory(tokenizer.clone())?;
        let num_hidden_layers = match model {
            Model::Llama(ref model) => model.cache.normal().0.len(),
            Model::XLoraLlama(ref model) => model.cache.full().lock().len(),
        };
        let eos = calculate_eos_tokens(&chat_template, gen_conf, &tokenizer);
        Ok(Arc::new(Mutex::new(GGMLPipeline {
            model,
            tokenizer: tokenizer.into(),
            no_kv_cache: self.no_kv_cache,
            chat_template: Arc::new(chat_template),
            model_id: self.model_id.clone(),
            non_granular_state: self.tgt_non_granular_index.map(|tgt_non_granular_index| {
                NonGranularState {
                    non_granular_index: Arc::new(Mutex::new(0)),
                    tgt_non_granular_index,
                }
            }),
            metadata: Arc::new(GeneralMetadata {
                max_seq_len,
                llg_factory: Some(llg_factory),
                no_kv_cache: self.no_kv_cache,
                no_prefix_cache: false,
                num_hidden_layers,
                eos_tok: eos,
                kind: self.kind.clone(),
                is_xlora,
                activation_dtype: internal_dtype,
                sliding_window: None,
                cache_config: None,
                cache_engine: None,
                prompt_chunksize: Some(NonZero::new(prompt_chunksize).unwrap()),
                model_metadata: None,
                modalities: Modalities {
                    input: vec![SupportedModality::Text],
                    output: vec![SupportedModality::Text],
                },
            }),
        })))
    }

    #[allow(clippy::type_complexity, clippy::too_many_arguments)]
    fn load_model_from_hf(
        &self,
        revision: Option<String>,
        token_source: TokenSource,
        dtype: &dyn TryIntoDType,
        device: &Device,
        silent: bool,
        mapper: DeviceMapSetting,
        in_situ_quant: Option<IsqType>,
        paged_attn_config: Option<PagedAttentionConfig>,
    ) -> Result<Arc<Mutex<dyn Pipeline + Send + Sync>>> {
        let paths: anyhow::Result<Box<dyn ModelPaths>> = get_paths!(
            LocalModelPaths,
            &token_source,
            revision,
            self,
            self.quantized_model_id,
            Some(vec![self.quantized_filename.as_ref().unwrap().clone()]),
            silent,
            false
        );
        self.load_model_from_path(
            &paths?,
            dtype,
            device,
            silent,
            mapper,
            in_situ_quant,
            paged_attn_config,
        )
    }

    fn get_id(&self) -> String {
        self.xlora_model_id
            .as_deref()
            .unwrap_or(&self.model_id)
            .to_string()
    }

    fn get_kind(&self) -> ModelKind {
        self.kind.clone()
    }
}

impl PreProcessingMixin for GGMLPipeline {
    fn get_chat_template(&self) -> Option<Arc<ChatTemplate>> {
        Some(self.chat_template.clone())
    }
    fn get_input_processor_config(&self) -> Option<Arc<dyn Any>> {
        None
    }
}

impl IsqPipelineMixin for GGMLPipeline {
    fn re_isq_model(&mut self, _dtype: IsqType) -> Result<()> {
        anyhow::bail!(
            "You are trying to in-situ requantize a GGML model. This will not do anything."
        )
    }
}

impl CacheManagerMixin for GGMLPipeline {
    fn clone_in_cache(&self, seqs: &mut [&mut Sequence]) {
        FullCacheManager.clone_in_cache(self, seqs, false)
    }
    fn clone_out_cache(&self, seqs: &mut [&mut Sequence]) {
        FullCacheManager.clone_out_cache(self, seqs, false)
    }
    fn set_none_cache(
        &self,
        seqs: &mut [&mut Sequence],
        reset_non_granular: bool,
        modify_draft_cache: bool,
        load_preallocated_cache: bool,
    ) {
        FullCacheManager.set_none_cache(self, seqs, modify_draft_cache, load_preallocated_cache);
        if reset_non_granular {
            self.reset_non_granular_state()
        }
    }
    fn cache(&self) -> &EitherCache {
        match self.model {
            Model::Llama(ref model) => &model.cache,
            Model::XLoraLlama(ref model) => &model.cache,
        }
    }
}

impl MetadataMixin for GGMLPipeline {
    fn device(&self) -> Device {
        match self.model {
            Model::Llama(ref model) => model.device.clone(),
            Model::XLoraLlama(ref model) => model.device.clone(),
        }
    }
    fn tokenizer(&self) -> Option<Arc<Tokenizer>> {
        Some(self.tokenizer.clone())
    }
    fn name(&self) -> String {
        self.model_id.clone()
    }
    fn reset_non_granular_state(&self) {
        if let Some(s) = self.non_granular_state.as_ref() {
            *self.cache().full().get_scalings_cache() = None;
            *get_mut_arcmutex!(s.non_granular_index) = 0;
        }
    }
    fn get_metadata(&self) -> Arc<GeneralMetadata> {
        self.metadata.clone()
    }
    fn device_mapper(&self) -> Option<&dyn DeviceMapper> {
        None
    }
}

#[async_trait::async_trait]
impl Pipeline for GGMLPipeline {
    fn forward_inputs(
        &mut self,
        inputs: Box<dyn Any>,
        return_raw_logits: bool,
    ) -> Result<ForwardInputsResult, candle_core::Error> {
        let ModelInputs {
            input_ids,
            input_ids_full,
            seqlen_offsets,
            seqlen_offsets_full,
            context_lens,
            position_ids: _,
            paged_attn_meta: _,
            flash_meta,
            flash_meta_full,
        } = *inputs.downcast().expect("Downcast failed.");
        let logits = match self.model {
            Model::Llama(ref model) => {
                model.forward(&input_ids, &seqlen_offsets, context_lens, None)?
            }
            Model::XLoraLlama(ref model) => model.forward(
                &input_ids,
                input_ids_full.as_ref().unwrap_or(&input_ids),
                &seqlen_offsets,
                seqlen_offsets_full.as_ref().unwrap_or(&seqlen_offsets),
                self.no_kv_cache,
                &self.non_granular_state,
                context_lens,
                &flash_meta,
                flash_meta_full.as_ref().unwrap_or(&flash_meta),
            )?,
        };
        if return_raw_logits {
            Ok(ForwardInputsResult::RawLogits { logits })
        } else {
            Ok(ForwardInputsResult::CausalGeneration { logits })
        }
    }
    async fn sample_causal_gen(
        &self,
        seqs: &mut [&mut Sequence],
        logits: Vec<Tensor>,
        prefix_cacher: &mut PrefixCacheManagerV2,
        disable_eos_stop: bool,
        rng: Arc<std::sync::Mutex<Isaac64Rng>>,
    ) -> Result<(), candle_core::Error> {
        sample_and_add_toks(self, seqs, logits, prefix_cacher, disable_eos_stop, rng).await
    }
    fn category(&self) -> ModelCategory {
        ModelCategory::Text
    }
}

impl AnyMoePipelineMixin for GGMLPipeline {}