use super::cache_manager::FullCacheManager;
use super::llg::build_tok_env;
use super::{
    get_model_paths, get_xlora_paths, text_models_inputs_processor::ModelInputs, AdapterKind,
    CacheManager, GeneralMetadata, Loader, ModelKind, ModelPaths, QuantizationKind, TokenSource,
};
use super::{
    AnyMoePipelineMixin, CacheManagerMixin, EitherCache, ForwardInputsResult, IsqPipelineMixin,
    MetadataMixin, ModelCategory, PreProcessingMixin,
};
use crate::device_map::DeviceMapper;
use crate::lora::Ordering;
use crate::pipeline::chat_template::{calculate_eos_tokens, GenerationConfig};
use crate::pipeline::get_chat_template;
use crate::pipeline::inputs_processor::DEFAULT_PROMPT_CHUNK_SIZE;
use crate::pipeline::sampling::sample_and_add_toks;
use crate::pipeline::{ChatTemplate, LocalModelPaths};
use crate::prefix_cacher::PrefixCacheManagerV2;
use crate::sequence::Sequence;
use crate::utils::debug::DeviceRepr;
use crate::utils::model_config as ModelConfig;
use crate::utils::tokenizer::get_tokenizer;
use crate::xlora_models::NonGranularState;
use crate::{
    get_mut_arcmutex, get_paths, DeviceMapSetting, PagedAttentionConfig, Pipeline, Topology,
    TryIntoDType, DEBUG,
};
use crate::{
    models::quantized_llama::ModelWeights as QLlama, utils::tokens::get_token,
    xlora_models::XLoraQLlama,
};
use anyhow::Result;
use candle_core::quantized::ggml_file;
use candle_core::{Device, Tensor};
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use mistralrs_quant::IsqType;
use rand_isaac::Isaac64Rng;
use std::any::Any;
use std::fs;
use std::num::{NonZero, NonZeroUsize};
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use tokenizers::Tokenizer;
use tokio::sync::Mutex;
use tracing::{info, warn};

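// Internal model dispatch: the GGML path only covers the Llama architecture, either as a plain
// quantized model or with X-LoRA/LoRA adapters layered on top.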
enum Model {
    Llama(QLlama),
    XLoraLlama(XLoraQLlama),
}

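/// A loaded GGML pipeline: the quantized Llama weights plus the tokenizer, chat template, and
/// generation metadata needed to serve requests.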
pub struct GGMLPipeline {
    model: Model,
    tokenizer: Arc<Tokenizer>,
    no_kv_cache: bool,
    chat_template: Arc<ChatTemplate>,
    model_id: String,
    non_granular_state: Option<NonGranularState>,
    metadata: Arc<GeneralMetadata>,
}

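/// A loader for a GGML model, optionally with X-LoRA or LoRA adapters.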
pub struct GGMLLoader {
    model_id: String,
    config: GGMLSpecificConfig,
    quantized_model_id: Option<String>,
    quantized_filename: Option<String>,
    xlora_model_id: Option<String>,
    xlora_order: Option<Ordering>,
    no_kv_cache: bool,
    chat_template: Option<String>,
    tokenizer_json: Option<String>,
    kind: ModelKind,
    tgt_non_granular_index: Option<usize>,
    jinja_explicit: Option<String>,
    lora_adapter_ids: Option<Vec<String>>,
}

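/// Configuration specific to GGML loading: the GQA value, the prompt chunk size, and an optional
/// device topology.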
#[derive(Clone, Default)]
pub struct GGMLSpecificConfig {
    pub gqa: usize,
    pub prompt_chunksize: Option<NonZeroUsize>,
    pub topology: Option<Topology>,
}

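/// A builder for a [`GGMLLoader`].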
#[derive(Default)]
pub struct GGMLLoaderBuilder {
    model_id: Option<String>,
    config: GGMLSpecificConfig,
    quantized_model_id: String,
    quantized_filename: String,
    xlora_model_id: Option<String>,
    kind: ModelKind,
    xlora_order: Option<Ordering>,
    no_kv_cache: bool,
    chat_template: Option<String>,
    tokenizer_json: Option<String>,
    tgt_non_granular_index: Option<usize>,
    jinja_explicit: Option<String>,
}

impl GGMLLoaderBuilder {
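    /// Create a builder for a GGML pipeline from a base (tokenizer) model ID plus a quantized
    /// repository and filename.
    ///
    /// The example below is a rough sketch of typical usage; the model ID and filename are
    /// illustrative placeholders, not specific repositories this crate ships with.
    ///
    /// ```ignore
    /// let loader = GGMLLoaderBuilder::new(
    ///     GGMLSpecificConfig::default(),
    ///     None,                                     // chat template override
    ///     None,                                     // tokenizer.json override
    ///     Some("some-org/some-llama".to_string()),  // base/tokenizer model ID (placeholder)
    ///     "some-org/some-llama-ggml".to_string(),   // quantized model ID (placeholder)
    ///     "model-q4_0.bin".to_string(),             // quantized filename (placeholder)
    ///     false,                                    // no_kv_cache
    ///     None,                                     // explicit Jinja template
    /// )
    /// .build();
    /// ```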
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        config: GGMLSpecificConfig,
        chat_template: Option<String>,
        tokenizer_json: Option<String>,
        model_id: Option<String>,
        quantized_model_id: String,
        quantized_filename: String,
        no_kv_cache: bool,
        jinja_explicit: Option<String>,
    ) -> Self {
        let kind = ModelKind::GgufQuantized {
            quant: QuantizationKind::Ggml,
        };

        Self {
            config,
            chat_template,
            tokenizer_json,
            model_id,
            kind,
            quantized_filename,
            quantized_model_id,
            no_kv_cache,
            jinja_explicit,
            ..Default::default()
        }
    }

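    /// Shared adapter plumbing for `with_xlora` and `with_lora`: records the adapter model and
    /// ordering, and falls back to the ordering's base model ID when no model ID was given.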
    fn with_adapter(
        mut self,
        xlora_model_id: String,
        xlora_order: Ordering,
        no_kv_cache: bool,
        tgt_non_granular_index: Option<usize>,
    ) -> Self {
        self.xlora_model_id = Some(xlora_model_id);
        self.xlora_order = Some(xlora_order);
        self.no_kv_cache = no_kv_cache;
        self.tgt_non_granular_index = tgt_non_granular_index;
        self.model_id = if let Some(id) = self.model_id {
            Some(id)
        } else {
            info!(
                "Using adapter base model ID: `{}`",
                self.xlora_order.as_ref().unwrap().base_model_id
            );
            Some(self.xlora_order.as_ref().unwrap().base_model_id.clone())
        };
        self
    }

    pub fn with_xlora(
        mut self,
        xlora_model_id: String,
        xlora_order: Ordering,
        no_kv_cache: bool,
        tgt_non_granular_index: Option<usize>,
    ) -> Self {
        self.kind = (AdapterKind::XLora, QuantizationKind::Ggml).into();

        self.with_adapter(
            xlora_model_id,
            xlora_order,
            no_kv_cache,
            tgt_non_granular_index,
        )
    }

    pub fn with_lora(mut self, lora_model_id: String, lora_order: Ordering) -> Self {
        self.kind = (AdapterKind::Lora, QuantizationKind::Ggml).into();

        self.with_adapter(lora_model_id, lora_order, false, None)
    }

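    /// Consume the builder and produce a boxed [`Loader`].
    ///
    /// # Panics
    ///
    /// Panics if no model ID was provided, either directly or via an adapter ordering.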
    pub fn build(self) -> Box<dyn Loader> {
        Box::new(GGMLLoader {
            model_id: self.model_id.unwrap(),
            config: self.config,
            xlora_model_id: self.xlora_model_id,
            kind: self.kind,
            xlora_order: self.xlora_order,
            no_kv_cache: self.no_kv_cache,
            chat_template: self.chat_template,
            tokenizer_json: self.tokenizer_json,
            tgt_non_granular_index: self.tgt_non_granular_index,
            quantized_filename: Some(self.quantized_filename),
            quantized_model_id: Some(self.quantized_model_id),
            jinja_explicit: self.jinja_explicit,
            lora_adapter_ids: None,
        })
    }
}

impl GGMLLoader {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        model_id: Option<String>,
        config: GGMLSpecificConfig,
        quantized_model_id: Option<String>,
        quantized_filename: Option<String>,
        xlora_model_id: Option<String>,
        kind: ModelKind,
        xlora_order: Option<Ordering>,
        no_kv_cache: bool,
        chat_template: Option<String>,
        tokenizer_json: Option<String>,
        tgt_non_granular_index: Option<usize>,
        jinja_explicit: Option<String>,
    ) -> Self {
        let model_id = if let Some(id) = model_id {
            id
        } else {
            info!(
                "Using adapter base model ID: `{}`",
                xlora_order.as_ref().unwrap().base_model_id
            );
            xlora_order.as_ref().unwrap().base_model_id.clone()
        };
        Self {
            model_id,
            config,
            quantized_model_id,
            quantized_filename,
            xlora_model_id,
            xlora_order,
            no_kv_cache,
            chat_template,
            tokenizer_json,
            kind,
            tgt_non_granular_index,
            jinja_explicit,
            lora_adapter_ids: None,
        }
    }
}

impl Loader for GGMLLoader {
    #[allow(clippy::type_complexity, clippy::too_many_arguments)]
    fn load_model_from_path(
        &self,
        paths: &Box<dyn ModelPaths>,
        dtype: &dyn TryIntoDType,
        device: &Device,
        silent: bool,
        mapper: DeviceMapSetting,
        in_situ_quant: Option<IsqType>,
        mut paged_attn_config: Option<PagedAttentionConfig>,
    ) -> Result<Arc<Mutex<dyn Pipeline + Send + Sync>>> {
        if in_situ_quant.is_some() {
            anyhow::bail!(
                "You are trying to in-situ quantize a GGML model. This will not do anything."
            );
        }

        if matches!(mapper, DeviceMapSetting::Map(_)) {
            anyhow::bail!("Device mapping is not supported for GGML models.")
        }

        if paged_attn_config.is_some() {
            warn!("PagedAttention is not supported for GGML models, disabling it.");

            paged_attn_config = None;
        }

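        // Resolve the prompt chunk size, falling back to the crate default when none is set.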
        let prompt_chunksize = self
            .config
            .prompt_chunksize
            .unwrap_or(DEFAULT_PROMPT_CHUNK_SIZE.try_into().unwrap())
            .get();

        info!("Prompt chunk size is {prompt_chunksize}.",);

        info!(
            "Loading model `{}` on {}.",
            self.get_id(),
            device.device_pretty_repr()
        );

        let mut file = std::fs::File::open(paths.get_weight_filenames().first().unwrap())?;
        let model = ggml_file::Content::read(&mut file, device)
            .map_err(|e| e.with_path(paths.get_weight_filenames().first().unwrap()))?;

        info!("Model config: {:?}", model.hparams);

        if DEBUG.load(std::sync::atomic::Ordering::Relaxed) {
            let mut tensors = Vec::new();
            for (name, t) in &model.tensors {
                tensors.push(format!(
                    "name = `{name}`, shape = {:?}, dtype = {:?}",
                    t.shape().clone(),
                    t.dtype(),
                ));
            }
            fs::write(
                "mistralrs_ggml_tensors.txt",
                serde_json::to_string_pretty(&tensors).expect("Serialization failed."),
            )?;

            info!("Debug is enabled, wrote the names and information about each tensor to `mistralrs_ggml_tensors.txt`.");
        }

        let _ = if paged_attn_config.is_none() {
            warn!("GGML does not currently support PagedAttention, running without");
            None
        } else {
            paged_attn_config
        };

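        // Determine whether adapters are in play (and whether they are X-LoRA specifically),
        // then resolve the activation dtype for this device.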
        let has_adapter = self.kind.is_adapted();
        let is_xlora = self.kind.is_adapted_and(|a| a.is_x_lora());
        let internal_dtype = dtype.try_into_dtype(&[device]).unwrap();

        let model_config = {
            let quant = ModelConfig::ParamsGGML((model, self.config.gqa, internal_dtype).into());

            let mut adapter = None;
            if has_adapter {
                adapter.replace(ModelConfig::Adapter::try_new(
                    paths, device, silent, is_xlora,
                )?);
            }

            ModelConfig::ModelParams::new(quant, adapter)
        };

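        // GGML only carries a Llama architecture here: plain quantized kinds get `QLlama`,
        // adapter kinds get the adapter-aware `XLoraQLlama`.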
        let model = match self.kind {
            ModelKind::GgufQuantized { .. } => Model::Llama(QLlama::try_from(model_config)?),
            ModelKind::GgufAdapter { .. } => {
                Model::XLoraLlama(XLoraQLlama::try_from(model_config)?)
            }
            _ => unreachable!(),
        };

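        // Load the tokenizer, the optional generation config (for BOS/EOS token IDs), and the
        // chat template.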
        let tokenizer = get_tokenizer(paths.get_tokenizer_filename(), None)?;
        let gen_conf: Option<GenerationConfig> = paths.get_gen_conf_filename().map(|f| {
            serde_json::from_str(&fs::read_to_string(f).unwrap())
                .expect("bos_token_id/eos_token_id missing in generation_config.json")
        });
        let chat_template = get_chat_template(
            paths,
            &self.jinja_explicit,
            &paths
                .get_chat_template_explicit()
                .as_ref()
                .map(|x| x.to_string_lossy().to_string())
                .clone(),
            &self.chat_template,
            None,
        );

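        // Collect per-model metadata: maximum sequence length, the number of hidden layers
        // (used to size the KV cache), and the EOS tokens derived from the chat template.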
        let max_seq_len = match model {
            Model::Llama(ref l) => l.max_seq_len,
            Model::XLoraLlama(ref xl) => xl.max_seq_len,
        };
        let tok_env = build_tok_env(tokenizer.clone());
        let num_hidden_layers = match model {
            Model::Llama(ref model) => model.cache.normal().0.len(),
            Model::XLoraLlama(ref model) => model.cache.full().lock().len(),
        };
        let eos = calculate_eos_tokens(&chat_template, gen_conf, &tokenizer);
        Ok(Arc::new(Mutex::new(GGMLPipeline {
            model,
            tokenizer: tokenizer.into(),
            no_kv_cache: self.no_kv_cache,
            chat_template: Arc::new(chat_template),
            model_id: self.model_id.clone(),
            non_granular_state: self.tgt_non_granular_index.map(|tgt_non_granular_index| {
                NonGranularState {
                    non_granular_index: Arc::new(Mutex::new(0)),
                    tgt_non_granular_index,
                }
            }),
            metadata: Arc::new(GeneralMetadata {
                max_seq_len,
                tok_env: Some(tok_env),
                no_kv_cache: self.no_kv_cache,
                no_prefix_cache: false,
                num_hidden_layers,
                eos_tok: eos,
                kind: self.kind.clone(),
                is_xlora,
                activation_dtype: internal_dtype,
                sliding_window: None,
                cache_config: None,
                cache_engine: None,
                prompt_chunksize: Some(NonZero::new(prompt_chunksize).unwrap()),
                model_metadata: None,
            }),
        })))
    }

    #[allow(clippy::type_complexity, clippy::too_many_arguments)]
    fn load_model_from_hf(
        &self,
        revision: Option<String>,
        token_source: TokenSource,
        dtype: &dyn TryIntoDType,
        device: &Device,
        silent: bool,
        mapper: DeviceMapSetting,
        in_situ_quant: Option<IsqType>,
        paged_attn_config: Option<PagedAttentionConfig>,
    ) -> Result<Arc<Mutex<dyn Pipeline + Send + Sync>>> {
        let paths: anyhow::Result<Box<dyn ModelPaths>> = get_paths!(
            LocalModelPaths,
            &token_source,
            revision,
            self,
            self.quantized_model_id,
            Some(vec![self.quantized_filename.as_ref().unwrap().clone()]),
            silent,
            false
        );
        self.load_model_from_path(
            &paths?,
            dtype,
            device,
            silent,
            mapper,
            in_situ_quant,
            paged_attn_config,
        )
    }

    fn get_id(&self) -> String {
        self.xlora_model_id
            .as_deref()
            .unwrap_or(&self.model_id)
            .to_string()
    }

    fn get_kind(&self) -> ModelKind {
        self.kind.clone()
    }
}

impl PreProcessingMixin for GGMLPipeline {
    fn get_chat_template(&self) -> Option<Arc<ChatTemplate>> {
        Some(self.chat_template.clone())
    }
    fn get_input_processor_config(&self) -> Option<Arc<dyn Any>> {
        None
    }
}

impl IsqPipelineMixin for GGMLPipeline {
    fn re_isq_model(&mut self, _dtype: IsqType) -> Result<()> {
        anyhow::bail!(
            "You are trying to in-situ requantize a GGML model. This will not do anything."
        )
    }
}

impl CacheManagerMixin for GGMLPipeline {
    fn clone_in_cache(&self, seqs: &mut [&mut Sequence]) {
        FullCacheManager.clone_in_cache(self, seqs, false)
    }
    fn clone_out_cache(&self, seqs: &mut [&mut Sequence]) {
        FullCacheManager.clone_out_cache(self, seqs, false)
    }
    fn set_none_cache(
        &self,
        seqs: &mut [&mut Sequence],
        reset_non_granular: bool,
        modify_draft_cache: bool,
        load_preallocated_cache: bool,
    ) {
        FullCacheManager.set_none_cache(self, seqs, modify_draft_cache, load_preallocated_cache);
        if reset_non_granular {
            self.reset_non_granular_state()
        }
    }
    fn cache(&self) -> &EitherCache {
        match self.model {
            Model::Llama(ref model) => &model.cache,
            Model::XLoraLlama(ref model) => &model.cache,
        }
    }
}

impl MetadataMixin for GGMLPipeline {
    fn device(&self) -> Device {
        match self.model {
            Model::Llama(ref model) => model.device.clone(),
            Model::XLoraLlama(ref model) => model.device.clone(),
        }
    }
    fn tokenizer(&self) -> Option<Arc<Tokenizer>> {
        Some(self.tokenizer.clone())
    }
    fn name(&self) -> String {
        self.model_id.clone()
    }
    fn reset_non_granular_state(&self) {
        if let Some(s) = self.non_granular_state.as_ref() {
            *self.cache().full().get_scalings_cache() = None;
            *get_mut_arcmutex!(s.non_granular_index) = 0;
        }
    }
    fn get_metadata(&self) -> Arc<GeneralMetadata> {
        self.metadata.clone()
    }
    fn device_mapper(&self) -> Option<&dyn DeviceMapper> {
        None
    }
}

#[async_trait::async_trait]
impl Pipeline for GGMLPipeline {
    fn forward_inputs(
        &mut self,
        inputs: Box<dyn Any>,
        return_raw_logits: bool,
    ) -> Result<ForwardInputsResult, candle_core::Error> {
        let ModelInputs {
            input_ids,
            input_ids_full,
            seqlen_offsets,
            seqlen_offsets_full,
            context_lens,
            position_ids: _,
            paged_attn_meta: _,
            flash_meta,
            flash_meta_full,
        } = *inputs.downcast().expect("Downcast failed.");
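        // The plain quantized model only needs the incremental inputs; the X-LoRA variant
        // additionally consumes the full (uncached) input IDs, offsets, and flash metadata.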
        let logits = match self.model {
            Model::Llama(ref model) => {
                model.forward(&input_ids, &seqlen_offsets, context_lens, None)?
            }
            Model::XLoraLlama(ref model) => model.forward(
                &input_ids,
                input_ids_full.as_ref().unwrap_or(&input_ids),
                &seqlen_offsets,
                seqlen_offsets_full.as_ref().unwrap_or(&seqlen_offsets),
                self.no_kv_cache,
                &self.non_granular_state,
                context_lens,
                &flash_meta,
                flash_meta_full.as_ref().unwrap_or(&flash_meta),
            )?,
        };
        if return_raw_logits {
            Ok(ForwardInputsResult::RawLogits { logits })
        } else {
            Ok(ForwardInputsResult::CausalGeneration { logits })
        }
    }
    async fn sample_causal_gen(
        &self,
        seqs: &mut [&mut Sequence],
        logits: Vec<Tensor>,
        prefix_cacher: &mut PrefixCacheManagerV2,
        disable_eos_stop: bool,
        rng: Arc<std::sync::Mutex<Isaac64Rng>>,
    ) -> Result<(), candle_core::Error> {
        sample_and_add_toks(self, seqs, logits, prefix_cacher, disable_eos_stop, rng).await
    }
    fn category(&self) -> ModelCategory {
        ModelCategory::Text
    }
}

impl AnyMoePipelineMixin for GGMLPipeline {}