anemll committed on
Commit 01a207f · verified · 1 parent: edd7407

Upload folder using huggingface_hub

.DS_Store ADDED
Binary file (8.2 kB).
 
chat.py ADDED
@@ -0,0 +1,862 @@
1
+ #!/usr/bin/env python3
2
+ # chat.py
3
+ # Chat with CoreML LLaMA models (ANEMLL)
4
+ # Copyright (c) 2025 Anemll
5
+ # Licensed under the MIT License
6
+
7
+ import argparse
8
+ import os
9
+ import re
10
+ import glob
11
+ from pathlib import Path
12
+ import coremltools as ct
13
+ from transformers import LlamaTokenizer, AutoTokenizer
14
+ import torch
15
+ import torch.nn.functional as F
16
+ import numpy as np
17
+ import queue
18
+ import threading
19
+ import time
20
+ import yaml
21
+ import sys
22
+
23
+ # ANSI color codes
24
+ LIGHT_BLUE = "\033[94m"
25
+ DARK_BLUE = "\033[34m"
26
+ LIGHT_GREEN = "\033[92m"
27
+ RESET_COLOR = "\033[0m"
28
+
29
+ # Add at top with other constants
30
+ WARMUP_TOKEN_LIMIT = 10 # Maximum tokens to generate during warmup
31
+
32
+ class TokenPrinter:
33
+ """Handles background printing of generated tokens."""
34
+ def __init__(self, tokenizer):
35
+ self.tokenizer = tokenizer
36
+ self.token_queue = queue.Queue()
37
+ self.stop_event = threading.Event()
38
+ self.thread = None
39
+ self.buffer = ""
40
+ self.lock = threading.Lock()
41
+ self.thinking = True # Track if we're still in thinking mode
42
+ self.decoding_buffer = [] # Buffer for token IDs
43
+ # Add token counting and timing
44
+ self.start_time = time.time()
45
+ self.token_count = 0
46
+ self.start()
47
+
48
+ def start(self):
49
+ """Start the printer thread."""
50
+ if self.thread is None:
51
+ self.thread = threading.Thread(target=self._print_worker)
52
+ self.thread.daemon = True
53
+ self.thread.start()
54
+
55
+ def add_token(self, token_id):
56
+ """Add a token to the print queue."""
57
+ if not self.stop_event.is_set():
58
+ self.token_queue.put(token_id)
59
+ self.token_count += 1
60
+
61
+ def drain_buffer(self):
62
+ """Decode token IDs from decoding_buffer in the main thread."""
63
+ if not self.decoding_buffer:
64
+ return
65
+
66
+ # Decode all tokens at once in the main thread
67
+ token_str = self.tokenizer.decode(self.decoding_buffer)
68
+ self.decoding_buffer.clear()
69
+
70
+ # Color-handling logic
71
+ if self.thinking and "</think>" in token_str:
72
+ self.thinking = False
73
+ parts = token_str.split("</think>")
74
+ if len(parts) > 0:
75
+ print(parts[0] + "</think>", end='', flush=True)
76
+ if len(parts) > 1:
77
+ print(LIGHT_BLUE + parts[1], end='', flush=True)
78
+ else:
79
+ if not self.thinking:
80
+ print(LIGHT_BLUE + token_str, end='', flush=True)
81
+ else:
82
+ print(token_str, end='', flush=True)
83
+
84
+ def _print_worker(self):
85
+ """Worker thread that takes token_ids from the queue."""
86
+ while not self.stop_event.is_set():
87
+ try:
88
+ token_id = self.token_queue.get(timeout=0.01)
89
+ with self.lock:
90
+ self.decoding_buffer.append(token_id)
91
+ self.token_queue.task_done()
92
+ except queue.Empty:
93
+ continue
94
+ except Exception as e:
95
+ print(f"\nError: Token printer error: {str(e)}")
96
+ break
97
+
98
+ def stop(self):
99
+ """Stop the printer thread."""
100
+ if self.thread and self.thread.is_alive():
101
+ self.stop_event.set()
102
+ try:
103
+ self.thread.join(timeout=1.0)
104
+ except Exception:
105
+ pass
106
+ # Calculate and print tokens/s with shorter format in blue
107
+ elapsed = time.time() - self.start_time
108
+ if elapsed > 0 and self.token_count > 0:
109
+ tokens_per_sec = self.token_count / elapsed
110
+ print(f"\n{DARK_BLUE}{tokens_per_sec:.1f} t/s{RESET_COLOR}")
111
+ else:
112
+ print(RESET_COLOR) # Reset color at the end
113
+ return self.buffer
114
+
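+ # Usage sketch (illustrative): the generation loop calls add_token(token_id),
+ # periodically calls drain_buffer() on the main thread to decode and print, then
+ # stop() to join the worker and print the t/s summary. Note that self.buffer is
+ # never appended to, so stop() currently returns an empty string.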
115
+ def parse_model_path(path):
116
+ """Parse model path and return full path with .mlmodelc or .mlpackage extension."""
117
+ path = Path(path)
118
+
119
+ # If path exists exactly as specified, return it
120
+ if path.exists():
121
+ return str(path)
122
+
123
+ # Try with both extensions
124
+ candidates = [
125
+ path, # Original path
126
+ path.with_suffix('.mlmodelc'), # With .mlmodelc
127
+ path.with_suffix('.mlpackage'), # With .mlpackage
128
+ Path(str(path) + '.mlmodelc'), # Handle case where extension is included
129
+ Path(str(path) + '.mlpackage')
130
+ ]
131
+
132
+ # Try all possible paths
133
+ for candidate in candidates:
134
+ if candidate.exists():
135
+ print(f"Found model at: {candidate}")
136
+ return str(candidate)
137
+
138
+ # If we get here, no valid path was found
139
+ print("\nError: Model not found. Tried the following paths:")
140
+ for candidate in candidates:
141
+ print(f" {candidate}")
142
+ raise FileNotFoundError(f"Model not found: {path}")
143
+
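+ # Example (illustrative): parse_model_path('llama_lm_head_lut6') checks the bare
+ # path, then 'llama_lm_head_lut6.mlmodelc', then 'llama_lm_head_lut6.mlpackage',
+ # and returns the first candidate that exists.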
144
+ def parse_ffn_filename(path):
145
+ """Parse FFN model filename to extract chunk information."""
146
+ path = Path(path)
147
+ pattern = r'FFN_PF.*_chunk_(\d+)of(\d+)'
148
+ match = re.search(pattern, path.name)
149
+
150
+ if match:
151
+ current_chunk = int(match.group(1))
152
+ total_chunks = int(match.group(2))
153
+ return current_chunk, total_chunks
154
+ return None, None
155
+
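+ # Example (illustrative): 'llama_FFN_PF_lut4_chunk_01of02.mlmodelc' -> (1, 2);
+ # names without a _chunk_NNofMM suffix return (None, None).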
156
+ def find_all_chunks(base_path):
157
+ """Find all chunk files matching the base FFN path pattern."""
158
+ path = Path(base_path)
159
+ pattern = re.sub(r'_chunk_\d+of\d+', '_chunk_*', str(path))
160
+ return sorted(glob.glob(pattern))
161
+
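+ # Example (illustrative): '.../llama_FFN_PF_lut4_chunk_01of02.mlmodelc' becomes
+ # the glob '.../llama_FFN_PF_lut4_chunk_*.mlmodelc'; matches are returned sorted.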
162
+ def load_model(path, function_name=None):
163
+ """Load a CoreML model, handling both .mlmodelc and .mlpackage formats."""
164
+ path = Path(path)
165
+ compute_unit = ct.ComputeUnit.CPU_AND_NE
166
+
167
+ try:
168
+ if path.suffix == '.mlmodelc':
169
+ # For compiled models (.mlmodelc), use CompiledMLModel
170
+ if function_name:
171
+ return ct.models.CompiledMLModel(str(path), compute_unit, function_name=function_name)
172
+ else:
173
+ return ct.models.CompiledMLModel(str(path), compute_unit)
174
+ else:
175
+ # For packages (.mlpackage)
176
+ if function_name:
177
+ return ct.models.MLModel(str(path), function_name=function_name)
178
+ else:
179
+ return ct.models.MLModel(str(path))
180
+
181
+ except RuntimeError as e:
182
+ if "valid manifest does not exist" in str(e):
183
+ print(f"\nError: Could not load compiled model at {path}")
184
+ print("This might be because:")
185
+ print("1. The model is not properly compiled")
186
+ print("2. The model was compiled for a different OS version")
187
+ print("3. The model needs to be recompiled")
188
+ print("\nTry using the .mlpackage version instead, or recompile the model.")
189
+ raise
190
+
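+ # Note: CPU_AND_NE schedules work on the CPU plus the Apple Neural Engine.
+ # .mlmodelc bundles are precompiled, so they typically load faster than
+ # .mlpackage, which CoreML compiles at load time.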
191
+ def load_metadata(model,args):
192
+ # Extract metadata and config parameters
193
+ metadata = {}
194
+ if hasattr(model, 'user_defined_metadata'):
195
+ meta = model.user_defined_metadata
196
+
197
+ # Extract key parameters with defaults
198
+ metadata['context_length'] = int(meta.get('com.anemll.context_length', 512))
199
+ metadata['state_length'] = int(meta.get('com.anemll.state_length', metadata['context_length'])) # Added state_length
200
+ metadata['batch_size'] = int(meta.get('com.anemll.batch_size', 64))
201
+ metadata['lut_bits'] = int(meta.get('com.anemll.lut_bits', 0))
202
+ metadata['num_chunks'] = int(meta.get('com.anemll.num_chunks', 1))
203
+
204
+ print("\nExtracted Parameters:")
205
+ print(f" Context Length: {metadata['context_length']}")
206
+ print(f" State Length: {metadata['state_length']}")
207
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
208
+ print(f" LUT Bits: {metadata['lut_bits']}")
209
+ print(f" Number of Chunks: {metadata['num_chunks']}")
210
+
211
+ # Print model info
212
+ print("\nModel Info:")
213
+ if 'com.anemll.info' in meta:
214
+ print(f" {meta['com.anemll.info']}")
215
+ if 'com.github.apple.coremltools.version' in meta:
216
+ print(f" CoreML Tools: {meta['com.github.apple.coremltools.version']}")
217
+
218
+ # Print model input/output shapes
219
+ print("\nModel Shapes:")
220
+ if hasattr(model, 'input_description'):
221
+ print(" Inputs:")
222
+ for name, desc in model.input_description.items():
223
+ print(f" {name}: {desc}")
224
+ if hasattr(model, 'output_description'):
225
+ print(" Outputs:")
226
+ for name, desc in model.output_description.items():
227
+ print(f" {name}: {desc}")
228
+ else:
229
+ print("\nWarning: No metadata found in model")
230
+
231
+ # Check if model directory name contains context length pattern (ctxXXX)
232
+ ctx_len = 512
233
+ if args.context_length is None:
234
+ import re
235
+ ctx_match = re.search(r'ctx(\d+)', str(args.d))
236
+ if ctx_match:
237
+ ctx_len0 = int(ctx_match.group(1))
238
+ if 512 <= ctx_len0 <= 8096:
239
+ ctx_len = ctx_len0
240
+ print(f"\nDetected context length {ctx_len} from directory name")
241
+ else:
242
+ print(f"\nWarning: no ctxNNN context length found in directory name {args.d}; using default {ctx_len}")
243
+ else:
244
+ ctx_len = args.context_length
245
+
246
+ # Use defaults or values from args
247
+ metadata['context_length'] = ctx_len
248
+ metadata['state_length'] = ctx_len
249
+ # Get batch size from args or use default
250
+ metadata['batch_size'] = getattr(args, 'batch_size', 64)
251
+ metadata['lut_bits'] = 4
252
+ metadata['num_chunks'] = getattr(args, 'num_chunks', 4)
253
+ print("\nUsing parameters:")
254
+ print(f" Context Length: {metadata['context_length']}")
255
+ print(f" State Length: {metadata['state_length']}")
256
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
257
+ print(f" LUT Bits: {metadata['lut_bits']}")
258
+ print(f" Number of Chunks: {metadata['num_chunks']}")
259
+
260
+ # Override with values from args if they exist
261
+ if hasattr(args, 'batch_size') and args.batch_size is not None:
262
+ metadata['batch_size'] = args.batch_size
263
+ print(f"\nOverriding batch size from args: {args.batch_size}")
264
+ if hasattr(args, 'num_chunks') and args.num_chunks is not None:
265
+ metadata['num_chunks'] = args.num_chunks
266
+ print(f"\nOverriding num chunks from args: {args.num_chunks}")
267
+
268
+ return metadata
269
+
270
+ def load_models(args,metadata):
271
+ """Load all required models and extract metadata."""
272
+ print("\nLoading models...")
273
+
274
+ try:
275
+ # Load embeddings model
276
+ print("\nLoading embeddings model...")
277
+ embed_path = parse_model_path(args.embed)
278
+ print(f"Loading from: {embed_path}")
279
+ embed_model = load_model(embed_path)
280
+ print("Embeddings model loaded successfully")
281
+ metadata = load_metadata(embed_model,args)
282
+
283
+
284
+
285
+ # Load LM head model
286
+ print("\nLoading LM head model...")
287
+ lmhead_path = parse_model_path(args.lmhead)
288
+ print(f"Loading from: {lmhead_path}")
289
+ lmhead_model = load_model(lmhead_path)
290
+ print("LM head model loaded successfully")
291
+
292
+ # Parse FFN path and find chunks if needed
293
+ print("\nLoading FFN+PREFILL model(s)...")
294
+ ffn_path = parse_model_path(args.ffn)
295
+ chunk_no, total_chunks = parse_ffn_filename(ffn_path)
296
+
297
+ ffn_models = []
298
+ if chunk_no and total_chunks:
299
+ print(f"\nDetected chunked FFN+PREFILL model ({total_chunks} chunks)")
300
+ # Find and load all chunks
301
+ chunk_paths = find_all_chunks(ffn_path)
302
+ if len(chunk_paths) != total_chunks:
303
+ raise ValueError(f"Found {len(chunk_paths)} chunks but filename indicates {total_chunks} chunks")
304
+
305
+ for chunk_path in chunk_paths:
306
+ print(f"\nLoading FFN+PREFILL chunk: {Path(chunk_path).name}")
307
+ try:
308
+ # For chunked models, we need both infer and prefill functions
309
+ ffn_models.append({
310
+ 'infer': load_model(chunk_path, function_name='infer'),
311
+ 'prefill': load_model(chunk_path, function_name='prefill')
312
+ })
313
+ print("Chunk loaded successfully")
314
+ except Exception as e:
315
+ print(f"Error loading chunk {chunk_path}: {str(e)}")
316
+ raise
317
+ metadata = load_metadata(ffn_models[0],args)
318
+
319
+ else:
320
+ print("\nLoading single FFN model...")
321
+ ffn_models.append(load_model(ffn_path))
322
+ print("FFN model loaded successfully")
323
+
324
+ return embed_model, ffn_models, lmhead_model, metadata
325
+
326
+ except Exception as e:
327
+ print(f"\nError loading models: {str(e)}")
328
+ print("\nPlease ensure all model files exist and are accessible.")
329
+ print("Expected files:")
330
+ print(f" Embeddings: {args.embed}")
331
+ print(f" LM Head: {args.lmhead}")
332
+ print(f" FFN: {args.ffn}")
333
+ raise
334
+
335
+ # At the top of the file, make this a default path
336
+
337
+ def initialize_tokenizer(model_path=None):
338
+ """Initialize and configure the tokenizer."""
339
+ try:
340
+
341
+
342
+ tokenizer = AutoTokenizer.from_pretrained(
343
+ str(model_path),
344
+ use_fast=False,
345
+ trust_remote_code=True
346
+ )
347
+
348
+ print("\nTokenizer Configuration:")
349
+ print(f"Tokenizer type: {type(tokenizer)}")
350
+ print(f"Tokenizer name: {tokenizer.__class__.__name__}")
351
+ print(f"Vocabulary size: {len(tokenizer)}")
352
+ print(f"Model max length: {tokenizer.model_max_length}")
353
+
354
+ if tokenizer.pad_token is None:
355
+ tokenizer.pad_token = tokenizer.eos_token
356
+ tokenizer.pad_token_id = tokenizer.eos_token_id
357
+ print("Set PAD token to EOS token")
358
+
359
+ tokenizer.padding_side = "left"
360
+
361
+ print(f"\nSpecial Tokens:")
362
+ print(f"PAD token: '{tokenizer.pad_token}' (ID: {tokenizer.pad_token_id})")
363
+ print(f"EOS token: '{tokenizer.eos_token}' (ID: {tokenizer.eos_token_id})")
364
+ print(f"BOS token: '{tokenizer.bos_token}' (ID: {tokenizer.bos_token_id})")
365
+ print(f"UNK token: '{tokenizer.unk_token}' (ID: {tokenizer.unk_token_id})")
366
+
367
+ return tokenizer
368
+
369
+ except Exception as e:
370
+ print(f"\nError: Failed to load tokenizer from {model_path}")
371
+ print(f"Error details: {str(e)}")
372
+ print(f"Error type: {type(e)}")
373
+ print("\nThis code requires a Llama 3.2 model for chat template functionality.")
374
+ print("Please provide the path to a Llama 3.2 model directory.")
375
+ import traceback
376
+ traceback.print_exc()
377
+ raise
378
+
379
+
380
+
381
+ def make_causal_mask(length, start):
382
+ """Create causal attention mask."""
383
+ mask = np.full((1, 1, length, length), -np.inf, dtype=np.float16)
384
+ row_indices = np.arange(length).reshape(length, 1)
385
+ col_indices = np.arange(length).reshape(1, length)
386
+ mask[:, :, col_indices <= (row_indices + start)] = 0
387
+ return mask
388
+
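+ # Illustration: make_causal_mask(4, 0)[0, 0] gives (with -inf shown as *):
+ # [[0, *, *, *],
+ #  [0, 0, *, *],
+ #  [0, 0, 0, *],
+ #  [0, 0, 0, 0]]
+ # i.e. query position i may attend to key positions j <= i + start.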
389
+ def initialize_causal_mask(context_length):
390
+ """Initialize causal mask for transformer attention."""
391
+ causal_mask = make_causal_mask(context_length, 0)
392
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
393
+ print(f"\nInitialized causal mask for context length {context_length}")
394
+ return causal_mask
395
+
396
+ def run_prefill(embed_model, ffn_models, input_ids, context_pos, context_length, batch_size=64, state=None, causal_mask=None):
397
+ """Run prefill on the input sequence."""
398
+ # Use provided causal mask or create one if not provided
399
+ if causal_mask is None:
400
+ causal_mask = make_causal_mask(context_length, 0)
401
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
402
+
403
+ # Process in batches
404
+ batch_pos = 0
405
+ while batch_pos < context_pos:
406
+ batch_end = min(batch_pos + batch_size, context_pos)
407
+ current_batch_size = batch_end - batch_pos
408
+
409
+ # Get current batch
410
+ batch_input = input_ids[:, batch_pos:batch_end]
411
+
412
+ # Always pad to full batch size for prefill
413
+ batch_input = F.pad(
414
+ batch_input,
415
+ (0, batch_size - current_batch_size),
416
+ value=0
417
+ )
418
+
419
+ # Generate position IDs for full batch size
420
+ position_ids = torch.arange(batch_size, dtype=torch.int32) # Changed: Always use full batch size
421
+ batch_causal_mask = causal_mask[:, :, :batch_size, :] # Changed: Use full batch size
422
+
423
+ # Run embeddings with proper batch size
424
+ hidden_states = torch.from_numpy(
425
+ embed_model.predict({
426
+ 'input_ids': batch_input.numpy(),
427
+ 'batch_size': np.array([batch_size], dtype=np.int32) # Add batch_size parameter
428
+ })['hidden_states']
429
+ )
430
+
431
+ # Run through FFN chunks with state
432
+ for ffn_model in ffn_models:
433
+ if isinstance(ffn_model, dict):
434
+ inputs = {
435
+ 'hidden_states': hidden_states.numpy(), # [1, 64, hidden_size]
436
+ 'position_ids': position_ids.numpy(), # [64]
437
+ 'causal_mask': batch_causal_mask.numpy(), # [1, 1, 64, context_length]
438
+ 'current_pos': np.array([batch_pos], dtype=np.int32) # [1]
439
+ }
440
+ output = ffn_model['prefill'].predict(inputs, state)
441
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
442
+
443
+ batch_pos = batch_end
444
+
445
+ return torch.tensor([context_pos], dtype=torch.int32)
446
+
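+ # Prefill walks the prompt in fixed windows of batch_size tokens, zero-padding the
+ # final window, so the CoreML 'prefill' function always sees constant shapes while
+ # the KV-cache state accumulates across windows via current_pos.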
447
+ def generate_next_token(embed_model, ffn_models, lmhead_model, input_ids, pos, context_length, state=None, causal_mask=None, temperature=0.0):
448
+ """Generate the next token."""
449
+ # Get current token
450
+ current_token = input_ids[:, pos-1:pos] # [1, 1]
451
+
452
+ # Run embeddings
453
+ hidden_states = torch.from_numpy(
454
+ embed_model.predict({'input_ids': current_token.numpy()})['hidden_states']
455
+ ) # [1, 1, hidden_size]
456
+
457
+ # Create masks
458
+ update_mask = torch.zeros((1, 1, context_length, 1), dtype=torch.float16)
459
+ update_mask[0, 0, pos-1, 0] = 1.0
460
+ position_ids = torch.tensor([pos-1], dtype=torch.int32) # [1]
461
+
462
+ # Use provided causal mask or create one if not provided
463
+ if causal_mask is None:
464
+ causal_mask_data = make_causal_mask(context_length, 0)
465
+ single_causal_mask = torch.tensor(causal_mask_data[:, :, pos-1:pos, :], dtype=torch.float16) # [1, 1, 1, context_length]
466
+ else:
467
+ single_causal_mask = causal_mask[:, :, pos-1:pos, :]
468
+
469
+ # Run through FFN chunks with state
470
+ for ffn_model in ffn_models:
471
+ if isinstance(ffn_model, dict):
472
+ inputs = {
473
+ 'hidden_states': hidden_states.numpy(),
474
+ 'update_mask': update_mask.numpy(),
475
+ 'position_ids': position_ids.numpy(),
476
+ 'causal_mask': single_causal_mask.numpy(),
477
+ 'current_pos': position_ids.numpy()
478
+ }
479
+ output = ffn_model['infer'].predict(inputs, state)
480
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
481
+
482
+ # Run LM head
483
+ lm_output = lmhead_model.predict({'hidden_states': hidden_states.numpy()})
484
+ # Debug print
485
+ #print("\nLM Head output keys:", list(lm_output.keys()))
486
+
487
+ # Combine logits1-8 if they exist
488
+ if 'logits1' in lm_output:
489
+ # Concatenate all logits parts
490
+ logits_parts = []
491
+ for i in range(1, 9):
492
+ key = f'logits{i}'
493
+ if key in lm_output:
494
+ logits_parts.append(torch.from_numpy(lm_output[key]))
495
+ logits = torch.cat(logits_parts, dim=-1) # Concatenate along vocab dimension
496
+ else:
497
+ # Try output_logits as fallback
498
+ logits = torch.from_numpy(lm_output['output_logits'])
499
+
500
+ # Apply temperature and sample
501
+ if temperature > 0:
502
+ logits = logits / temperature
503
+ probs = F.softmax(logits[0, -1, :], dim=-1)
504
+ next_token = torch.multinomial(probs, num_samples=1).item()
505
+ else:
506
+ next_token = torch.argmax(logits[0, -1, :]).item()
507
+
508
+ return next_token
509
+
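+ # Note: the exported LM head may split the vocabulary across logits1..logits8
+ # (an export-time choice, presumably to keep per-tensor sizes ANE-friendly);
+ # concatenating along the last axis restores the full [1, 1, vocab_size] logits
+ # row used for greedy argmax or temperature sampling.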
510
+ def create_unified_state(ffn_models, context_length):
511
+ """Create unified KV cache state for transformer."""
512
+ if isinstance(ffn_models[0], dict):
513
+ # Use first FFN model's prefill function to create state
514
+ state = ffn_models[0]['prefill'].make_state()
515
+ print(f"\nCreated unified transformer state for {len(ffn_models)} chunks")
516
+ return state
517
+ else:
518
+ state = ffn_models[0].make_state()
519
+ print("\nCreated unified transformer state")
520
+ return state
521
+
522
+ def chat_loop(embed_model, ffn_models, lmhead_model, tokenizer, metadata, state, causal_mask=None, auto_prompt=None, warmup=False):
523
+ """Interactive chat loop."""
524
+ context_length = metadata.get('context_length')
525
+ batch_size = metadata.get('batch_size', 64)
526
+
527
+ if not warmup:
528
+ print(f"\nUsing context length: {context_length}")
529
+ print("\nStarting chat session. Press Ctrl+D to exit.")
530
+ print("Type your message and press Enter to chat.")
531
+
532
+ # Check if tokenizer has chat template and if it works
533
+ has_chat_template = False
534
+ try:
535
+ # Test if chat template works
536
+ test_messages = [{"role": "user", "content": "test"}]
537
+ tokenizer.apply_chat_template(test_messages, return_tensors="pt")
538
+ has_chat_template = True
539
+ if not warmup:
540
+ print("\nUsing chat template for prompts")
541
+ except Exception:
542
+ if not warmup:
543
+ print("\nUsing manual formatting for prompts")
544
+
545
+ conversation = []
546
+
547
+ try:
548
+ while True:
549
+ try:
550
+ if not warmup:
551
+ print(f"\n{LIGHT_GREEN}You:{RESET_COLOR}", end=' ', flush=True)
552
+ if auto_prompt is not None:
553
+ user_input = auto_prompt
554
+ if not warmup:
555
+ print(user_input)
556
+ else:
557
+ user_input = input().strip()
558
+ except EOFError:
559
+ if not warmup:
560
+ print("\nExiting chat...")
561
+ break
562
+
563
+ if not user_input:
564
+ continue
565
+
566
+ # Format prompt based on tokenizer capabilities
567
+ if has_chat_template:
568
+ messages = [{"role": "user", "content": user_input}]
569
+ input_ids = tokenizer.apply_chat_template(
570
+ messages,
571
+ return_tensors="pt",
572
+ add_generation_prompt=True
573
+ ).to(torch.int32)
574
+ else:
575
+ # Manual formatting for Llama models without chat template
576
+ formatted_prompt = f"[INST] {user_input} [/INST]"
577
+ input_ids = tokenizer(
578
+ formatted_prompt,
579
+ return_tensors="pt",
580
+ add_special_tokens=True
581
+ ).input_ids.to(torch.int32)
582
+
583
+ context_pos = input_ids.size(1)
584
+
585
+ if not warmup:
586
+ print(f"\n{LIGHT_BLUE}Assistant:{RESET_COLOR}", end=' ', flush=True)
587
+
588
+ # Initialize token printer
589
+ token_printer = TokenPrinter(tokenizer)
590
+ tokens_generated = 0 # Track number of tokens
591
+
592
+ try:
593
+ # Start prefill timing
594
+ prefill_start = time.time()
595
+
596
+ # Run prefill with state and causal mask
597
+ current_pos = run_prefill(
598
+ embed_model,
599
+ ffn_models,
600
+ input_ids,
601
+ context_pos,
602
+ context_length,
603
+ batch_size,
604
+ state,
605
+ causal_mask
606
+ )
607
+
608
+ # Calculate prefill timing
609
+ prefill_time = time.time() - prefill_start
610
+ prefill_tokens = context_pos # Number of tokens in input
611
+ prefill_tokens_per_sec = prefill_tokens / prefill_time if prefill_time > 0 else 0
612
+
613
+ # Generation loop with state
614
+ # input_ids already holds the prompt; generated tokens are appended below
615
+ pos = context_pos
616
+ inference_start = time.time()
617
+ inference_tokens = 0
618
+
619
+ while pos < context_length - 1:
620
+ # Generate next token with causal mask
621
+ next_token = generate_next_token(
622
+ embed_model,
623
+ ffn_models,
624
+ lmhead_model,
625
+ input_ids,
626
+ pos,
627
+ context_length,
628
+ state,
629
+ causal_mask
630
+ )
631
+
632
+ # Add token to sequence
633
+ if pos < input_ids.size(1):
634
+ input_ids[0, pos] = next_token
635
+ else:
636
+ input_ids = torch.cat([
637
+ input_ids,
638
+ torch.tensor([[next_token]], dtype=torch.int32)
639
+ ], dim=1)
640
+
641
+ # Add to printer only if not in warmup
642
+ if not warmup:
643
+ token_printer.add_token(next_token)
644
+ token_printer.drain_buffer()
645
+
646
+ pos += 1
647
+ tokens_generated += 1
648
+ inference_tokens += 1
649
+
650
+ # Check limits
651
+ if warmup and tokens_generated >= WARMUP_TOKEN_LIMIT:
652
+ break
653
+
654
+ if next_token == tokenizer.eos_token_id:
655
+ break
656
+
657
+ # Calculate inference timing
658
+ inference_time = time.time() - inference_start
659
+ inference_tokens_per_sec = inference_tokens / inference_time if inference_time > 0 else 0
660
+
661
+ # Get final response and add to conversation
662
+ if not warmup:
663
+ response = token_printer.stop()
664
+ # Print timing stats
665
+ prefill_ms = prefill_time * 1000 # Convert to milliseconds
666
+ print(f"\nPrefill: {prefill_ms:.1f}ms ({prefill_tokens_per_sec:.1f} t/s)")
667
+ print(f"Inference: {inference_tokens_per_sec:.1f} t/s")
668
+ print(f"Total: Generated {tokens_generated} tokens in {prefill_time + inference_time:.2f}s")
669
+ conversation.append({"role": "assistant", "content": response})
670
+ else:
671
+ token_printer.stop() # Clean up without printing stats
672
+
673
+ # Exit after one response in auto_prompt mode
674
+ if auto_prompt is not None:
675
+ break
676
+
677
+ except KeyboardInterrupt:
678
+ print("\nGeneration interrupted")
679
+ token_printer.stop()
680
+ continue
681
+
682
+ except Exception as e:
683
+ print(f"\nError in chat loop: {str(e)}")
684
+ import traceback
685
+ traceback.print_exc()
686
+
687
+ def parse_args():
688
+ parser = argparse.ArgumentParser(description='Chat with CoreML LLaMA (GIL-safe) (c) 2025 Anemll')
689
+
690
+ # Add meta.yaml option
691
+ parser.add_argument('--meta', type=str, help='Path to meta.yaml to load all parameters')
692
+
693
+ # Model paths
694
+ parser.add_argument('--d', '--dir', type=str, default='.',
695
+ help='Directory containing model files (default: current directory)')
696
+ parser.add_argument('--embed', type=str, required=False,
697
+ help='Path to embeddings model (relative to --dir)')
698
+ parser.add_argument('--ffn', type=str, required=False,
699
+ help='Path to FFN model (can be chunked, relative to --dir)')
700
+ parser.add_argument('--lmhead', type=str, required=False,
701
+ help='Path to LM head model (relative to --dir)')
702
+ parser.add_argument('--tokenizer', type=str, required=False,
703
+ help='Path to tokenizer')
704
+
705
+ # Add new argument for auto-generation
706
+ parser.add_argument('--prompt', type=str,
707
+ help='If specified, run once with this prompt and exit')
708
+
709
+ # Add no-warmup flag
710
+ parser.add_argument('--nw', action='store_true',
711
+ help='Skip warmup phase')
712
+
713
+ # Model configuration
714
+ parser.add_argument('--context-length', type=int,
715
+ help='Context length for the model (default: 512), if not provided, it will be detected from the model directory name ctxNUMBER')
716
+ parser.add_argument('--batch-size', type=int,
717
+ help='Batch size for prefill (default: 64)')
718
+
719
+ args = parser.parse_args()
720
+
721
+ # If meta.yaml is provided, load parameters from it
722
+ if args.meta:
723
+ try:
724
+ with open(args.meta, 'r') as f:
725
+ meta = yaml.safe_load(f)
726
+ params = meta['model_info']['parameters']
727
+
728
+ # Set model directory to meta.yaml directory if not specified
729
+ if not args.d or args.d == '.':
730
+ args.d = str(Path(args.meta).parent)
731
+
732
+ # Build model paths based on parameters
733
+ prefix = params.get('model_prefix', 'llama') # Default to 'llama' if not specified
734
+ lut_ffn = f"_lut{params['lut_ffn']}" if params['lut_ffn'] != 'none' else ''
735
+ lut_lmhead = f"_lut{params['lut_lmhead']}" if params['lut_lmhead'] != 'none' else ''
736
+ lut_embeddings = f"_lut{params['lut_embeddings']}" if params['lut_embeddings'] != 'none' else ''
737
+ num_chunks = int(params['num_chunks'])
738
+
739
+ # Set model paths if not specified
740
+ if not args.lmhead:
741
+ args.lmhead = f'{prefix}_lm_head{lut_lmhead}'
742
+ if not args.embed:
743
+ args.embed = f'{prefix}_embeddings{lut_embeddings}' # Changed from lm_head to embeddings
744
+ if not args.ffn:
745
+ args.ffn = f'{prefix}_FFN_PF{lut_ffn}_chunk_01of{num_chunks:02d}'
746
+ if not args.tokenizer:
747
+ args.tokenizer = args.d
748
+
749
+ # Set other parameters if not overridden by command line
750
+ if args.context_length is None:
751
+ args.context_length = int(params['context_length'])
752
+ if args.batch_size is None:
753
+ args.batch_size = int(params['batch_size'])
754
+ args.num_chunks = num_chunks
755
+
756
+ print(f"\nLoaded parameters from {args.meta}:")
757
+ print(f" Context Length: {args.context_length}")
758
+ print(f" Batch Size: {args.batch_size}")
759
+ print(f" Num Chunks: {args.num_chunks}")
760
+ print(f" Models Directory: {args.d}")
761
+ print(f" Embeddings: {args.embed}")
762
+ print(f" LM Head: {args.lmhead}")
763
+ print(f" FFN: {args.ffn}")
764
+
765
+ except Exception as e:
766
+ print(f"\nError loading meta.yaml: {str(e)}")
767
+ sys.exit(1)
768
+
769
+ return args
770
+
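+ # Illustrative meta.yaml layout consumed above (values are examples, not defaults):
+ # model_info:
+ #   parameters:
+ #     model_prefix: llama
+ #     context_length: 512
+ #     batch_size: 64
+ #     num_chunks: 2
+ #     lut_ffn: 4
+ #     lut_lmhead: 6
+ #     lut_embeddings: none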
771
+ def main():
772
+ args = parse_args()
773
+
774
+ # Convert directory to absolute path
775
+ model_dir = Path(args.d).resolve()
776
+ if not model_dir.exists():
777
+ print(f"\nError: Model directory not found: {model_dir}")
778
+ return 1
779
+
780
+ print(f"\nUsing model directory: {model_dir}")
781
+ print(f"Context length: {args.context_length}")
782
+
783
+ try:
784
+ # Update paths to be relative to model directory
785
+ args.embed = str(model_dir / args.embed)
786
+ args.ffn = str(model_dir / args.ffn)
787
+ args.lmhead = str(model_dir / args.lmhead)
788
+
789
+ # Handle tokenizer path separately since it's not relative to model_dir
790
+ if args.tokenizer is None:
791
+ args.tokenizer = str(model_dir)
792
+
793
+ if not Path(args.tokenizer).exists():
794
+ print(f"\nError: Tokenizer directory not found: {args.tokenizer}")
795
+ return 1
796
+
797
+ args.tokenizer = str(Path(args.tokenizer).resolve()) # Convert to absolute path
798
+ print(f"Using tokenizer path: {args.tokenizer}")
799
+
800
+ metadata = {}
801
+ # Load models and extract metadata
802
+ embed_model, ffn_models, lmhead_model, metadata = load_models(args,metadata)
803
+
804
+ print(f"\nMetadata before args.context_length override: {metadata}")
805
+
806
+ # Override context length from command line if provided
807
+ if args.context_length is not None:
808
+ metadata['context_length'] = args.context_length
809
+ metadata['state_length'] = args.context_length # Also update state_length
810
+ print(f"\nOverriding context length from command line: {args.context_length}")
811
+
812
+ print(f"\nMetadata after load_models: {metadata}")
813
+
814
+ # Load tokenizer with resolved path
815
+ tokenizer = initialize_tokenizer(args.tokenizer)
816
+ if tokenizer is None:
817
+ raise RuntimeError("Failed to initialize tokenizer")
818
+
819
+ # Create unified state once
820
+ state = create_unified_state(ffn_models, metadata['context_length'])
821
+
822
+ # Initialize causal mask once
823
+ causal_mask = initialize_causal_mask(metadata['context_length'])
824
+
825
+ # Warmup runs work around Python GIL stalls seen on first CoreML predictions
826
+ if not args.nw:
827
+ for i in range(2):
828
+ chat_loop(
829
+ embed_model=embed_model,
830
+ ffn_models=ffn_models,
831
+ lmhead_model=lmhead_model,
832
+ tokenizer=tokenizer,
833
+ metadata=metadata,
834
+ state=state,
835
+ causal_mask=causal_mask, # Pass the causal mask
836
+ warmup=True,
837
+ auto_prompt="who are you?"
838
+ )
839
+
840
+ # Main run
841
+ chat_loop(
842
+ embed_model=embed_model,
843
+ ffn_models=ffn_models,
844
+ lmhead_model=lmhead_model,
845
+ tokenizer=tokenizer,
846
+ metadata=metadata,
847
+ state=state,
848
+ causal_mask=causal_mask, # Pass the causal mask
849
+ warmup=False,
850
+ auto_prompt=args.prompt
851
+ )
852
+
853
+ except Exception as e:
854
+ print(f"\nError: {str(e)}")
855
+ import traceback
856
+ traceback.print_exc()
857
+ return 1
858
+
859
+ return 0
860
+
861
+ if __name__ == "__main__":
862
+ sys.exit(main())
chat_full.py ADDED
@@ -0,0 +1,960 @@
1
+ #!/usr/bin/env python3
2
+ # chat_full.py
3
+ # Full chat with CoreML LLaMA with context-window shifting (ANEMLL)
4
+ # Copyright (c) 2025 Anemll
5
+ # Licensed under the MIT License
6
+
7
+ import argparse
8
+ import os
9
+ import re
10
+ import glob
11
+ from pathlib import Path
12
+ import coremltools as ct
13
+ from transformers import LlamaTokenizer, AutoTokenizer
14
+ import torch
15
+ import torch.nn.functional as F
16
+ import numpy as np
17
+ import queue
18
+ import threading
19
+ import time
20
+ import yaml
21
+ import sys
22
+
23
+ # ANSI color codes
24
+ LIGHT_BLUE = "\033[94m"
25
+ DARK_BLUE = "\033[34m"
26
+ LIGHT_GREEN = "\033[92m"
27
+ RESET_COLOR = "\033[0m"
28
+
29
+ # Add at the top with other constants
30
+ WARMUP_TOKEN_LIMIT = 10 # Maximum tokens to generate during warmup
31
+ THINKING_MODE = False
32
+ THINKING_PROMPT = """You are a deep thinking AI, you may use extremely long chains of thought to deeply consider the problem and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. You should enclose your thoughts and internal monologue inside <think> </think> tags, and then provide your solution or response to the problem."""
33
+
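+ # /t at the prompt toggles THINKING_MODE per turn; THINKING_PROMPT is presumably
+ # injected as the system message while the mode is on, and TokenPrinter switches
+ # the visible answer to LIGHT_BLUE once the closing </think> tag appears.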
34
+ class TokenPrinter:
35
+ """Handles background printing of generated tokens."""
36
+ def __init__(self, tokenizer):
37
+ self.tokenizer = tokenizer
38
+ self.token_queue = queue.Queue()
39
+ self.stop_event = threading.Event()
40
+ self.thread = None
41
+ self.buffer = ""
42
+ self.lock = threading.Lock()
43
+ self.thinking = True # Track if we're still in thinking mode
44
+ self.decoding_buffer = [] # Buffer for token IDs
45
+ # Timing and stats tracking
46
+ self.start_time = time.time()
47
+ self.token_count = 0
48
+ self.prefill_time = 0
49
+ self.inference_time = 0
50
+ self.context_pos = 0
51
+ self.start()
52
+
53
+ def start(self):
54
+ """Start the printer thread."""
55
+ if self.thread is None:
56
+ self.thread = threading.Thread(target=self._print_worker)
57
+ self.thread.daemon = True
58
+ self.thread.start()
59
+
60
+ def add_token(self, token_id):
61
+ """Add a token to the print queue."""
62
+ if not self.stop_event.is_set():
63
+ self.token_queue.put(token_id)
64
+ self.token_count += 1
65
+
66
+ def drain_buffer(self):
67
+ """Decode token IDs from decoding_buffer in the main thread."""
68
+ if not self.decoding_buffer:
69
+ return
70
+
71
+ # Decode all tokens at once in the main thread
72
+ token_str = self.tokenizer.decode(self.decoding_buffer)
73
+ self.decoding_buffer.clear()
74
+
75
+ # Color-handling logic
76
+ if self.thinking and "</think>" in token_str:
77
+ self.thinking = False
78
+ parts = token_str.split("</think>")
79
+ if len(parts) > 0:
80
+ print(parts[0] + "</think>", end='', flush=True)
81
+ if len(parts) > 1:
82
+ print(LIGHT_BLUE + parts[1], end='', flush=True)
83
+ else:
84
+ if not self.thinking:
85
+ print(LIGHT_BLUE + token_str, end='', flush=True)
86
+ else:
87
+ print(token_str, end='', flush=True)
88
+
89
+ def _print_worker(self):
90
+ """Worker thread that takes token_ids from the queue."""
91
+ while not self.stop_event.is_set():
92
+ try:
93
+ token_id = self.token_queue.get(timeout=0.01)
94
+ with self.lock:
95
+ self.decoding_buffer.append(token_id)
96
+ self.token_queue.task_done()
97
+ except queue.Empty:
98
+ continue
99
+ except Exception as e:
100
+ print(f"\nError: Token printer error: {str(e)}")
101
+ break
102
+
103
+ def stop(self):
104
+ """Stop the printer thread."""
105
+ if self.thread and self.thread.is_alive():
106
+ self.stop_event.set()
107
+ try:
108
+ self.thread.join(timeout=1.0)
109
+ except Exception:
110
+ pass
111
+ print(RESET_COLOR) # Reset color at the end
112
+ return self.buffer
113
+
114
+ def set_timing(self, prefill_time, inference_time, context_pos):
115
+ """Set timing information."""
116
+ self.prefill_time = prefill_time
117
+ self.inference_time = inference_time
118
+ self.context_pos = context_pos
119
+
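+ # Unlike chat.py's TokenPrinter, stop() here only resets the terminal color;
+ # timing stats are handed in via set_timing() and presumably printed by the caller.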
120
+ def parse_model_path(path):
121
+ """Parse model path and return full path with .mlmodelc or .mlpackage extension."""
122
+ path = Path(path)
123
+
124
+ # If path exists exactly as specified, return it
125
+ if path.exists():
126
+ return str(path)
127
+
128
+ # Try with both extensions
129
+ candidates = [
130
+ path, # Original path
131
+ path.with_suffix('.mlmodelc'), # With .mlmodelc
132
+ path.with_suffix('.mlpackage'), # With .mlpackage
133
+ Path(str(path) + '.mlmodelc'), # Handle case where extension is included
134
+ Path(str(path) + '.mlpackage')
135
+ ]
136
+
137
+ # Try all possible paths
138
+ for candidate in candidates:
139
+ if candidate.exists():
140
+ print(f"Found model at: {candidate}")
141
+ return str(candidate)
142
+
143
+ # If we get here, no valid path was found
144
+ print("\nError: Model not found. Tried the following paths:")
145
+ for candidate in candidates:
146
+ print(f" {candidate}")
147
+ raise FileNotFoundError(f"Model not found: {path}")
148
+
149
+ def parse_ffn_filename(path):
150
+ """Parse FFN model filename to extract chunk information."""
151
+ path = Path(path)
152
+ pattern = r'FFN_PF.*_chunk_(\d+)of(\d+)'
153
+ match = re.search(pattern, path.name)
154
+
155
+ if match:
156
+ current_chunk = int(match.group(1))
157
+ total_chunks = int(match.group(2))
158
+ return current_chunk, total_chunks
159
+ return None, None
160
+
161
+ def find_all_chunks(base_path):
162
+ """Find all chunk files matching the base FFN path pattern."""
163
+ path = Path(base_path)
164
+ pattern = re.sub(r'_chunk_\d+of\d+', '_chunk_*', str(path))
165
+ return sorted(glob.glob(pattern))
166
+
167
+ def load_model(path, function_name=None):
168
+ """Load a CoreML model, handling both .mlmodelc and .mlpackage formats."""
169
+ path = Path(path)
170
+ compute_unit = ct.ComputeUnit.CPU_AND_NE
171
+
172
+ try:
173
+ if path.suffix == '.mlmodelc':
174
+ # For compiled models (.mlmodelc), use CompiledMLModel
175
+ if function_name:
176
+ return ct.models.CompiledMLModel(str(path), compute_unit, function_name=function_name)
177
+ else:
178
+ return ct.models.CompiledMLModel(str(path), compute_unit)
179
+ else:
180
+ # For packages (.mlpackage)
181
+ if function_name:
182
+ return ct.models.MLModel(str(path), function_name=function_name)
183
+ else:
184
+ return ct.models.MLModel(str(path))
185
+
186
+ except RuntimeError as e:
187
+ if "valid manifest does not exist" in str(e):
188
+ print(f"\nError: Could not load compiled model at {path}")
189
+ print("This might be because:")
190
+ print("1. The model is not properly compiled")
191
+ print("2. The model was compiled for a different OS version")
192
+ print("3. The model needs to be recompiled")
193
+ print("\nTry using the .mlpackage version instead, or recompile the model.")
194
+ raise
195
+
196
+ def parse_args():
197
+ parser = argparse.ArgumentParser(description='Full chat with CoreML LLaMA with context-window shifting (GIL-safe) (c) 2025 Anemll')
198
+
199
+ # Add meta.yaml option
200
+ parser.add_argument('--meta', type=str, help='Path to meta.yaml to load all parameters')
201
+
202
+ # Add existing arguments
203
+ parser.add_argument('--d', '--dir', type=str, default='.',
204
+ help='Directory containing model files (default: current directory)')
205
+ parser.add_argument('--embed', type=str, required=False,
206
+ help='Path to embeddings model (relative to --dir)')
207
+ parser.add_argument('--ffn', type=str, required=False,
208
+ help='Path to FFN model (can be chunked, relative to --dir)')
209
+ parser.add_argument('--lmhead', type=str, required=False,
210
+ help='Path to LM head model (relative to --dir)')
211
+ parser.add_argument('--tokenizer', type=str, required=False,
212
+ help='Path to tokenizer')
213
+
214
+ # Add new argument for auto-generation
215
+ parser.add_argument('--prompt', type=str,
216
+ help='If specified, run once with this prompt and exit')
217
+
218
+ # Add no-warmup flag
219
+ parser.add_argument('--nw', action='store_true',
220
+ help='Skip warmup phase')
221
+
222
+ # Model configuration
223
+ parser.add_argument('--context-length', type=int,
224
+ help='Context length for the model (default: 512), if not provided, it will be detected from the model directory name ctxNUMBER')
225
+ parser.add_argument('--batch-size', type=int,
226
+ help='Batch size for prefill (default: 64)')
227
+
228
+ args = parser.parse_args()
229
+
230
+ # If meta.yaml is provided, load parameters from it
231
+ if args.meta:
232
+ try:
233
+ with open(args.meta, 'r') as f:
234
+ meta = yaml.safe_load(f)
235
+ params = meta['model_info']['parameters']
236
+
237
+ # Set model directory to meta.yaml directory if not specified
238
+ if not args.d or args.d == '.':
239
+ args.d = str(Path(args.meta).parent)
240
+
241
+ # Build model paths based on parameters
242
+ prefix = params.get('model_prefix', 'llama') # Default to 'llama' if not specified
243
+ lut_ffn = f"_lut{params['lut_ffn']}" if params['lut_ffn'] != 'none' else ''
244
+ lut_lmhead = f"_lut{params['lut_lmhead']}" if params['lut_lmhead'] != 'none' else ''
245
+ lut_embeddings = f"_lut{params['lut_embeddings']}" if params['lut_embeddings'] != 'none' else ''
246
+ num_chunks = int(params['num_chunks'])
247
+
248
+ # Set model paths if not specified
249
+ if not args.lmhead:
250
+ args.lmhead = f'{prefix}_lm_head{lut_lmhead}'
251
+ if not args.embed:
252
+ args.embed = f'{prefix}_embeddings{lut_embeddings}' # Changed from lm_head to embeddings
253
+ if not args.ffn:
254
+ args.ffn = f'{prefix}_FFN_PF{lut_ffn}_chunk_01of{num_chunks:02d}'
255
+ if not args.tokenizer:
256
+ args.tokenizer = args.d
257
+
258
+ # Set other parameters if not overridden by command line
259
+ if args.context_length is None:
260
+ args.context_length = int(params['context_length'])
261
+ if args.batch_size is None:
262
+ args.batch_size = int(params['batch_size'])
263
+ args.num_chunks = num_chunks
264
+
265
+ print(f"\nLoaded parameters from {args.meta}:")
266
+ print(f" Context Length: {args.context_length}")
267
+ print(f" Batch Size: {args.batch_size}")
268
+ print(f" Num Chunks: {args.num_chunks}")
269
+ print(f" Models Directory: {args.d}")
270
+ print(f" Embeddings: {args.embed}")
271
+ print(f" LM Head: {args.lmhead}")
272
+ print(f" FFN: {args.ffn}")
273
+
274
+ except Exception as e:
275
+ print(f"\nError loading meta.yaml: {str(e)}")
276
+ sys.exit(1)
277
+
278
+ return args
279
+
280
+ def load_metadata(model,args):
281
+ # Extract metadata and config parameters
282
+ metadata = {}
283
+ if hasattr(model, 'user_defined_metadata'):
284
+ meta = model.user_defined_metadata
285
+
286
+ # Extract key parameters with defaults
287
+ metadata['context_length'] = int(meta.get('com.anemll.context_length', 512))
288
+ metadata['state_length'] = int(meta.get('com.anemll.state_length', metadata['context_length'])) # Added state_length
289
+ metadata['batch_size'] = int(meta.get('com.anemll.batch_size', 64))
290
+ metadata['lut_bits'] = int(meta.get('com.anemll.lut_bits', 0))
291
+ metadata['num_chunks'] = int(meta.get('com.anemll.num_chunks', 1))
292
+
293
+ print("\nExtracted Parameters:")
294
+ print(f" Context Length: {metadata['context_length']}")
295
+ print(f" State Length: {metadata['state_length']}")
296
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
297
+ print(f" LUT Bits: {metadata['lut_bits']}")
298
+ print(f" Number of Chunks: {metadata['num_chunks']}")
299
+
300
+ # Print model info
301
+ print("\nModel Info:")
302
+ if 'com.anemll.info' in meta:
303
+ print(f" {meta['com.anemll.info']}")
304
+ if 'com.github.apple.coremltools.version' in meta:
305
+ print(f" CoreML Tools: {meta['com.github.apple.coremltools.version']}")
306
+
307
+ # Print model input/output shapes
308
+ print("\nModel Shapes:")
309
+ if hasattr(model, 'input_description'):
310
+ print(" Inputs:")
311
+ for name, desc in model.input_description.items():
312
+ print(f" {name}: {desc}")
313
+ if hasattr(model, 'output_description'):
314
+ print(" Outputs:")
315
+ for name, desc in model.output_description.items():
316
+ print(f" {name}: {desc}")
317
+ else:
318
+ print("\nWarning: No metadata found in model")
319
+
320
+ # Check if model directory name contains context length pattern (ctxXXX)
321
+ ctx_len = 512
322
+ if args.context_length is None:
323
+ import re
324
+ ctx_match = re.search(r'ctx(\d+)', str(args.d))
325
+ if ctx_match:
326
+ ctx_len0 = int(ctx_match.group(1))
327
+ if 512 <= ctx_len0 <= 8096:
328
+ ctx_len = ctx_len0
329
+ print(f"\nDetected context length {ctx_len} from directory name")
330
+ else:
331
+ print(f"\nWarning: no ctxNNN context length found in directory name {args.d}; using default {ctx_len}")
332
+ else:
333
+ ctx_len = args.context_length
334
+
335
+ # Use defaults or values from args
336
+ metadata['context_length'] = ctx_len
337
+ metadata['state_length'] = ctx_len
338
+ # Get batch size from args or use default
339
+ metadata['batch_size'] = getattr(args, 'batch_size', 64)
340
+ metadata['lut_bits'] = 4
341
+ metadata['num_chunks'] = getattr(args, 'num_chunks', 4)
342
+ print("\nUsing parameters:")
343
+ print(f" Context Length: {metadata['context_length']}")
344
+ print(f" State Length: {metadata['state_length']}")
345
+ print(f" Prefill Batch Size: {metadata['batch_size']}")
346
+ print(f" LUT Bits: {metadata['lut_bits']}")
347
+ print(f" Number of Chunks: {metadata['num_chunks']}")
348
+
349
+ # Override with values from args if they exist
350
+ if hasattr(args, 'batch_size') and args.batch_size is not None:
351
+ metadata['batch_size'] = args.batch_size
352
+ print(f"\nOverriding batch size from args: {args.batch_size}")
353
+ if hasattr(args, 'num_chunks') and args.num_chunks is not None:
354
+ metadata['num_chunks'] = args.num_chunks
355
+ print(f"\nOverriding num chunks from args: {args.num_chunks}")
356
+
357
+ return metadata
358
+
359
+ def load_models(args,metadata):
360
+ """Load all required models and extract metadata."""
361
+ print("\nLoading models...")
362
+
363
+ try:
364
+ # Load embeddings model
365
+ print("\nLoading embeddings model...")
366
+ embed_path = parse_model_path(args.embed)
367
+ print(f"Loading from: {embed_path}")
368
+ embed_model = load_model(embed_path)
369
+ print("Embeddings model loaded successfully")
370
+ metadata = load_metadata(embed_model,args)
371
+
372
+
373
+
374
+ # Load LM head model
375
+ print("\nLoading LM head model...")
376
+ lmhead_path = parse_model_path(args.lmhead)
377
+ print(f"Loading from: {lmhead_path}")
378
+ lmhead_model = load_model(lmhead_path)
379
+ print("LM head model loaded successfully")
380
+
381
+ # Parse FFN path and find chunks if needed
382
+ print("\nLoading FFN+PREFILL model(s)...")
383
+ ffn_path = parse_model_path(args.ffn)
384
+ chunk_no, total_chunks = parse_ffn_filename(ffn_path)
385
+
386
+ ffn_models = []
387
+ if chunk_no and total_chunks:
388
+ print(f"\nDetected chunked FFN+PREFILL model ({total_chunks} chunks)")
389
+ # Find and load all chunks
390
+ chunk_paths = find_all_chunks(ffn_path)
391
+ if len(chunk_paths) != total_chunks:
392
+ raise ValueError(f"Found {len(chunk_paths)} chunks but filename indicates {total_chunks} chunks")
393
+
394
+ for chunk_path in chunk_paths:
395
+ print(f"\nLoading FFN+PREFILL chunk: {Path(chunk_path).name}")
396
+ try:
397
+ # For chunked models, we need both infer and prefill functions
398
+ ffn_models.append({
399
+ 'infer': load_model(chunk_path, function_name='infer'),
400
+ 'prefill': load_model(chunk_path, function_name='prefill')
401
+ })
402
+ print("Chunk loaded successfully")
403
+ except Exception as e:
404
+ print(f"Error loading chunk {chunk_path}: {str(e)}")
405
+ raise
406
+ metadata = load_metadata(ffn_models[0],args)
407
+
408
+ else:
409
+ print("\nLoading single FFN model...")
410
+ ffn_models.append(load_model(ffn_path))
411
+ print("FFN model loaded successfully")
412
+
413
+ return embed_model, ffn_models, lmhead_model, metadata
414
+
415
+ except Exception as e:
416
+ print(f"\nError loading models: {str(e)}")
417
+ print("\nPlease ensure all model files exist and are accessible.")
418
+ print("Expected files:")
419
+ print(f" Embeddings: {args.embed}")
420
+ print(f" LM Head: {args.lmhead}")
421
+ print(f" FFN: {args.ffn}")
422
+ raise
423
+
424
+ # At the top of the file, make this a default path
425
+
426
+ def initialize_tokenizer(model_path=None):
427
+ """Initialize and configure the tokenizer."""
428
+ try:
429
+
430
+
431
+ tokenizer = AutoTokenizer.from_pretrained(
432
+ str(model_path),
433
+ use_fast=False,
434
+ trust_remote_code=True
435
+ )
436
+
437
+ print("\nTokenizer Configuration:")
438
+ print(f"Tokenizer type: {type(tokenizer)}")
439
+ print(f"Tokenizer name: {tokenizer.__class__.__name__}")
440
+ print(f"Vocabulary size: {len(tokenizer)}")
441
+ print(f"Model max length: {tokenizer.model_max_length}")
442
+
443
+ if tokenizer.pad_token is None:
444
+ tokenizer.pad_token = tokenizer.eos_token
445
+ tokenizer.pad_token_id = tokenizer.eos_token_id
446
+ print("Set PAD token to EOS token")
447
+
448
+ tokenizer.padding_side = "left"
449
+
450
+ print(f"\nSpecial Tokens:")
451
+ print(f"PAD token: '{tokenizer.pad_token}' (ID: {tokenizer.pad_token_id})")
452
+ print(f"EOS token: '{tokenizer.eos_token}' (ID: {tokenizer.eos_token_id})")
453
+ print(f"BOS token: '{tokenizer.bos_token}' (ID: {tokenizer.bos_token_id})")
454
+ print(f"UNK token: '{tokenizer.unk_token}' (ID: {tokenizer.unk_token_id})")
455
+
456
+ return tokenizer
457
+
458
+ except Exception as e:
459
+ print(f"\nError: Failed to load tokenizer from {model_path}")
460
+ print(f"Error details: {str(e)}")
461
+ print(f"Error type: {type(e)}")
462
+ print("\nThis code requires a Llama 3.2 model for chat template functionality.")
463
+ print("Please provide the path to a Llama 3.2 model directory.")
464
+ import traceback
465
+ traceback.print_exc()
466
+ raise
467
+
468
+
469
+
470
+ def make_causal_mask(length, start):
471
+ """Create causal attention mask."""
472
+ mask = np.full((1, 1, length, length), -np.inf, dtype=np.float16)
473
+ row_indices = np.arange(length).reshape(length, 1)
474
+ col_indices = np.arange(length).reshape(1, length)
475
+ mask[:, :, col_indices <= (row_indices + start)] = 0
476
+ return mask
477
+
478
+ def run_prefill(embed_model, ffn_models, input_ids, current_pos, context_length, batch_size, state, causal_mask):
479
+ """Run prefill on the input sequence."""
480
+ # Use provided causal mask or create one if not provided
481
+ if causal_mask is None:
482
+ causal_mask = make_causal_mask(context_length, 0)
483
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
484
+
485
+ # Process in batches
486
+ batch_pos = 0
487
+ while batch_pos < current_pos:
488
+ batch_end = min(batch_pos + batch_size, current_pos)
489
+ current_batch_size = batch_end - batch_pos
490
+
491
+ # Get current batch
492
+ batch_input = input_ids[:, batch_pos:batch_end]
493
+
494
+ # Always pad to full batch size for prefill
495
+ batch_input = F.pad(
496
+ batch_input,
497
+ (0, batch_size - current_batch_size),
498
+ value=0
499
+ )
500
+
501
+ # Generate position IDs for full batch size
502
+ position_ids = torch.arange(batch_size, dtype=torch.int32) # Changed: Always use full batch size
503
+ batch_causal_mask = causal_mask[:, :, :batch_size, :] # Changed: Use full batch size
504
+
505
+ # Run embeddings with proper batch size
506
+ hidden_states = torch.from_numpy(
507
+ embed_model.predict({
508
+ 'input_ids': batch_input.numpy(),
509
+ 'batch_size': np.array([batch_size], dtype=np.int32) # Add batch_size parameter
510
+ })['hidden_states']
511
+ )
512
+
513
+ # Run through FFN chunks with state
514
+ for ffn_model in ffn_models:
515
+ if isinstance(ffn_model, dict):
516
+ inputs = {
517
+ 'hidden_states': hidden_states.numpy(), # [1, 64, hidden_size]
518
+ 'position_ids': position_ids.numpy(), # [64]
519
+ 'causal_mask': batch_causal_mask.numpy(), # [1, 1, 64, context_length]
520
+ 'current_pos': np.array([batch_pos], dtype=np.int32) # [1]
521
+ }
522
+ output = ffn_model['prefill'].predict(inputs, state)
523
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
524
+
525
+ batch_pos = batch_end
526
+
527
+ return torch.tensor([current_pos], dtype=torch.int32)
528
+
529
+ def generate_next_token(embed_model, ffn_models, lmhead_model, input_ids, pos, context_length, state, causal_mask, temperature=0.0):
530
+ """Generate the next token."""
531
+ # Get current token
532
+ current_token = input_ids[:, pos-1:pos]
533
+
534
+ # Run embeddings
535
+ hidden_states = torch.from_numpy(
536
+ embed_model.predict({'input_ids': current_token.numpy()})['hidden_states']
537
+ )
538
+
539
+ # Create masks
540
+ update_mask = torch.zeros((1, 1, context_length, 1), dtype=torch.float16)
541
+ update_mask[0, 0, pos-1, 0] = 1.0
542
+ position_ids = torch.tensor([pos-1], dtype=torch.int32)
543
+
544
+ # Use the pre-initialized causal mask and extract the single position portion
545
+ single_causal_mask = causal_mask[:, :, pos-1:pos, :]
546
+
547
+ # Run through FFN chunks
548
+ for ffn_model in ffn_models:
549
+ if isinstance(ffn_model, dict):
550
+ inputs = {
551
+ 'hidden_states': hidden_states.numpy(),
552
+ 'update_mask': update_mask.numpy(),
553
+ 'position_ids': position_ids.numpy(),
554
+ 'causal_mask': single_causal_mask.numpy(),
555
+ 'current_pos': position_ids.numpy()
556
+ }
557
+ output = ffn_model['infer'].predict(inputs, state)
558
+ hidden_states = torch.from_numpy(output['output_hidden_states'])
559
+
560
+ # Run LM head and get next token
561
+ lm_output = lmhead_model.predict({'hidden_states': hidden_states.numpy()})
562
+
563
+ if 'logits1' in lm_output:
564
+ logits_parts = []
565
+ for i in range(1, 9):
566
+ key = f'logits{i}'
567
+ if key in lm_output:
568
+ logits_parts.append(torch.from_numpy(lm_output[key]))
569
+ logits = torch.cat(logits_parts, dim=-1)
570
+ else:
571
+ logits = torch.from_numpy(lm_output['output_logits'])
572
+
573
+ if temperature > 0:
574
+ logits = logits / temperature
575
+ probs = F.softmax(logits[0, -1, :], dim=-1)
576
+ next_token = torch.multinomial(probs, num_samples=1).item()
577
+ else:
578
+ next_token = torch.argmax(logits[0, -1, :]).item()
579
+
580
+ return next_token
581
+
582
+ def create_unified_state(ffn_models, context_length):
583
+ """Create unified KV cache state for transformer."""
584
+ if isinstance(ffn_models[0], dict):
585
+ # Use first FFN model's prefill function to create state
586
+ state = ffn_models[0]['prefill'].make_state()
587
+ print(f"\nCreated unified transformer state for {len(ffn_models)} chunks")
588
+ return state
589
+ else:
590
+ state = ffn_models[0].make_state()
591
+ print("\nCreated unified transformer state")
592
+ return state
593
+
594
+ def initialize_causal_mask(context_length):
595
+ """Initialize causal mask for transformer attention."""
596
+ causal_mask = make_causal_mask(context_length, 0)
597
+ causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
598
+ print(f"\nInitialized causal mask for context length {context_length}")
599
+ return causal_mask
600
+
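make_causal_mask itself is defined earlier in chat.py, outside this excerpt; a sketch of the construction it is assumed to implement (0 where a position may attend, the most negative finite float16 value elsewhere):

import numpy as np

def make_causal_mask_sketch(length, start):
    # Hypothetical stand-in for chat.py's make_causal_mask: row i may attend
    # to columns <= i + start; future columns get a large negative bias
    # (-65504 is the most negative finite float16 value).
    mask = np.full((1, 1, length, length), -65504.0, dtype=np.float32)
    row = np.arange(length)[:, None]
    col = np.arange(length)[None, :]
    mask[0, 0][col <= row + start] = 0.0
    return mask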
601
+ def get_user_input():
602
+ """Get input from user, handling special key combinations."""
603
+ global THINKING_MODE
604
+ try:
605
+ import termios
606
+ import tty
607
+ import sys
608
+
609
+ def _getch():
610
+ fd = sys.stdin.fileno()
611
+ old_settings = termios.tcgetattr(fd)
612
+ try:
613
+ tty.setraw(sys.stdin.fileno())
614
+ ch = sys.stdin.read(1)
615
+ finally:
616
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
617
+ return ch
618
+
619
+ buffer = []
620
+ while True:
621
+ char = _getch()
622
+
623
+ # Debug (disabled): uncomment to inspect raw key codes
624
+ #print(f"\nKey pressed: {repr(char)} (hex: {hex(ord(char))})")
625
+
626
+ # Check for Enter key
627
+ if char == '\r' or char == '\n':
628
+ print() # Move to next line
629
+ input_text = ''.join(buffer)
630
+ # Check if the command is /t
631
+ if input_text == '/t':
632
+ THINKING_MODE = not THINKING_MODE
633
+ print(f"Thinking mode {'ON' if THINKING_MODE else 'OFF'}")
634
+ buffer = [] # Clear buffer
635
+ print(f"\n{LIGHT_GREEN}You{' (thinking)' if THINKING_MODE else ''}:{RESET_COLOR}", end=' ', flush=True)
636
+ continue
637
+ return input_text
638
+
639
+ # Handle backspace
640
+ if char == '\x7f': # backspace
641
+ if buffer:
642
+ buffer.pop()
643
+ sys.stdout.write('\b \b') # Erase character
644
+ sys.stdout.flush()
645
+ continue
646
+
647
+ # Handle Ctrl-C
648
+ if char == '\x03': # Ctrl-C
649
+ print("^C")
650
+ raise KeyboardInterrupt
651
+
652
+ # Print character and add to buffer
653
+ sys.stdout.write(char)
654
+ sys.stdout.flush()
655
+ buffer.append(char)
656
+
657
+ except ImportError:
658
+ # Fallback for systems without termios
659
+ return input("> ")
660
+
661
+ def chat_loop(embed_model, ffn_models, lmhead_model, tokenizer, metadata, state, causal_mask, auto_prompt=None, warmup=False):
662
+ """Interactive chat loop."""
663
+ global THINKING_MODE
664
+ context_length = metadata.get('context_length')
665
+ batch_size = metadata.get('batch_size', 64)
666
+
667
+ if not warmup:
668
+ print(f"\nUsing context length: {context_length}")
669
+ print("\nStarting chat session. Press Ctrl+D to exit.")
670
+ print("Type your message and press Enter to chat. Use /t to toggle thinking mode.")
671
+ print(f"Thinking mode is {'ON' if THINKING_MODE else 'OFF'}")
672
+
673
+ # Keep track of conversation history
674
+ conversation = []
675
+
676
+ try:
677
+ while True:
678
+ try:
679
+ if not warmup:
680
+ print(f"\n{LIGHT_GREEN}You{' (thinking)' if THINKING_MODE else ''}:{RESET_COLOR}", end=' ', flush=True)
681
+ if auto_prompt is not None:
682
+ user_input = auto_prompt
683
+ if not warmup:
684
+ print(user_input)
685
+ else:
686
+ user_input = input().strip()
687
+ except EOFError:
688
+ if not warmup:
689
+ print("\nExiting chat...")
690
+ break
691
+
692
+ if not user_input:
693
+ continue
694
+
695
+ # Handle /t command
696
+ if user_input == "/t":
697
+ THINKING_MODE = not THINKING_MODE
698
+ print(f"Thinking mode {'ON' if THINKING_MODE else 'OFF'}")
699
+ continue
700
+
701
+ # Add user message to conversation
702
+ conversation.append({"role": "user", "content": user_input})
703
+
704
+ # Format using chat template with full history
705
+ if THINKING_MODE:
706
+ # Add thinking prompt to system message
707
+ conversation_with_thinking = [{"role": "system", "content": THINKING_PROMPT}] + conversation
708
+ base_input_ids = tokenizer.apply_chat_template(
709
+ conversation_with_thinking,
710
+ return_tensors="pt",
711
+ add_generation_prompt=True
712
+ ).to(torch.int32)
713
+ else:
714
+ base_input_ids = tokenizer.apply_chat_template(
715
+ conversation,
716
+ return_tensors="pt",
717
+ add_generation_prompt=True
718
+ ).to(torch.int32)
719
+
720
+ # Check if we need to trim history
721
+ while base_input_ids.size(1) > context_length - 100: # Leave room for response
722
+ # Remove oldest message pair (user + assistant)
723
+ if len(conversation) > 2:
724
+ conversation = conversation[2:] # Remove oldest pair
725
+ base_input_ids = tokenizer.apply_chat_template(
726
+ conversation,
727
+ return_tensors="pt",
728
+ add_generation_prompt=True
729
+ ).to(torch.int32)
730
+ else:
731
+ # If only current message remains and still too long, truncate
732
+ base_input_ids = base_input_ids[:, -context_length//2:]
733
+ break
734
+
735
+ context_pos = base_input_ids.size(1)
736
+
737
+ # Pad sequence to context_size
738
+ input_ids = F.pad(
739
+ base_input_ids,
740
+ (0, context_length - context_pos),
741
+ value=0
742
+ )
743
+
744
+ if not warmup:
745
+ print(f"\n{LIGHT_BLUE}Assistant:{RESET_COLOR}", end=' ', flush=True)
746
+
747
+ # Initialize token printer and collect response
748
+ token_printer = TokenPrinter(tokenizer)
749
+ response_tokens = []
750
+ generation_start_time = time.time()
751
+
752
+ try:
753
+ # Run prefill on entire context
754
+ current_pos = run_prefill(
755
+ embed_model,
756
+ ffn_models,
757
+ input_ids,
758
+ context_pos,
759
+ context_length,
760
+ batch_size,
761
+ state,
762
+ causal_mask
763
+ )
764
+ #print(f"\n[DEBUG] After initial prefill - current_pos: {current_pos}")
765
+
766
+ # Generation loop
767
+ pos = context_pos
768
+ tokens_generated = 0
769
+ inference_start = time.time() # Start inference timing
770
+
771
+ while True:
772
+ # Check if we need to shift window
773
+ if pos >= context_length - 2:
774
+ # Calculate shift to maintain full batches
775
+ batch_size = metadata.get('batch_size', 64)
776
+ # Calculate max batches that fit in context
777
+ max_batches = context_length // batch_size
778
+ desired_batches = max(1, max_batches - 2) # Leave room for new tokens
779
+ new_size = min(desired_batches * batch_size, context_length - batch_size)
780
+
781
+ # Create shifted input_ids
782
+ tmp = torch.zeros((1, context_length), dtype=torch.int32)
783
+ tmp[:,0:new_size] = input_ids[:,pos-new_size:pos]
784
+ input_ids = tmp
785
+
786
+ # Reset state and run prefill
787
+ # keep the same state
788
+ #state = create_unified_state(ffn_models, context_length)
789
+ current_pos = run_prefill(
790
+ embed_model,
791
+ ffn_models,
792
+ input_ids,
793
+ new_size, # Prefill the entire shifted content
794
+ context_length,
795
+ batch_size,
796
+ state,
797
+ causal_mask
798
+ )
799
+
800
+ # Start generating from the next position
801
+ pos = new_size # Don't back up, continue from where we left off
802
+
803
+ #print(f"\n[DEBUG] After shift - next token will be at pos {pos}")
804
+ #print(f"[DEBUG] Context before next token: {tokenizer.decode(input_ids[0, pos-40:pos])}")
805
+
806
+ window_shifted = True
807
+
808
+ # Generate next token
809
+ next_token = generate_next_token(
810
+ embed_model,
811
+ ffn_models,
812
+ lmhead_model,
813
+ input_ids,
814
+ pos,
815
+ context_length,
816
+ state,
817
+ causal_mask
818
+ )
819
+
820
+ # Add token
821
+ input_ids[0, pos] = next_token
822
+ if not warmup:
823
+ token_printer.add_token(next_token)
824
+ token_printer.drain_buffer()
825
+ response_tokens.append(next_token)
826
+
827
+ pos += 1
828
+ tokens_generated += 1
829
+
830
+ # In warmup mode, limit tokens
831
+ if warmup and tokens_generated >= WARMUP_TOKEN_LIMIT:
832
+ break
833
+
834
+ if next_token == tokenizer.eos_token_id:
835
+ break
836
+
837
+ inference_time = time.time() - inference_start # Calculate inference time
838
+
839
+ # Add assistant response to conversation
840
+ response_text = token_printer.stop()
841
+ conversation.append({"role": "assistant", "content": response_text})
842
+
843
+ # Print stats only if not in warmup
844
+ if not warmup:
845
+ total_time = time.time() - generation_start_time
846
+ prefill_time = total_time - inference_time
847
+ inference_tokens_per_sec = len(response_tokens) / inference_time if inference_time > 0 else 0
848
+ prefill_ms = prefill_time * 1000
849
+ prefill_tokens_per_sec = context_pos / prefill_time if prefill_time > 0 else 0
850
+ print(f"{DARK_BLUE}{inference_tokens_per_sec:.1f} t/s, "
851
+ f"TTFT: {prefill_ms:.1f}ms ({prefill_tokens_per_sec:.1f} t/s), "
852
+ f"{len(response_tokens)} tokens{RESET_COLOR}")
853
+
854
+ if auto_prompt is not None:
855
+ break
856
+
857
+ except KeyboardInterrupt:
858
+ if not warmup:
859
+ print("\nGeneration interrupted")
860
+ token_printer.stop()
861
+ continue
862
+
863
+ except Exception as e:
864
+ if not warmup:
865
+ print(f"\nError in chat loop: {str(e)}")
866
+ import traceback
867
+ traceback.print_exc()
868
+
869
+ def main():
870
+ args = parse_args()
871
+
872
+ # Convert directory to absolute path
873
+ model_dir = Path(args.d).resolve()
874
+ if not model_dir.exists():
875
+ print(f"\nError: Model directory not found: {model_dir}")
876
+ return 1
877
+
878
+ print(f"\nUsing model directory: {model_dir}")
879
+ print(f"Context length: {args.context_length}")
880
+
881
+ try:
882
+ # Update paths to be relative to model directory
883
+ args.embed = str(model_dir / args.embed)
884
+ args.ffn = str(model_dir / args.ffn)
885
+ args.lmhead = str(model_dir / args.lmhead)
886
+
887
+ # Handle tokenizer path separately since it's not relative to model_dir
888
+ if args.tokenizer is None:
889
+ args.tokenizer = str(model_dir)
890
+
891
+ if not Path(args.tokenizer).exists():
892
+ print(f"\nError: Tokenizer directory not found: {args.tokenizer}")
893
+ return 1
894
+
895
+ args.tokenizer = str(Path(args.tokenizer).resolve()) # Convert to absolute path
896
+ print(f"Using tokenizer path: {args.tokenizer}")
897
+
898
+ metadata = {}
899
+ # Load models and extract metadata
900
+ embed_model, ffn_models, lmhead_model, metadata = load_models(args, metadata)
901
+
902
+ print(f"\nMetadata before context length override: {metadata}")
903
+
904
+ # Override context length from command line if provided
905
+ if args.context_length is not None:
906
+ metadata['context_length'] = args.context_length
907
+ metadata['state_length'] = args.context_length # Also update state_length
908
+ print(f"\nOverriding context length from command line: {args.context_length}")
909
+
910
+ print(f"\nMetadata after context length override: {metadata}")
911
+
912
+ # Load tokenizer with resolved path
913
+ tokenizer = initialize_tokenizer(args.tokenizer)
914
+ if tokenizer is None:
915
+ raise RuntimeError("Failed to initialize tokenizer")
916
+
917
+ # Create unified state once
918
+ state = create_unified_state(ffn_models, metadata['context_length'])
919
+
920
+ # Initialize causal mask once
921
+ causal_mask = initialize_causal_mask(metadata['context_length'])
922
+
923
+ # Warmup runs to avoid Python GIL issues with Core ML
924
+ if not args.nw:
925
+ for i in range(2):
926
+ chat_loop(
927
+ embed_model=embed_model,
928
+ ffn_models=ffn_models,
929
+ lmhead_model=lmhead_model,
930
+ tokenizer=tokenizer,
931
+ metadata=metadata,
932
+ state=state, # Pass the state
933
+ causal_mask=causal_mask, # Pass the causal mask
934
+ warmup=True,
935
+ auto_prompt="who are you?"
936
+ )
937
+
938
+ # Main run
939
+ chat_loop(
940
+ embed_model=embed_model,
941
+ ffn_models=ffn_models,
942
+ lmhead_model=lmhead_model,
943
+ tokenizer=tokenizer,
944
+ metadata=metadata,
945
+ state=state, # Pass the state
946
+ causal_mask=causal_mask, # Pass the causal mask
947
+ warmup=False,
948
+ auto_prompt=args.prompt
949
+ )
950
+
951
+ except Exception as e:
952
+ print(f"\nError: {str(e)}")
953
+ import traceback
954
+ traceback.print_exc()
955
+ return 1
956
+
957
+ return 0
958
+
959
+ if __name__ == "__main__":
960
+ exit(main())
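For reference, a typical invocation against the files in this repository might look like the comment below. parse_args is defined earlier in chat.py, so the exact flag spellings are assumptions inferred from the attributes main() reads (args.d, args.embed, args.ffn, args.lmhead, args.context_length, args.prompt, args.nw):

# Hypothetical command line (flag names inferred, not verified against parse_args):
#   python chat.py -d . \
#       --embed llama_embeddings.mlmodelc \
#       --ffn llama_FFN_PF_chunk_01of02.mlmodelc \
#       --lmhead llama_lm_head.mlmodelc \
#       --context-length 512 \
#       --prompt "who are you?"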
config.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "tokenizer_class": "LlamaTokenizer",
3
+ "model_type": "llama"
4
+ }
llama_FFN_PF_chunk_01of02.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3aa36502bb9a0d7bebabbbd190c20ec0dc75d2da0d134effd295edac9f0f055
3
+ size 243
llama_FFN_PF_chunk_01of02.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b9e9177095ad681bd28dbe0164d4bf56fd63566cce5e1c043f409fcc30a02be
3
+ size 951
llama_FFN_PF_chunk_01of02.mlmodelc/metadata.json ADDED
@@ -0,0 +1,332 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "userDefinedMetadata" : {
5
+ "com.github.apple.coremltools.version" : "8.2",
6
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
7
+ "com.anemll.context_length" : "512",
8
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
9
+ "com.anemll.num_chunks" : "2",
10
+ "com.anemll.batch_size" : "64",
11
+ "com.anemll.info" : "Converted with Anemll v0.3.0",
12
+ "com.anemll.chunk_no" : "1"
13
+ },
14
+ "availability" : {
15
+ "macOS" : "15.0",
16
+ "tvOS" : "18.0",
17
+ "visionOS" : "2.0",
18
+ "watchOS" : "11.0",
19
+ "iOS" : "18.0",
20
+ "macCatalyst" : "18.0"
21
+ },
22
+ "inputSchema" : [
23
+ {
24
+ "hasShapeFlexibility" : "0",
25
+ "isOptional" : "0",
26
+ "dataType" : "Float16",
27
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
28
+ "shortDescription" : "",
29
+ "shape" : "[1, 1, 2048]",
30
+ "name" : "hidden_states",
31
+ "type" : "MultiArray"
32
+ },
33
+ {
34
+ "hasShapeFlexibility" : "0",
35
+ "isOptional" : "0",
36
+ "dataType" : "Int32",
37
+ "formattedType" : "MultiArray (Int32 1)",
38
+ "shortDescription" : "",
39
+ "shape" : "[1]",
40
+ "name" : "position_ids",
41
+ "type" : "MultiArray"
42
+ },
43
+ {
44
+ "hasShapeFlexibility" : "0",
45
+ "isOptional" : "0",
46
+ "dataType" : "Float16",
47
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1 × 512)",
48
+ "shortDescription" : "",
49
+ "shape" : "[1, 1, 1, 512]",
50
+ "name" : "causal_mask",
51
+ "type" : "MultiArray"
52
+ },
53
+ {
54
+ "hasShapeFlexibility" : "0",
55
+ "isOptional" : "0",
56
+ "dataType" : "Int32",
57
+ "formattedType" : "MultiArray (Int32 1)",
58
+ "shortDescription" : "",
59
+ "shape" : "[1]",
60
+ "name" : "current_pos",
61
+ "type" : "MultiArray"
62
+ }
63
+ ],
64
+ "outputSchema" : [
65
+ {
66
+ "hasShapeFlexibility" : "0",
67
+ "isOptional" : "0",
68
+ "dataType" : "Float16",
69
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
70
+ "shortDescription" : "",
71
+ "shape" : "[1, 1, 2048]",
72
+ "name" : "output_hidden_states",
73
+ "type" : "MultiArray"
74
+ }
75
+ ],
76
+ "modelParameters" : [
77
+
78
+ ],
79
+ "storagePrecision" : "Float16",
80
+ "method" : "predict",
81
+ "functions" : [
82
+ {
83
+ "inputSchema" : [
84
+ {
85
+ "hasShapeFlexibility" : "0",
86
+ "isOptional" : "0",
87
+ "dataType" : "Float16",
88
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
89
+ "shortDescription" : "",
90
+ "shape" : "[1, 1, 2048]",
91
+ "name" : "hidden_states",
92
+ "type" : "MultiArray"
93
+ },
94
+ {
95
+ "hasShapeFlexibility" : "0",
96
+ "isOptional" : "0",
97
+ "dataType" : "Int32",
98
+ "formattedType" : "MultiArray (Int32 1)",
99
+ "shortDescription" : "",
100
+ "shape" : "[1]",
101
+ "name" : "position_ids",
102
+ "type" : "MultiArray"
103
+ },
104
+ {
105
+ "hasShapeFlexibility" : "0",
106
+ "isOptional" : "0",
107
+ "dataType" : "Float16",
108
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1 × 512)",
109
+ "shortDescription" : "",
110
+ "shape" : "[1, 1, 1, 512]",
111
+ "name" : "causal_mask",
112
+ "type" : "MultiArray"
113
+ },
114
+ {
115
+ "hasShapeFlexibility" : "0",
116
+ "isOptional" : "0",
117
+ "dataType" : "Int32",
118
+ "formattedType" : "MultiArray (Int32 1)",
119
+ "shortDescription" : "",
120
+ "shape" : "[1]",
121
+ "name" : "current_pos",
122
+ "type" : "MultiArray"
123
+ }
124
+ ],
125
+ "computePrecision" : "Mixed (Float16, Int32)",
126
+ "storagePrecision" : "Float16",
127
+ "stateSchema" : [
128
+ {
129
+ "dataType" : "Float16",
130
+ "isOptional" : "0",
131
+ "formattedType" : "State (Float16 32 × 8 × 512 × 64)",
132
+ "shortDescription" : "",
133
+ "shape" : "[32, 8, 512, 64]",
134
+ "name" : "model_model_kv_cache_0",
135
+ "type" : "State"
136
+ }
137
+ ],
138
+ "outputSchema" : [
139
+ {
140
+ "hasShapeFlexibility" : "0",
141
+ "isOptional" : "0",
142
+ "dataType" : "Float16",
143
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
144
+ "shortDescription" : "",
145
+ "shape" : "[1, 1, 2048]",
146
+ "name" : "output_hidden_states",
147
+ "type" : "MultiArray"
148
+ }
149
+ ],
150
+ "name" : "infer",
151
+ "mlProgramOperationTypeHistogram" : {
152
+ "Ios18.expandDims" : 32,
153
+ "Ios18.mul" : 80,
154
+ "Ios18.matmul" : 16,
155
+ "Identity" : 1,
156
+ "Ios16.reduceMean" : 16,
157
+ "Ios18.exp" : 8,
158
+ "Ios18.realDiv" : 8,
159
+ "Ios18.greaterEqual" : 1,
160
+ "Select" : 1,
161
+ "Ios18.readState" : 17,
162
+ "Tile" : 16,
163
+ "Ios18.gather" : 2,
164
+ "Ios18.add" : 42,
165
+ "Ios18.layerNorm" : 16,
166
+ "Ios18.sliceUpdate" : 16,
167
+ "Ios18.writeState" : 16,
168
+ "Ios18.reshape" : 50,
169
+ "Ios16.reduceMax" : 8,
170
+ "Ios16.reduceSum" : 8,
171
+ "Ios18.conv" : 48,
172
+ "Ios18.concat" : 48,
173
+ "Ios18.transpose" : 32,
174
+ "Ios18.sub" : 40,
175
+ "Ios18.linear" : 8,
176
+ "Ios18.silu" : 8,
177
+ "Ios18.sliceByIndex" : 50,
178
+ "Ios18.squeeze" : 24
179
+ }
180
+ },
181
+ {
182
+ "inputSchema" : [
183
+ {
184
+ "hasShapeFlexibility" : "0",
185
+ "isOptional" : "0",
186
+ "dataType" : "Float16",
187
+ "formattedType" : "MultiArray (Float16 1 × 64 × 2048)",
188
+ "shortDescription" : "",
189
+ "shape" : "[1, 64, 2048]",
190
+ "name" : "hidden_states",
191
+ "type" : "MultiArray"
192
+ },
193
+ {
194
+ "hasShapeFlexibility" : "0",
195
+ "isOptional" : "0",
196
+ "dataType" : "Int32",
197
+ "formattedType" : "MultiArray (Int32 64)",
198
+ "shortDescription" : "",
199
+ "shape" : "[64]",
200
+ "name" : "position_ids",
201
+ "type" : "MultiArray"
202
+ },
203
+ {
204
+ "hasShapeFlexibility" : "0",
205
+ "isOptional" : "0",
206
+ "dataType" : "Float16",
207
+ "formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
208
+ "shortDescription" : "",
209
+ "shape" : "[1, 1, 64, 512]",
210
+ "name" : "causal_mask",
211
+ "type" : "MultiArray"
212
+ },
213
+ {
214
+ "hasShapeFlexibility" : "0",
215
+ "isOptional" : "0",
216
+ "dataType" : "Int32",
217
+ "formattedType" : "MultiArray (Int32 1)",
218
+ "shortDescription" : "",
219
+ "shape" : "[1]",
220
+ "name" : "current_pos",
221
+ "type" : "MultiArray"
222
+ }
223
+ ],
224
+ "computePrecision" : "Mixed (Float16, Int32)",
225
+ "storagePrecision" : "Float16",
226
+ "stateSchema" : [
227
+ {
228
+ "dataType" : "Float16",
229
+ "isOptional" : "0",
230
+ "formattedType" : "State (Float16 32 × 8 × 512 × 64)",
231
+ "shortDescription" : "",
232
+ "shape" : "[32, 8, 512, 64]",
233
+ "name" : "model_model_kv_cache_0",
234
+ "type" : "State"
235
+ }
236
+ ],
237
+ "outputSchema" : [
238
+ {
239
+ "hasShapeFlexibility" : "0",
240
+ "isOptional" : "0",
241
+ "dataType" : "Float16",
242
+ "formattedType" : "MultiArray (Float16 1 × 64 × 2048)",
243
+ "shortDescription" : "",
244
+ "shape" : "[1, 64, 2048]",
245
+ "name" : "output_hidden_states",
246
+ "type" : "MultiArray"
247
+ }
248
+ ],
249
+ "name" : "prefill",
250
+ "mlProgramOperationTypeHistogram" : {
251
+ "Ios18.expandDims" : 32,
252
+ "Ios18.mul" : 80,
253
+ "Ios18.matmul" : 16,
254
+ "Ios16.reduceMean" : 16,
255
+ "Ios18.exp" : 8,
256
+ "Ios18.realDiv" : 8,
257
+ "Ios18.greaterEqual" : 1,
258
+ "Select" : 1,
259
+ "Ios18.readState" : 17,
260
+ "Tile" : 16,
261
+ "Ios18.gather" : 2,
262
+ "Ios18.add" : 42,
263
+ "Ios18.layerNorm" : 16,
264
+ "Ios18.sliceUpdate" : 16,
265
+ "Ios18.writeState" : 16,
266
+ "Ios18.reshape" : 66,
267
+ "Ios16.reduceMax" : 8,
268
+ "Ios16.reduceSum" : 8,
269
+ "Ios18.conv" : 48,
270
+ "Ios18.concat" : 48,
271
+ "Ios18.transpose" : 58,
272
+ "Ios18.sub" : 40,
273
+ "Ios18.linear" : 8,
274
+ "Ios18.silu" : 8,
275
+ "Ios18.sliceByIndex" : 50,
276
+ "Ios18.squeeze" : 24
277
+ }
278
+ }
279
+ ],
280
+ "version" : "0.3.0",
281
+ "isUpdatable" : "0",
282
+ "defaultFunctionName" : "infer",
283
+ "specificationVersion" : 9,
284
+ "stateSchema" : [
285
+ {
286
+ "dataType" : "Float16",
287
+ "isOptional" : "0",
288
+ "formattedType" : "State (Float16 32 × 8 × 512 × 64)",
289
+ "shortDescription" : "",
290
+ "shape" : "[32, 8, 512, 64]",
291
+ "name" : "model_model_kv_cache_0",
292
+ "type" : "State"
293
+ }
294
+ ],
295
+ "computePrecision" : "Mixed (Float16, Int32)",
296
+ "mlProgramOperationTypeHistogram" : {
297
+ "Ios18.expandDims" : 32,
298
+ "Ios18.mul" : 80,
299
+ "Ios18.matmul" : 16,
300
+ "Identity" : 1,
301
+ "Ios16.reduceMean" : 16,
302
+ "Ios18.exp" : 8,
303
+ "Ios18.realDiv" : 8,
304
+ "Ios18.greaterEqual" : 1,
305
+ "Select" : 1,
306
+ "Ios18.readState" : 17,
307
+ "Tile" : 16,
308
+ "Ios18.gather" : 2,
309
+ "Ios18.add" : 42,
310
+ "Ios18.layerNorm" : 16,
311
+ "Ios18.sliceUpdate" : 16,
312
+ "Ios18.writeState" : 16,
313
+ "Ios18.reshape" : 50,
314
+ "Ios16.reduceMax" : 8,
315
+ "Ios16.reduceSum" : 8,
316
+ "Ios18.conv" : 48,
317
+ "Ios18.concat" : 48,
318
+ "Ios18.transpose" : 32,
319
+ "Ios18.sub" : 40,
320
+ "Ios18.linear" : 8,
321
+ "Ios18.silu" : 8,
322
+ "Ios18.sliceByIndex" : 50,
323
+ "Ios18.squeeze" : 24
324
+ },
325
+ "shortDescription" : "Anemll Model: Multifunction FFN+Prefill",
326
+ "generatedClassName" : "llama_FFN_PF_chunk_01of02",
327
+ "author" : "Converted with Anemll v0.3.0",
328
+ "modelType" : {
329
+ "name" : "MLModelType_mlProgram"
330
+ }
331
+ }
332
+ ]
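The metadata above describes one multifunction mlprogram exposing an infer function (single token) and a prefill function (64-token window) over a shared Float16 [32, 8, 512, 64] KV-cache state. A minimal sketch of loading and driving the compiled chunk with coremltools; the function_name argument assumes coremltools 8.x multifunction support, so treat this as a sketch rather than the exact loader chat.py uses:

import coremltools as ct
import numpy as np

path = "llama_FFN_PF_chunk_01of02.mlmodelc"
# Load each function of the multifunction model separately (assumed API).
infer = ct.models.CompiledMLModel(path, function_name="infer")
prefill = ct.models.CompiledMLModel(path, function_name="prefill")

state = prefill.make_state()  # shared KV cache, reused by both functions
out = infer.predict({
    "hidden_states": np.zeros((1, 1, 2048), dtype=np.float16),
    "position_ids": np.array([0], dtype=np.int32),
    "causal_mask": np.zeros((1, 1, 1, 512), dtype=np.float16),
    "current_pos": np.array([0], dtype=np.int32),
}, state)
print(out["output_hidden_states"].shape)  # (1, 1, 2048)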
llama_FFN_PF_chunk_01of02.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
llama_FFN_PF_chunk_01of02.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:668bd8b220674c2b2bd2abd87095bcaba1075fe6ca806fef5c744702442a4b63
3
+ size 1006707456
llama_FFN_PF_chunk_02of02.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f39a50160bba911d8ef28f9854cbb525548c6390db3eb35804b599a89889a07
3
+ size 243
llama_FFN_PF_chunk_02of02.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8617cece12f2a6c4fca63af54c3a3a9e87b83a08356a56404c1b8e4c743d9c87
3
+ size 951
llama_FFN_PF_chunk_02of02.mlmodelc/metadata.json ADDED
@@ -0,0 +1,332 @@
1
+ [
2
+ {
3
+ "metadataOutputVersion" : "3.0",
4
+ "userDefinedMetadata" : {
5
+ "com.github.apple.coremltools.version" : "8.2",
6
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
7
+ "com.anemll.context_length" : "512",
8
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
9
+ "com.anemll.num_chunks" : "2",
10
+ "com.anemll.batch_size" : "64",
11
+ "com.anemll.info" : "Converted with Anemll v0.3.0",
12
+ "com.anemll.chunk_no" : "2"
13
+ },
14
+ "availability" : {
15
+ "macOS" : "15.0",
16
+ "tvOS" : "18.0",
17
+ "visionOS" : "2.0",
18
+ "watchOS" : "11.0",
19
+ "iOS" : "18.0",
20
+ "macCatalyst" : "18.0"
21
+ },
22
+ "inputSchema" : [
23
+ {
24
+ "hasShapeFlexibility" : "0",
25
+ "isOptional" : "0",
26
+ "dataType" : "Float16",
27
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
28
+ "shortDescription" : "",
29
+ "shape" : "[1, 1, 2048]",
30
+ "name" : "hidden_states",
31
+ "type" : "MultiArray"
32
+ },
33
+ {
34
+ "hasShapeFlexibility" : "0",
35
+ "isOptional" : "0",
36
+ "dataType" : "Int32",
37
+ "formattedType" : "MultiArray (Int32 1)",
38
+ "shortDescription" : "",
39
+ "shape" : "[1]",
40
+ "name" : "position_ids",
41
+ "type" : "MultiArray"
42
+ },
43
+ {
44
+ "hasShapeFlexibility" : "0",
45
+ "isOptional" : "0",
46
+ "dataType" : "Float16",
47
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1 × 512)",
48
+ "shortDescription" : "",
49
+ "shape" : "[1, 1, 1, 512]",
50
+ "name" : "causal_mask",
51
+ "type" : "MultiArray"
52
+ },
53
+ {
54
+ "hasShapeFlexibility" : "0",
55
+ "isOptional" : "0",
56
+ "dataType" : "Int32",
57
+ "formattedType" : "MultiArray (Int32 1)",
58
+ "shortDescription" : "",
59
+ "shape" : "[1]",
60
+ "name" : "current_pos",
61
+ "type" : "MultiArray"
62
+ }
63
+ ],
64
+ "outputSchema" : [
65
+ {
66
+ "hasShapeFlexibility" : "0",
67
+ "isOptional" : "0",
68
+ "dataType" : "Float16",
69
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
70
+ "shortDescription" : "",
71
+ "shape" : "[1, 1, 2048]",
72
+ "name" : "output_hidden_states",
73
+ "type" : "MultiArray"
74
+ }
75
+ ],
76
+ "modelParameters" : [
77
+
78
+ ],
79
+ "storagePrecision" : "Float16",
80
+ "method" : "predict",
81
+ "functions" : [
82
+ {
83
+ "inputSchema" : [
84
+ {
85
+ "hasShapeFlexibility" : "0",
86
+ "isOptional" : "0",
87
+ "dataType" : "Float16",
88
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
89
+ "shortDescription" : "",
90
+ "shape" : "[1, 1, 2048]",
91
+ "name" : "hidden_states",
92
+ "type" : "MultiArray"
93
+ },
94
+ {
95
+ "hasShapeFlexibility" : "0",
96
+ "isOptional" : "0",
97
+ "dataType" : "Int32",
98
+ "formattedType" : "MultiArray (Int32 1)",
99
+ "shortDescription" : "",
100
+ "shape" : "[1]",
101
+ "name" : "position_ids",
102
+ "type" : "MultiArray"
103
+ },
104
+ {
105
+ "hasShapeFlexibility" : "0",
106
+ "isOptional" : "0",
107
+ "dataType" : "Float16",
108
+ "formattedType" : "MultiArray (Float16 1 × 1 × 1 × 512)",
109
+ "shortDescription" : "",
110
+ "shape" : "[1, 1, 1, 512]",
111
+ "name" : "causal_mask",
112
+ "type" : "MultiArray"
113
+ },
114
+ {
115
+ "hasShapeFlexibility" : "0",
116
+ "isOptional" : "0",
117
+ "dataType" : "Int32",
118
+ "formattedType" : "MultiArray (Int32 1)",
119
+ "shortDescription" : "",
120
+ "shape" : "[1]",
121
+ "name" : "current_pos",
122
+ "type" : "MultiArray"
123
+ }
124
+ ],
125
+ "computePrecision" : "Mixed (Float16, Int32)",
126
+ "storagePrecision" : "Float16",
127
+ "stateSchema" : [
128
+ {
129
+ "dataType" : "Float16",
130
+ "isOptional" : "0",
131
+ "formattedType" : "State (Float16 32 × 8 × 512 × 64)",
132
+ "shortDescription" : "",
133
+ "shape" : "[32, 8, 512, 64]",
134
+ "name" : "model_model_kv_cache_0",
135
+ "type" : "State"
136
+ }
137
+ ],
138
+ "outputSchema" : [
139
+ {
140
+ "hasShapeFlexibility" : "0",
141
+ "isOptional" : "0",
142
+ "dataType" : "Float16",
143
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
144
+ "shortDescription" : "",
145
+ "shape" : "[1, 1, 2048]",
146
+ "name" : "output_hidden_states",
147
+ "type" : "MultiArray"
148
+ }
149
+ ],
150
+ "name" : "infer",
151
+ "mlProgramOperationTypeHistogram" : {
152
+ "Ios18.expandDims" : 32,
153
+ "Ios18.mul" : 80,
154
+ "Ios18.matmul" : 16,
155
+ "Identity" : 1,
156
+ "Ios16.reduceMean" : 17,
157
+ "Ios18.exp" : 8,
158
+ "Ios18.realDiv" : 8,
159
+ "Ios18.greaterEqual" : 1,
160
+ "Select" : 1,
161
+ "Ios18.readState" : 17,
162
+ "Tile" : 16,
163
+ "Ios18.gather" : 2,
164
+ "Ios18.add" : 42,
165
+ "Ios18.layerNorm" : 17,
166
+ "Ios18.sliceUpdate" : 16,
167
+ "Ios18.writeState" : 16,
168
+ "Ios18.reshape" : 50,
169
+ "Ios16.reduceMax" : 8,
170
+ "Ios16.reduceSum" : 8,
171
+ "Ios18.conv" : 48,
172
+ "Ios18.concat" : 48,
173
+ "Ios18.transpose" : 32,
174
+ "Ios18.sub" : 41,
175
+ "Ios18.linear" : 8,
176
+ "Ios18.silu" : 8,
177
+ "Ios18.sliceByIndex" : 50,
178
+ "Ios18.squeeze" : 24
179
+ }
180
+ },
181
+ {
182
+ "inputSchema" : [
183
+ {
184
+ "hasShapeFlexibility" : "0",
185
+ "isOptional" : "0",
186
+ "dataType" : "Float16",
187
+ "formattedType" : "MultiArray (Float16 1 × 64 × 2048)",
188
+ "shortDescription" : "",
189
+ "shape" : "[1, 64, 2048]",
190
+ "name" : "hidden_states",
191
+ "type" : "MultiArray"
192
+ },
193
+ {
194
+ "hasShapeFlexibility" : "0",
195
+ "isOptional" : "0",
196
+ "dataType" : "Int32",
197
+ "formattedType" : "MultiArray (Int32 64)",
198
+ "shortDescription" : "",
199
+ "shape" : "[64]",
200
+ "name" : "position_ids",
201
+ "type" : "MultiArray"
202
+ },
203
+ {
204
+ "hasShapeFlexibility" : "0",
205
+ "isOptional" : "0",
206
+ "dataType" : "Float16",
207
+ "formattedType" : "MultiArray (Float16 1 × 1 × 64 × 512)",
208
+ "shortDescription" : "",
209
+ "shape" : "[1, 1, 64, 512]",
210
+ "name" : "causal_mask",
211
+ "type" : "MultiArray"
212
+ },
213
+ {
214
+ "hasShapeFlexibility" : "0",
215
+ "isOptional" : "0",
216
+ "dataType" : "Int32",
217
+ "formattedType" : "MultiArray (Int32 1)",
218
+ "shortDescription" : "",
219
+ "shape" : "[1]",
220
+ "name" : "current_pos",
221
+ "type" : "MultiArray"
222
+ }
223
+ ],
224
+ "computePrecision" : "Mixed (Float16, Int32)",
225
+ "storagePrecision" : "Float16",
226
+ "stateSchema" : [
227
+ {
228
+ "dataType" : "Float16",
229
+ "isOptional" : "0",
230
+ "formattedType" : "State (Float16 32 × 8 × 512 × 64)",
231
+ "shortDescription" : "",
232
+ "shape" : "[32, 8, 512, 64]",
233
+ "name" : "model_model_kv_cache_0",
234
+ "type" : "State"
235
+ }
236
+ ],
237
+ "outputSchema" : [
238
+ {
239
+ "hasShapeFlexibility" : "0",
240
+ "isOptional" : "0",
241
+ "dataType" : "Float16",
242
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
243
+ "shortDescription" : "",
244
+ "shape" : "[1, 1, 2048]",
245
+ "name" : "output_hidden_states",
246
+ "type" : "MultiArray"
247
+ }
248
+ ],
249
+ "name" : "prefill",
250
+ "mlProgramOperationTypeHistogram" : {
251
+ "Ios18.expandDims" : 31,
252
+ "Ios18.mul" : 79,
253
+ "Ios18.matmul" : 16,
254
+ "Ios16.reduceMean" : 15,
255
+ "Ios18.exp" : 8,
256
+ "Ios18.realDiv" : 8,
257
+ "Ios18.greaterEqual" : 1,
258
+ "Select" : 1,
259
+ "Ios18.readState" : 17,
260
+ "Tile" : 16,
261
+ "Ios18.gather" : 2,
262
+ "Ios18.add" : 41,
263
+ "Ios18.layerNorm" : 15,
264
+ "Ios18.sliceUpdate" : 16,
265
+ "Ios18.writeState" : 16,
266
+ "Ios18.reshape" : 66,
267
+ "Ios16.reduceMax" : 8,
268
+ "Ios16.reduceSum" : 8,
269
+ "Ios18.conv" : 45,
270
+ "Ios18.concat" : 48,
271
+ "Ios18.transpose" : 56,
272
+ "Ios18.sub" : 39,
273
+ "Ios18.linear" : 8,
274
+ "Ios18.silu" : 7,
275
+ "Ios18.sliceByIndex" : 51,
276
+ "Ios18.squeeze" : 23
277
+ }
278
+ }
279
+ ],
280
+ "version" : "0.3.0",
281
+ "isUpdatable" : "0",
282
+ "defaultFunctionName" : "infer",
283
+ "specificationVersion" : 9,
284
+ "stateSchema" : [
285
+ {
286
+ "dataType" : "Float16",
287
+ "isOptional" : "0",
288
+ "formattedType" : "State (Float16 32 × 8 × 512 × 64)",
289
+ "shortDescription" : "",
290
+ "shape" : "[32, 8, 512, 64]",
291
+ "name" : "model_model_kv_cache_0",
292
+ "type" : "State"
293
+ }
294
+ ],
295
+ "computePrecision" : "Mixed (Float16, Int32)",
296
+ "mlProgramOperationTypeHistogram" : {
297
+ "Ios18.expandDims" : 32,
298
+ "Ios18.mul" : 80,
299
+ "Ios18.matmul" : 16,
300
+ "Identity" : 1,
301
+ "Ios16.reduceMean" : 17,
302
+ "Ios18.exp" : 8,
303
+ "Ios18.realDiv" : 8,
304
+ "Ios18.greaterEqual" : 1,
305
+ "Select" : 1,
306
+ "Ios18.readState" : 17,
307
+ "Tile" : 16,
308
+ "Ios18.gather" : 2,
309
+ "Ios18.add" : 42,
310
+ "Ios18.layerNorm" : 17,
311
+ "Ios18.sliceUpdate" : 16,
312
+ "Ios18.writeState" : 16,
313
+ "Ios18.reshape" : 50,
314
+ "Ios16.reduceMax" : 8,
315
+ "Ios16.reduceSum" : 8,
316
+ "Ios18.conv" : 48,
317
+ "Ios18.concat" : 48,
318
+ "Ios18.transpose" : 32,
319
+ "Ios18.sub" : 41,
320
+ "Ios18.linear" : 8,
321
+ "Ios18.silu" : 8,
322
+ "Ios18.sliceByIndex" : 50,
323
+ "Ios18.squeeze" : 24
324
+ },
325
+ "shortDescription" : "Anemll Model: Multifunction FFN+Prefill",
326
+ "generatedClassName" : "llama_FFN_PF_chunk_02of02",
327
+ "author" : "Converted with Anemll v0.3.0",
328
+ "modelType" : {
329
+ "name" : "MLModelType_mlProgram"
330
+ }
331
+ }
332
+ ]
llama_FFN_PF_chunk_02of02.mlmodelc/model.mil ADDED
The diff for this file is too large to render. See raw diff
 
llama_FFN_PF_chunk_02of02.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ecd8a42e94f71bfe30e803d8e1db739dcf3c1325ae82dab88c20f62b3dedc14
3
+ size 1006711616
llama_embeddings.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a584b4103b6847031bf2b23d8a9947db58bffe4ba6a865290c8ef3d33a3ecff
3
+ size 243
llama_embeddings.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfb59657e6a7d770d6e215cd18b587be3083bac8f313bd2560d6e09f3779cb02
3
+ size 498
llama_embeddings.mlmodelc/metadata.json ADDED
@@ -0,0 +1,67 @@
1
+ [
2
+ {
3
+ "shortDescription" : "Anemll Model (Embeddings) converted to CoreML",
4
+ "metadataOutputVersion" : "3.0",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16)",
11
+ "shortDescription" : "",
12
+ "shape" : "[]",
13
+ "name" : "hidden_states",
14
+ "type" : "MultiArray"
15
+ }
16
+ ],
17
+ "version" : "0.3.0",
18
+ "modelParameters" : [
19
+
20
+ ],
21
+ "author" : "Converted with Anemll v0.3.0",
22
+ "specificationVersion" : 9,
23
+ "storagePrecision" : "Float16",
24
+ "mlProgramOperationTypeHistogram" : {
25
+ "Ios18.gather" : 1
26
+ },
27
+ "computePrecision" : "Mixed (Float16, Int32)",
28
+ "stateSchema" : [
29
+
30
+ ],
31
+ "isUpdatable" : "0",
32
+ "availability" : {
33
+ "macOS" : "15.0",
34
+ "tvOS" : "18.0",
35
+ "visionOS" : "2.0",
36
+ "watchOS" : "11.0",
37
+ "iOS" : "18.0",
38
+ "macCatalyst" : "18.0"
39
+ },
40
+ "modelType" : {
41
+ "name" : "MLModelType_mlProgram"
42
+ },
43
+ "inputSchema" : [
44
+ {
45
+ "shortDescription" : "",
46
+ "dataType" : "Int32",
47
+ "hasShapeFlexibility" : "1",
48
+ "isOptional" : "0",
49
+ "shapeFlexibility" : "1 × 1 | 1 × 64",
50
+ "formattedType" : "MultiArray (Int32 1 × 1)",
51
+ "type" : "MultiArray",
52
+ "shape" : "[1, 1]",
53
+ "name" : "input_ids",
54
+ "enumeratedShapes" : "[[1, 1], [1, 64]]"
55
+ }
56
+ ],
57
+ "userDefinedMetadata" : {
58
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
59
+ "com.github.apple.coremltools.version" : "8.2",
60
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
61
+ "com.anemll.info" : "Converted with Anemll v0.3.0",
62
+ "com.anemll.context_length" : "512"
63
+ },
64
+ "generatedClassName" : "llama_embeddings",
65
+ "method" : "predict"
66
+ }
67
+ ]
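input_ids is declared with enumerated shapes [1, 1] and [1, 64], matching the single-token infer path and the 64-token prefill path in chat.py. A short sketch exercising both shapes (hypothetical, run from the repository root):

import coremltools as ct
import numpy as np

embed = ct.models.CompiledMLModel("llama_embeddings.mlmodelc")

# Single-token lookup (infer path): [1, 1] -> [1, 1, 2048]
one = embed.predict({"input_ids": np.zeros((1, 1), dtype=np.int32)})
# Full-window lookup (prefill path): [1, 64] -> [1, 64, 2048]
many = embed.predict({"input_ids": np.zeros((1, 64), dtype=np.int32)})
print(one["hidden_states"].shape, many["hidden_states"].shape)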
llama_embeddings.mlmodelc/model.mil ADDED
@@ -0,0 +1,11 @@
1
+ program(1.3)
2
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3404.16.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
3
+ {
4
+ func main<ios18>(tensor<int32, [1, ?]> input_ids) [FlexibleShapeInformation = tuple<tuple<string, dict<string, tensor<int32, [?]>>>, tuple<string, dict<string, dict<string, tensor<int32, [?]>>>>>((("DefaultShapes", {{"input_ids", [1, 1]}}), ("EnumeratedShapes", {{"79ae981e", {{"input_ids", [1, 1]}}}, {"ed9b58c8", {{"input_ids", [1, 64]}}}})))] {
5
+ int32 hidden_states_axis_0 = const()[name = string("hidden_states_axis_0"), val = int32(0)];
6
+ int32 hidden_states_batch_dims_0 = const()[name = string("hidden_states_batch_dims_0"), val = int32(0)];
7
+ bool hidden_states_validate_indices_0 = const()[name = string("hidden_states_validate_indices_0"), val = bool(false)];
8
+ tensor<fp16, [128256, 2048]> embed_tokens_weight_to_fp16 = const()[name = string("embed_tokens_weight_to_fp16"), val = tensor<fp16, [128256, 2048]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
9
+ tensor<fp16, [1, ?, 2048]> hidden_states = gather(axis = hidden_states_axis_0, batch_dims = hidden_states_batch_dims_0, indices = input_ids, validate_indices = hidden_states_validate_indices_0, x = embed_tokens_weight_to_fp16)[name = string("hidden_states_cast_fp16")];
10
+ } -> (hidden_states);
11
+ }
llama_embeddings.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73f76c5cbd933c0ee67f251d2278431346670fa90b5891d58ffd859af8e8003e
3
+ size 525336704
llama_lm_head.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa2e8e924a2534f234a920ffe4f9afc1629c3ff6c190980405a0bdf2884c4759
3
+ size 243
llama_lm_head.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e822b459108468fb68f202bad1a864bf524496ad4dabf0f097da5a3d4ef43edd
3
+ size 661
llama_lm_head.mlmodelc/metadata.json ADDED
@@ -0,0 +1,138 @@
1
+ [
2
+ {
3
+ "shortDescription" : "Anemll Model (LM Head) converted to CoreML",
4
+ "metadataOutputVersion" : "3.0",
5
+ "outputSchema" : [
6
+ {
7
+ "hasShapeFlexibility" : "0",
8
+ "isOptional" : "0",
9
+ "dataType" : "Float16",
10
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
11
+ "shortDescription" : "",
12
+ "shape" : "[1, 1, 16032]",
13
+ "name" : "logits1",
14
+ "type" : "MultiArray"
15
+ },
16
+ {
17
+ "hasShapeFlexibility" : "0",
18
+ "isOptional" : "0",
19
+ "dataType" : "Float16",
20
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
21
+ "shortDescription" : "",
22
+ "shape" : "[1, 1, 16032]",
23
+ "name" : "logits2",
24
+ "type" : "MultiArray"
25
+ },
26
+ {
27
+ "hasShapeFlexibility" : "0",
28
+ "isOptional" : "0",
29
+ "dataType" : "Float16",
30
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
31
+ "shortDescription" : "",
32
+ "shape" : "[1, 1, 16032]",
33
+ "name" : "logits3",
34
+ "type" : "MultiArray"
35
+ },
36
+ {
37
+ "hasShapeFlexibility" : "0",
38
+ "isOptional" : "0",
39
+ "dataType" : "Float16",
40
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
41
+ "shortDescription" : "",
42
+ "shape" : "[1, 1, 16032]",
43
+ "name" : "logits4",
44
+ "type" : "MultiArray"
45
+ },
46
+ {
47
+ "hasShapeFlexibility" : "0",
48
+ "isOptional" : "0",
49
+ "dataType" : "Float16",
50
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
51
+ "shortDescription" : "",
52
+ "shape" : "[1, 1, 16032]",
53
+ "name" : "logits5",
54
+ "type" : "MultiArray"
55
+ },
56
+ {
57
+ "hasShapeFlexibility" : "0",
58
+ "isOptional" : "0",
59
+ "dataType" : "Float16",
60
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
61
+ "shortDescription" : "",
62
+ "shape" : "[1, 1, 16032]",
63
+ "name" : "logits6",
64
+ "type" : "MultiArray"
65
+ },
66
+ {
67
+ "hasShapeFlexibility" : "0",
68
+ "isOptional" : "0",
69
+ "dataType" : "Float16",
70
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
71
+ "shortDescription" : "",
72
+ "shape" : "[1, 1, 16032]",
73
+ "name" : "logits7",
74
+ "type" : "MultiArray"
75
+ },
76
+ {
77
+ "hasShapeFlexibility" : "0",
78
+ "isOptional" : "0",
79
+ "dataType" : "Float16",
80
+ "formattedType" : "MultiArray (Float16 1 × 1 × 16032)",
81
+ "shortDescription" : "",
82
+ "shape" : "[1, 1, 16032]",
83
+ "name" : "logits8",
84
+ "type" : "MultiArray"
85
+ }
86
+ ],
87
+ "version" : "0.3.0",
88
+ "modelParameters" : [
89
+
90
+ ],
91
+ "author" : "Converted with Anemll v0.3.0",
92
+ "specificationVersion" : 9,
93
+ "storagePrecision" : "Float16",
94
+ "mlProgramOperationTypeHistogram" : {
95
+ "Ios18.transpose" : 9,
96
+ "Ios18.expandDims" : 1,
97
+ "Ios18.conv" : 8,
98
+ "Ios18.squeeze" : 8
99
+ },
100
+ "computePrecision" : "Mixed (Float16, Int32)",
101
+ "stateSchema" : [
102
+
103
+ ],
104
+ "isUpdatable" : "0",
105
+ "availability" : {
106
+ "macOS" : "15.0",
107
+ "tvOS" : "18.0",
108
+ "visionOS" : "2.0",
109
+ "watchOS" : "11.0",
110
+ "iOS" : "18.0",
111
+ "macCatalyst" : "18.0"
112
+ },
113
+ "modelType" : {
114
+ "name" : "MLModelType_mlProgram"
115
+ },
116
+ "inputSchema" : [
117
+ {
118
+ "hasShapeFlexibility" : "0",
119
+ "isOptional" : "0",
120
+ "dataType" : "Float16",
121
+ "formattedType" : "MultiArray (Float16 1 × 1 × 2048)",
122
+ "shortDescription" : "",
123
+ "shape" : "[1, 1, 2048]",
124
+ "name" : "hidden_states",
125
+ "type" : "MultiArray"
126
+ }
127
+ ],
128
+ "userDefinedMetadata" : {
129
+ "com.github.apple.coremltools.source_dialect" : "TorchScript",
130
+ "com.github.apple.coremltools.version" : "8.2",
131
+ "com.github.apple.coremltools.source" : "torch==2.5.0",
132
+ "com.anemll.info" : "Converted with Anemll v0.3.0",
133
+ "com.anemll.context_length" : "512"
134
+ },
135
+ "generatedClassName" : "llama_lm_head",
136
+ "method" : "predict"
137
+ }
138
+ ]
llama_lm_head.mlmodelc/model.mil ADDED
@@ -0,0 +1,98 @@
1
+ program(1.3)
2
+ [buildInfo = dict<string, string>({{"coremlc-component-MIL", "3404.16.1"}, {"coremlc-version", "3404.23.1"}, {"coremltools-component-torch", "2.5.0"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.2"}})]
3
+ {
4
+ func main<ios18>(tensor<fp16, [1, 1, 2048]> hidden_states) {
5
+ tensor<int32, [3]> var_5 = const()[name = string("op_5"), val = tensor<int32, [3]>([0, 2, 1])];
6
+ tensor<int32, [1]> input_axes_0 = const()[name = string("input_axes_0"), val = tensor<int32, [1]>([2])];
7
+ tensor<fp16, [1, 2048, 1]> var_6_cast_fp16 = transpose(perm = var_5, x = hidden_states)[name = string("transpose_8")];
8
+ tensor<fp16, [1, 2048, 1, 1]> input_cast_fp16 = expand_dims(axes = input_axes_0, x = var_6_cast_fp16)[name = string("input_cast_fp16")];
9
+ string var_29_pad_type_0 = const()[name = string("op_29_pad_type_0"), val = string("valid")];
10
+ tensor<int32, [2]> var_29_strides_0 = const()[name = string("op_29_strides_0"), val = tensor<int32, [2]>([1, 1])];
11
+ tensor<int32, [4]> var_29_pad_0 = const()[name = string("op_29_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
12
+ tensor<int32, [2]> var_29_dilations_0 = const()[name = string("op_29_dilations_0"), val = tensor<int32, [2]>([1, 1])];
13
+ int32 var_29_groups_0 = const()[name = string("op_29_groups_0"), val = int32(1)];
14
+ tensor<fp16, [16032, 2048, 1, 1]> var_9_promoted_to_fp16 = const()[name = string("op_9_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64)))];
15
+ tensor<fp16, [1, 16032, 1, 1]> var_29_cast_fp16 = conv(dilations = var_29_dilations_0, groups = var_29_groups_0, pad = var_29_pad_0, pad_type = var_29_pad_type_0, strides = var_29_strides_0, weight = var_9_promoted_to_fp16, x = input_cast_fp16)[name = string("op_29_cast_fp16")];
16
+ tensor<int32, [1]> var_31_axes_0 = const()[name = string("op_31_axes_0"), val = tensor<int32, [1]>([2])];
17
+ tensor<fp16, [1, 16032, 1]> var_31_cast_fp16 = squeeze(axes = var_31_axes_0, x = var_29_cast_fp16)[name = string("op_31_cast_fp16")];
18
+ tensor<int32, [3]> var_34_perm_0 = const()[name = string("op_34_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
19
+ string var_55_pad_type_0 = const()[name = string("op_55_pad_type_0"), val = string("valid")];
20
+ tensor<int32, [2]> var_55_strides_0 = const()[name = string("op_55_strides_0"), val = tensor<int32, [2]>([1, 1])];
21
+ tensor<int32, [4]> var_55_pad_0 = const()[name = string("op_55_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
22
+ tensor<int32, [2]> var_55_dilations_0 = const()[name = string("op_55_dilations_0"), val = tensor<int32, [2]>([1, 1])];
23
+ int32 var_55_groups_0 = const()[name = string("op_55_groups_0"), val = int32(1)];
24
+ tensor<fp16, [16032, 2048, 1, 1]> var_35_promoted_to_fp16 = const()[name = string("op_35_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(65667200)))];
25
+ tensor<fp16, [1, 16032, 1, 1]> var_55_cast_fp16 = conv(dilations = var_55_dilations_0, groups = var_55_groups_0, pad = var_55_pad_0, pad_type = var_55_pad_type_0, strides = var_55_strides_0, weight = var_35_promoted_to_fp16, x = input_cast_fp16)[name = string("op_55_cast_fp16")];
26
+ tensor<int32, [1]> var_57_axes_0 = const()[name = string("op_57_axes_0"), val = tensor<int32, [1]>([2])];
27
+ tensor<fp16, [1, 16032, 1]> var_57_cast_fp16 = squeeze(axes = var_57_axes_0, x = var_55_cast_fp16)[name = string("op_57_cast_fp16")];
28
+ tensor<int32, [3]> var_60_perm_0 = const()[name = string("op_60_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
29
+ string var_81_pad_type_0 = const()[name = string("op_81_pad_type_0"), val = string("valid")];
30
+ tensor<int32, [2]> var_81_strides_0 = const()[name = string("op_81_strides_0"), val = tensor<int32, [2]>([1, 1])];
31
+ tensor<int32, [4]> var_81_pad_0 = const()[name = string("op_81_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
32
+ tensor<int32, [2]> var_81_dilations_0 = const()[name = string("op_81_dilations_0"), val = tensor<int32, [2]>([1, 1])];
33
+ int32 var_81_groups_0 = const()[name = string("op_81_groups_0"), val = int32(1)];
34
+ tensor<fp16, [16032, 2048, 1, 1]> var_61_promoted_to_fp16 = const()[name = string("op_61_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(131334336)))];
35
+ tensor<fp16, [1, 16032, 1, 1]> var_81_cast_fp16 = conv(dilations = var_81_dilations_0, groups = var_81_groups_0, pad = var_81_pad_0, pad_type = var_81_pad_type_0, strides = var_81_strides_0, weight = var_61_promoted_to_fp16, x = input_cast_fp16)[name = string("op_81_cast_fp16")];
36
+ tensor<int32, [1]> var_83_axes_0 = const()[name = string("op_83_axes_0"), val = tensor<int32, [1]>([2])];
37
+ tensor<fp16, [1, 16032, 1]> var_83_cast_fp16 = squeeze(axes = var_83_axes_0, x = var_81_cast_fp16)[name = string("op_83_cast_fp16")];
38
+ tensor<int32, [3]> var_86_perm_0 = const()[name = string("op_86_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
39
+ string var_107_pad_type_0 = const()[name = string("op_107_pad_type_0"), val = string("valid")];
40
+ tensor<int32, [2]> var_107_strides_0 = const()[name = string("op_107_strides_0"), val = tensor<int32, [2]>([1, 1])];
41
+ tensor<int32, [4]> var_107_pad_0 = const()[name = string("op_107_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
42
+ tensor<int32, [2]> var_107_dilations_0 = const()[name = string("op_107_dilations_0"), val = tensor<int32, [2]>([1, 1])];
43
+ int32 var_107_groups_0 = const()[name = string("op_107_groups_0"), val = int32(1)];
44
+ tensor<fp16, [16032, 2048, 1, 1]> var_87_promoted_to_fp16 = const()[name = string("op_87_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(197001472)))];
45
+ tensor<fp16, [1, 16032, 1, 1]> var_107_cast_fp16 = conv(dilations = var_107_dilations_0, groups = var_107_groups_0, pad = var_107_pad_0, pad_type = var_107_pad_type_0, strides = var_107_strides_0, weight = var_87_promoted_to_fp16, x = input_cast_fp16)[name = string("op_107_cast_fp16")];
46
+ tensor<int32, [1]> var_109_axes_0 = const()[name = string("op_109_axes_0"), val = tensor<int32, [1]>([2])];
47
+ tensor<fp16, [1, 16032, 1]> var_109_cast_fp16 = squeeze(axes = var_109_axes_0, x = var_107_cast_fp16)[name = string("op_109_cast_fp16")];
48
+ tensor<int32, [3]> var_112_perm_0 = const()[name = string("op_112_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
49
+ string var_133_pad_type_0 = const()[name = string("op_133_pad_type_0"), val = string("valid")];
50
+ tensor<int32, [2]> var_133_strides_0 = const()[name = string("op_133_strides_0"), val = tensor<int32, [2]>([1, 1])];
51
+ tensor<int32, [4]> var_133_pad_0 = const()[name = string("op_133_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
52
+ tensor<int32, [2]> var_133_dilations_0 = const()[name = string("op_133_dilations_0"), val = tensor<int32, [2]>([1, 1])];
53
+ int32 var_133_groups_0 = const()[name = string("op_133_groups_0"), val = int32(1)];
54
+ tensor<fp16, [16032, 2048, 1, 1]> var_113_promoted_to_fp16 = const()[name = string("op_113_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(262668608)))];
55
+ tensor<fp16, [1, 16032, 1, 1]> var_133_cast_fp16 = conv(dilations = var_133_dilations_0, groups = var_133_groups_0, pad = var_133_pad_0, pad_type = var_133_pad_type_0, strides = var_133_strides_0, weight = var_113_promoted_to_fp16, x = input_cast_fp16)[name = string("op_133_cast_fp16")];
56
+ tensor<int32, [1]> var_135_axes_0 = const()[name = string("op_135_axes_0"), val = tensor<int32, [1]>([2])];
57
+ tensor<fp16, [1, 16032, 1]> var_135_cast_fp16 = squeeze(axes = var_135_axes_0, x = var_133_cast_fp16)[name = string("op_135_cast_fp16")];
58
+ tensor<int32, [3]> var_138_perm_0 = const()[name = string("op_138_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
59
+ string var_159_pad_type_0 = const()[name = string("op_159_pad_type_0"), val = string("valid")];
60
+ tensor<int32, [2]> var_159_strides_0 = const()[name = string("op_159_strides_0"), val = tensor<int32, [2]>([1, 1])];
61
+ tensor<int32, [4]> var_159_pad_0 = const()[name = string("op_159_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
62
+ tensor<int32, [2]> var_159_dilations_0 = const()[name = string("op_159_dilations_0"), val = tensor<int32, [2]>([1, 1])];
63
+ int32 var_159_groups_0 = const()[name = string("op_159_groups_0"), val = int32(1)];
64
+ tensor<fp16, [16032, 2048, 1, 1]> var_139_promoted_to_fp16 = const()[name = string("op_139_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(328335744)))];
65
+ tensor<fp16, [1, 16032, 1, 1]> var_159_cast_fp16 = conv(dilations = var_159_dilations_0, groups = var_159_groups_0, pad = var_159_pad_0, pad_type = var_159_pad_type_0, strides = var_159_strides_0, weight = var_139_promoted_to_fp16, x = input_cast_fp16)[name = string("op_159_cast_fp16")];
66
+ tensor<int32, [1]> var_161_axes_0 = const()[name = string("op_161_axes_0"), val = tensor<int32, [1]>([2])];
67
+ tensor<fp16, [1, 16032, 1]> var_161_cast_fp16 = squeeze(axes = var_161_axes_0, x = var_159_cast_fp16)[name = string("op_161_cast_fp16")];
68
+ tensor<int32, [3]> var_164_perm_0 = const()[name = string("op_164_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
69
+ string var_185_pad_type_0 = const()[name = string("op_185_pad_type_0"), val = string("valid")];
70
+ tensor<int32, [2]> var_185_strides_0 = const()[name = string("op_185_strides_0"), val = tensor<int32, [2]>([1, 1])];
71
+ tensor<int32, [4]> var_185_pad_0 = const()[name = string("op_185_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
72
+ tensor<int32, [2]> var_185_dilations_0 = const()[name = string("op_185_dilations_0"), val = tensor<int32, [2]>([1, 1])];
73
+ int32 var_185_groups_0 = const()[name = string("op_185_groups_0"), val = int32(1)];
74
+ tensor<fp16, [16032, 2048, 1, 1]> var_165_promoted_to_fp16 = const()[name = string("op_165_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(394002880)))];
75
+ tensor<fp16, [1, 16032, 1, 1]> var_185_cast_fp16 = conv(dilations = var_185_dilations_0, groups = var_185_groups_0, pad = var_185_pad_0, pad_type = var_185_pad_type_0, strides = var_185_strides_0, weight = var_165_promoted_to_fp16, x = input_cast_fp16)[name = string("op_185_cast_fp16")];
76
+ tensor<int32, [1]> var_187_axes_0 = const()[name = string("op_187_axes_0"), val = tensor<int32, [1]>([2])];
77
+ tensor<fp16, [1, 16032, 1]> var_187_cast_fp16 = squeeze(axes = var_187_axes_0, x = var_185_cast_fp16)[name = string("op_187_cast_fp16")];
78
+ tensor<int32, [3]> var_190_perm_0 = const()[name = string("op_190_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
79
+ string var_211_pad_type_0 = const()[name = string("op_211_pad_type_0"), val = string("valid")];
80
+ tensor<int32, [2]> var_211_strides_0 = const()[name = string("op_211_strides_0"), val = tensor<int32, [2]>([1, 1])];
81
+ tensor<int32, [4]> var_211_pad_0 = const()[name = string("op_211_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
82
+ tensor<int32, [2]> var_211_dilations_0 = const()[name = string("op_211_dilations_0"), val = tensor<int32, [2]>([1, 1])];
83
+ int32 var_211_groups_0 = const()[name = string("op_211_groups_0"), val = int32(1)];
84
+ tensor<fp16, [16032, 2048, 1, 1]> var_191_promoted_to_fp16 = const()[name = string("op_191_promoted_to_fp16"), val = tensor<fp16, [16032, 2048, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(459670016)))];
85
+ tensor<fp16, [1, 16032, 1, 1]> var_211_cast_fp16 = conv(dilations = var_211_dilations_0, groups = var_211_groups_0, pad = var_211_pad_0, pad_type = var_211_pad_type_0, strides = var_211_strides_0, weight = var_191_promoted_to_fp16, x = input_cast_fp16)[name = string("op_211_cast_fp16")];
86
+ tensor<int32, [1]> var_213_axes_0 = const()[name = string("op_213_axes_0"), val = tensor<int32, [1]>([2])];
87
+ tensor<fp16, [1, 16032, 1]> var_213_cast_fp16 = squeeze(axes = var_213_axes_0, x = var_211_cast_fp16)[name = string("op_213_cast_fp16")];
88
+ tensor<int32, [3]> var_216_perm_0 = const()[name = string("op_216_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
89
+ tensor<fp16, [1, 1, 16032]> logits8 = transpose(perm = var_216_perm_0, x = var_213_cast_fp16)[name = string("transpose_0")];
90
+ tensor<fp16, [1, 1, 16032]> logits7 = transpose(perm = var_190_perm_0, x = var_187_cast_fp16)[name = string("transpose_1")];
91
+ tensor<fp16, [1, 1, 16032]> logits6 = transpose(perm = var_164_perm_0, x = var_161_cast_fp16)[name = string("transpose_2")];
92
+ tensor<fp16, [1, 1, 16032]> logits5 = transpose(perm = var_138_perm_0, x = var_135_cast_fp16)[name = string("transpose_3")];
93
+ tensor<fp16, [1, 1, 16032]> logits4 = transpose(perm = var_112_perm_0, x = var_109_cast_fp16)[name = string("transpose_4")];
94
+ tensor<fp16, [1, 1, 16032]> logits3 = transpose(perm = var_86_perm_0, x = var_83_cast_fp16)[name = string("transpose_5")];
95
+ tensor<fp16, [1, 1, 16032]> logits2 = transpose(perm = var_60_perm_0, x = var_57_cast_fp16)[name = string("transpose_6")];
96
+ tensor<fp16, [1, 1, 16032]> logits1 = transpose(perm = var_34_perm_0, x = var_31_cast_fp16)[name = string("transpose_7")];
97
+ } -> (logits1, logits2, logits3, logits4, logits5, logits6, logits7, logits8);
98
+ }
llama_lm_head.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa8e709ced9796bd998ebb1f358ebb9d865b85c921b757639b0f43bc5a59fc7a
3
+ size 525337152
meta.yaml ADDED
@@ -0,0 +1,23 @@
1
+ model_info:
2
+ name: anemll-Meta-Llama-3.2-1B-ctx512
3
+ version: 0.3.0
4
+ description: |
5
+ Demonstrates running Meta-Llama-3.2-1B on the Apple Neural Engine
6
+ Context length: 512
7
+ Batch size: 64
8
+ Chunks: 2
9
+ license: MIT
10
+ author: Anemll
11
+ framework: Core ML
12
+ language: Python
13
+ parameters:
14
+ context_length: 512
15
+ batch_size: 64
16
+ lut_embeddings: none
17
+ lut_ffn: none
18
+ lut_lmhead: none
19
+ num_chunks: 2
20
+ model_prefix: llama
21
+ embeddings: llama_embeddings.mlmodelc
22
+ lm_head: llama_lm_head.mlmodelc
23
+ ffn: llama_FFN_PF.mlmodelc
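meta.yaml is the manifest a chat.py-style driver reads to locate the three compiled parts and their generation parameters. A minimal sketch of consuming it with PyYAML (which chat.py already imports); the key layout follows the file above, though exactly how the driver walks these keys may differ:

import yaml

with open("meta.yaml") as f:
    meta = yaml.safe_load(f)

params = meta["model_info"]["parameters"]
context_length = params["context_length"]   # 512
batch_size = params["batch_size"]           # 64
num_chunks = params["num_chunks"]           # 2 FFN chunks

# The three compiled Core ML parts named in the manifest.
embeddings_path = params["embeddings"]      # llama_embeddings.mlmodelc
lm_head_path = params["lm_head"]            # llama_lm_head.mlmodelc
ffn_path = params["ffn"]                    # llama_FFN_PF.mlmodelc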
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
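tokenizer.json carries the full vocabulary and is too large to render inline; the tokenizer_config.json diff that follows registers the 256 Llama 3 special tokens (ids 128000-128255, mostly reserved placeholders) plus the chat template that wraps each turn in <|start_header_id|> / <|end_header_id|> headers and closes it with <|eot_id|>. A short sketch of exercising both files through transformers' AutoTokenizer (the class chat.py imports); loading from the repo root is an assumption:

from transformers import AutoTokenizer

# Pass the directory holding tokenizer.json and tokenizer_config.json.
tok = AutoTokenizer.from_pretrained(".")

# The chat_template shown in the diff below renders a user turn and, with
# add_generation_prompt=True, opens an assistant header for generation.
ids = tok.apply_chat_template(
    [{"role": "user", "content": "Hello!"}],
    add_generation_prompt=True,
)
print(tok.convert_ids_to_tokens(ids[:4]))  # begins with <|begin_of_text|>
assert tok.convert_tokens_to_ids("<|eot_id|>") == 128009  # per the config below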
tokenizer_config.json ADDED
@@ -0,0 +1,2062 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "128000": {
4
+ "content": "<|begin_of_text|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "128001": {
12
+ "content": "<|end_of_text|>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "128002": {
20
+ "content": "<|reserved_special_token_0|>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "128003": {
28
+ "content": "<|reserved_special_token_1|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "128004": {
36
+ "content": "<|finetune_right_pad_id|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "128005": {
44
+ "content": "<|reserved_special_token_2|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "128006": {
52
+ "content": "<|start_header_id|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "128007": {
60
+ "content": "<|end_header_id|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "128008": {
68
+ "content": "<|eom_id|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "128009": {
76
+ "content": "<|eot_id|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "128010": {
84
+ "content": "<|python_tag|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "128011": {
92
+ "content": "<|reserved_special_token_3|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "128012": {
100
+ "content": "<|reserved_special_token_4|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "128013": {
108
+ "content": "<|reserved_special_token_5|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "128014": {
116
+ "content": "<|reserved_special_token_6|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "128015": {
124
+ "content": "<|reserved_special_token_7|>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "128016": {
132
+ "content": "<|reserved_special_token_8|>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "128017": {
140
+ "content": "<|reserved_special_token_9|>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "128018": {
148
+ "content": "<|reserved_special_token_10|>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "128019": {
156
+ "content": "<|reserved_special_token_11|>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "128020": {
164
+ "content": "<|reserved_special_token_12|>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "128021": {
172
+ "content": "<|reserved_special_token_13|>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "128022": {
180
+ "content": "<|reserved_special_token_14|>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "128023": {
188
+ "content": "<|reserved_special_token_15|>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "128024": {
196
+ "content": "<|reserved_special_token_16|>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "128025": {
204
+ "content": "<|reserved_special_token_17|>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "128026": {
212
+ "content": "<|reserved_special_token_18|>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "128027": {
220
+ "content": "<|reserved_special_token_19|>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "128028": {
228
+ "content": "<|reserved_special_token_20|>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "128029": {
236
+ "content": "<|reserved_special_token_21|>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "128030": {
244
+ "content": "<|reserved_special_token_22|>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "128031": {
252
+ "content": "<|reserved_special_token_23|>",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "128032": {
260
+ "content": "<|reserved_special_token_24|>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "128033": {
268
+ "content": "<|reserved_special_token_25|>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "128034": {
276
+ "content": "<|reserved_special_token_26|>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "128035": {
284
+ "content": "<|reserved_special_token_27|>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "128036": {
292
+ "content": "<|reserved_special_token_28|>",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "128037": {
300
+ "content": "<|reserved_special_token_29|>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "128038": {
308
+ "content": "<|reserved_special_token_30|>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "128039": {
316
+ "content": "<|reserved_special_token_31|>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "128040": {
324
+ "content": "<|reserved_special_token_32|>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "128041": {
332
+ "content": "<|reserved_special_token_33|>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "128042": {
340
+ "content": "<|reserved_special_token_34|>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "128043": {
348
+ "content": "<|reserved_special_token_35|>",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "128044": {
356
+ "content": "<|reserved_special_token_36|>",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "128045": {
364
+ "content": "<|reserved_special_token_37|>",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "128046": {
372
+ "content": "<|reserved_special_token_38|>",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "128047": {
380
+ "content": "<|reserved_special_token_39|>",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "128048": {
388
+ "content": "<|reserved_special_token_40|>",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "128049": {
396
+ "content": "<|reserved_special_token_41|>",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "128050": {
404
+ "content": "<|reserved_special_token_42|>",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "128051": {
412
+ "content": "<|reserved_special_token_43|>",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "128052": {
420
+ "content": "<|reserved_special_token_44|>",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "128053": {
428
+ "content": "<|reserved_special_token_45|>",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "128054": {
436
+ "content": "<|reserved_special_token_46|>",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "128055": {
444
+ "content": "<|reserved_special_token_47|>",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "128056": {
452
+ "content": "<|reserved_special_token_48|>",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "128057": {
460
+ "content": "<|reserved_special_token_49|>",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "128058": {
468
+ "content": "<|reserved_special_token_50|>",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "128059": {
476
+ "content": "<|reserved_special_token_51|>",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "128060": {
484
+ "content": "<|reserved_special_token_52|>",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "128061": {
492
+ "content": "<|reserved_special_token_53|>",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "128062": {
500
+ "content": "<|reserved_special_token_54|>",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "128063": {
508
+ "content": "<|reserved_special_token_55|>",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "128064": {
516
+ "content": "<|reserved_special_token_56|>",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "128065": {
524
+ "content": "<|reserved_special_token_57|>",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "128066": {
532
+ "content": "<|reserved_special_token_58|>",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "128067": {
540
+ "content": "<|reserved_special_token_59|>",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "128068": {
548
+ "content": "<|reserved_special_token_60|>",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "128069": {
556
+ "content": "<|reserved_special_token_61|>",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ },
563
+ "128070": {
564
+ "content": "<|reserved_special_token_62|>",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false,
569
+ "special": true
570
+ },
571
+ "128071": {
572
+ "content": "<|reserved_special_token_63|>",
573
+ "lstrip": false,
574
+ "normalized": false,
575
+ "rstrip": false,
576
+ "single_word": false,
577
+ "special": true
578
+ },
579
+ "128072": {
580
+ "content": "<|reserved_special_token_64|>",
581
+ "lstrip": false,
582
+ "normalized": false,
583
+ "rstrip": false,
584
+ "single_word": false,
585
+ "special": true
586
+ },
587
+ "128073": {
588
+ "content": "<|reserved_special_token_65|>",
589
+ "lstrip": false,
590
+ "normalized": false,
591
+ "rstrip": false,
592
+ "single_word": false,
593
+ "special": true
594
+ },
595
+ "128074": {
596
+ "content": "<|reserved_special_token_66|>",
597
+ "lstrip": false,
598
+ "normalized": false,
599
+ "rstrip": false,
600
+ "single_word": false,
601
+ "special": true
602
+ },
603
+ "128075": {
604
+ "content": "<|reserved_special_token_67|>",
605
+ "lstrip": false,
606
+ "normalized": false,
607
+ "rstrip": false,
608
+ "single_word": false,
609
+ "special": true
610
+ },
611
+ "128076": {
612
+ "content": "<|reserved_special_token_68|>",
613
+ "lstrip": false,
614
+ "normalized": false,
615
+ "rstrip": false,
616
+ "single_word": false,
617
+ "special": true
618
+ },
619
+ "128077": {
620
+ "content": "<|reserved_special_token_69|>",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false,
625
+ "special": true
626
+ },
627
+ "128078": {
628
+ "content": "<|reserved_special_token_70|>",
629
+ "lstrip": false,
630
+ "normalized": false,
631
+ "rstrip": false,
632
+ "single_word": false,
633
+ "special": true
634
+ },
635
+ "128079": {
636
+ "content": "<|reserved_special_token_71|>",
637
+ "lstrip": false,
638
+ "normalized": false,
639
+ "rstrip": false,
640
+ "single_word": false,
641
+ "special": true
642
+ },
643
+ "128080": {
644
+ "content": "<|reserved_special_token_72|>",
645
+ "lstrip": false,
646
+ "normalized": false,
647
+ "rstrip": false,
648
+ "single_word": false,
649
+ "special": true
650
+ },
651
+ "128081": {
652
+ "content": "<|reserved_special_token_73|>",
653
+ "lstrip": false,
654
+ "normalized": false,
655
+ "rstrip": false,
656
+ "single_word": false,
657
+ "special": true
658
+ },
659
+ "128082": {
660
+ "content": "<|reserved_special_token_74|>",
661
+ "lstrip": false,
662
+ "normalized": false,
663
+ "rstrip": false,
664
+ "single_word": false,
665
+ "special": true
666
+ },
667
+ "128083": {
668
+ "content": "<|reserved_special_token_75|>",
669
+ "lstrip": false,
670
+ "normalized": false,
671
+ "rstrip": false,
672
+ "single_word": false,
673
+ "special": true
674
+ },
675
+ "128084": {
676
+ "content": "<|reserved_special_token_76|>",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false,
681
+ "special": true
682
+ },
683
+ "128085": {
684
+ "content": "<|reserved_special_token_77|>",
685
+ "lstrip": false,
686
+ "normalized": false,
687
+ "rstrip": false,
688
+ "single_word": false,
689
+ "special": true
690
+ },
691
+ "128086": {
692
+ "content": "<|reserved_special_token_78|>",
693
+ "lstrip": false,
694
+ "normalized": false,
695
+ "rstrip": false,
696
+ "single_word": false,
697
+ "special": true
698
+ },
699
+ "128087": {
700
+ "content": "<|reserved_special_token_79|>",
701
+ "lstrip": false,
702
+ "normalized": false,
703
+ "rstrip": false,
704
+ "single_word": false,
705
+ "special": true
706
+ },
707
+ "128088": {
708
+ "content": "<|reserved_special_token_80|>",
709
+ "lstrip": false,
710
+ "normalized": false,
711
+ "rstrip": false,
712
+ "single_word": false,
713
+ "special": true
714
+ },
715
+ "128089": {
716
+ "content": "<|reserved_special_token_81|>",
717
+ "lstrip": false,
718
+ "normalized": false,
719
+ "rstrip": false,
720
+ "single_word": false,
721
+ "special": true
722
+ },
723
+ "128090": {
724
+ "content": "<|reserved_special_token_82|>",
725
+ "lstrip": false,
726
+ "normalized": false,
727
+ "rstrip": false,
728
+ "single_word": false,
729
+ "special": true
730
+ },
731
+ "128091": {
732
+ "content": "<|reserved_special_token_83|>",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false,
737
+ "special": true
738
+ },
739
+ "128092": {
740
+ "content": "<|reserved_special_token_84|>",
741
+ "lstrip": false,
742
+ "normalized": false,
743
+ "rstrip": false,
744
+ "single_word": false,
745
+ "special": true
746
+ },
747
+ "128093": {
748
+ "content": "<|reserved_special_token_85|>",
749
+ "lstrip": false,
750
+ "normalized": false,
751
+ "rstrip": false,
752
+ "single_word": false,
753
+ "special": true
754
+ },
755
+ "128094": {
756
+ "content": "<|reserved_special_token_86|>",
757
+ "lstrip": false,
758
+ "normalized": false,
759
+ "rstrip": false,
760
+ "single_word": false,
761
+ "special": true
762
+ },
763
+ "128095": {
764
+ "content": "<|reserved_special_token_87|>",
765
+ "lstrip": false,
766
+ "normalized": false,
767
+ "rstrip": false,
768
+ "single_word": false,
769
+ "special": true
770
+ },
771
+ "128096": {
772
+ "content": "<|reserved_special_token_88|>",
773
+ "lstrip": false,
774
+ "normalized": false,
775
+ "rstrip": false,
776
+ "single_word": false,
777
+ "special": true
778
+ },
779
+ "128097": {
780
+ "content": "<|reserved_special_token_89|>",
781
+ "lstrip": false,
782
+ "normalized": false,
783
+ "rstrip": false,
784
+ "single_word": false,
785
+ "special": true
786
+ },
787
+ "128098": {
788
+ "content": "<|reserved_special_token_90|>",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false,
793
+ "special": true
794
+ },
795
+ "128099": {
796
+ "content": "<|reserved_special_token_91|>",
797
+ "lstrip": false,
798
+ "normalized": false,
799
+ "rstrip": false,
800
+ "single_word": false,
801
+ "special": true
802
+ },
803
+ "128100": {
804
+ "content": "<|reserved_special_token_92|>",
805
+ "lstrip": false,
806
+ "normalized": false,
807
+ "rstrip": false,
808
+ "single_word": false,
809
+ "special": true
810
+ },
811
+ "128101": {
812
+ "content": "<|reserved_special_token_93|>",
813
+ "lstrip": false,
814
+ "normalized": false,
815
+ "rstrip": false,
816
+ "single_word": false,
817
+ "special": true
818
+ },
819
+ "128102": {
820
+ "content": "<|reserved_special_token_94|>",
821
+ "lstrip": false,
822
+ "normalized": false,
823
+ "rstrip": false,
824
+ "single_word": false,
825
+ "special": true
826
+ },
827
+ "128103": {
828
+ "content": "<|reserved_special_token_95|>",
829
+ "lstrip": false,
830
+ "normalized": false,
831
+ "rstrip": false,
832
+ "single_word": false,
833
+ "special": true
834
+ },
835
+ "128104": {
836
+ "content": "<|reserved_special_token_96|>",
837
+ "lstrip": false,
838
+ "normalized": false,
839
+ "rstrip": false,
840
+ "single_word": false,
841
+ "special": true
842
+ },
843
+ "128105": {
844
+ "content": "<|reserved_special_token_97|>",
845
+ "lstrip": false,
846
+ "normalized": false,
847
+ "rstrip": false,
848
+ "single_word": false,
849
+ "special": true
850
+ },
851
+ "128106": {
852
+ "content": "<|reserved_special_token_98|>",
853
+ "lstrip": false,
854
+ "normalized": false,
855
+ "rstrip": false,
856
+ "single_word": false,
857
+ "special": true
858
+ },
859
+ "128107": {
860
+ "content": "<|reserved_special_token_99|>",
861
+ "lstrip": false,
862
+ "normalized": false,
863
+ "rstrip": false,
864
+ "single_word": false,
865
+ "special": true
866
+ },
867
+ "128108": {
868
+ "content": "<|reserved_special_token_100|>",
869
+ "lstrip": false,
870
+ "normalized": false,
871
+ "rstrip": false,
872
+ "single_word": false,
873
+ "special": true
874
+ },
875
+ "128109": {
876
+ "content": "<|reserved_special_token_101|>",
877
+ "lstrip": false,
878
+ "normalized": false,
879
+ "rstrip": false,
880
+ "single_word": false,
881
+ "special": true
882
+ },
883
+ "128110": {
884
+ "content": "<|reserved_special_token_102|>",
885
+ "lstrip": false,
886
+ "normalized": false,
887
+ "rstrip": false,
888
+ "single_word": false,
889
+ "special": true
890
+ },
891
+ "128111": {
892
+ "content": "<|reserved_special_token_103|>",
893
+ "lstrip": false,
894
+ "normalized": false,
895
+ "rstrip": false,
896
+ "single_word": false,
897
+ "special": true
898
+ },
899
+ "128112": {
900
+ "content": "<|reserved_special_token_104|>",
901
+ "lstrip": false,
902
+ "normalized": false,
903
+ "rstrip": false,
904
+ "single_word": false,
905
+ "special": true
906
+ },
907
+ "128113": {
908
+ "content": "<|reserved_special_token_105|>",
909
+ "lstrip": false,
910
+ "normalized": false,
911
+ "rstrip": false,
912
+ "single_word": false,
913
+ "special": true
914
+ },
915
+ "128114": {
916
+ "content": "<|reserved_special_token_106|>",
917
+ "lstrip": false,
918
+ "normalized": false,
919
+ "rstrip": false,
920
+ "single_word": false,
921
+ "special": true
922
+ },
923
+ "128115": {
924
+ "content": "<|reserved_special_token_107|>",
925
+ "lstrip": false,
926
+ "normalized": false,
927
+ "rstrip": false,
928
+ "single_word": false,
929
+ "special": true
930
+ },
931
+ "128116": {
932
+ "content": "<|reserved_special_token_108|>",
933
+ "lstrip": false,
934
+ "normalized": false,
935
+ "rstrip": false,
936
+ "single_word": false,
937
+ "special": true
938
+ },
939
+ "128117": {
940
+ "content": "<|reserved_special_token_109|>",
941
+ "lstrip": false,
942
+ "normalized": false,
943
+ "rstrip": false,
944
+ "single_word": false,
945
+ "special": true
946
+ },
947
+ "128118": {
948
+ "content": "<|reserved_special_token_110|>",
949
+ "lstrip": false,
950
+ "normalized": false,
951
+ "rstrip": false,
952
+ "single_word": false,
953
+ "special": true
954
+ },
955
+ "128119": {
956
+ "content": "<|reserved_special_token_111|>",
957
+ "lstrip": false,
958
+ "normalized": false,
959
+ "rstrip": false,
960
+ "single_word": false,
961
+ "special": true
962
+ },
963
+ "128120": {
964
+ "content": "<|reserved_special_token_112|>",
965
+ "lstrip": false,
966
+ "normalized": false,
967
+ "rstrip": false,
968
+ "single_word": false,
969
+ "special": true
970
+ },
971
+ "128121": {
972
+ "content": "<|reserved_special_token_113|>",
973
+ "lstrip": false,
974
+ "normalized": false,
975
+ "rstrip": false,
976
+ "single_word": false,
977
+ "special": true
978
+ },
979
+ "128122": {
980
+ "content": "<|reserved_special_token_114|>",
981
+ "lstrip": false,
982
+ "normalized": false,
983
+ "rstrip": false,
984
+ "single_word": false,
985
+ "special": true
986
+ },
987
+ "128123": {
988
+ "content": "<|reserved_special_token_115|>",
989
+ "lstrip": false,
990
+ "normalized": false,
991
+ "rstrip": false,
992
+ "single_word": false,
993
+ "special": true
994
+ },
995
+ "128124": {
996
+ "content": "<|reserved_special_token_116|>",
997
+ "lstrip": false,
998
+ "normalized": false,
999
+ "rstrip": false,
1000
+ "single_word": false,
1001
+ "special": true
1002
+ },
1003
+ "128125": {
1004
+ "content": "<|reserved_special_token_117|>",
1005
+ "lstrip": false,
1006
+ "normalized": false,
1007
+ "rstrip": false,
1008
+ "single_word": false,
1009
+ "special": true
1010
+ },
1011
+ "128126": {
1012
+ "content": "<|reserved_special_token_118|>",
1013
+ "lstrip": false,
1014
+ "normalized": false,
1015
+ "rstrip": false,
1016
+ "single_word": false,
1017
+ "special": true
1018
+ },
1019
+ "128127": {
1020
+ "content": "<|reserved_special_token_119|>",
1021
+ "lstrip": false,
1022
+ "normalized": false,
1023
+ "rstrip": false,
1024
+ "single_word": false,
1025
+ "special": true
1026
+ },
1027
+ "128128": {
1028
+ "content": "<|reserved_special_token_120|>",
1029
+ "lstrip": false,
1030
+ "normalized": false,
1031
+ "rstrip": false,
1032
+ "single_word": false,
1033
+ "special": true
1034
+ },
1035
+ "128129": {
1036
+ "content": "<|reserved_special_token_121|>",
1037
+ "lstrip": false,
1038
+ "normalized": false,
1039
+ "rstrip": false,
1040
+ "single_word": false,
1041
+ "special": true
1042
+ },
1043
+ "128130": {
1044
+ "content": "<|reserved_special_token_122|>",
1045
+ "lstrip": false,
1046
+ "normalized": false,
1047
+ "rstrip": false,
1048
+ "single_word": false,
1049
+ "special": true
1050
+ },
1051
+ "128131": {
1052
+ "content": "<|reserved_special_token_123|>",
1053
+ "lstrip": false,
1054
+ "normalized": false,
1055
+ "rstrip": false,
1056
+ "single_word": false,
1057
+ "special": true
1058
+ },
1059
+ "128132": {
1060
+ "content": "<|reserved_special_token_124|>",
1061
+ "lstrip": false,
1062
+ "normalized": false,
1063
+ "rstrip": false,
1064
+ "single_word": false,
1065
+ "special": true
1066
+ },
1067
+ "128133": {
1068
+ "content": "<|reserved_special_token_125|>",
1069
+ "lstrip": false,
1070
+ "normalized": false,
1071
+ "rstrip": false,
1072
+ "single_word": false,
1073
+ "special": true
1074
+ },
1075
+ "128134": {
1076
+ "content": "<|reserved_special_token_126|>",
1077
+ "lstrip": false,
1078
+ "normalized": false,
1079
+ "rstrip": false,
1080
+ "single_word": false,
1081
+ "special": true
1082
+ },
1083
+ "128135": {
1084
+ "content": "<|reserved_special_token_127|>",
1085
+ "lstrip": false,
1086
+ "normalized": false,
1087
+ "rstrip": false,
1088
+ "single_word": false,
1089
+ "special": true
1090
+ },
1091
+ "128136": {
1092
+ "content": "<|reserved_special_token_128|>",
1093
+ "lstrip": false,
1094
+ "normalized": false,
1095
+ "rstrip": false,
1096
+ "single_word": false,
1097
+ "special": true
1098
+ },
1099
+ "128137": {
1100
+ "content": "<|reserved_special_token_129|>",
1101
+ "lstrip": false,
1102
+ "normalized": false,
1103
+ "rstrip": false,
1104
+ "single_word": false,
1105
+ "special": true
1106
+ },
1107
+ "128138": {
1108
+ "content": "<|reserved_special_token_130|>",
1109
+ "lstrip": false,
1110
+ "normalized": false,
1111
+ "rstrip": false,
1112
+ "single_word": false,
1113
+ "special": true
1114
+ },
1115
+ "128139": {
1116
+ "content": "<|reserved_special_token_131|>",
1117
+ "lstrip": false,
1118
+ "normalized": false,
1119
+ "rstrip": false,
1120
+ "single_word": false,
1121
+ "special": true
1122
+ },
1123
+ "128140": {
1124
+ "content": "<|reserved_special_token_132|>",
1125
+ "lstrip": false,
1126
+ "normalized": false,
1127
+ "rstrip": false,
1128
+ "single_word": false,
1129
+ "special": true
1130
+ },
1131
+ "128141": {
1132
+ "content": "<|reserved_special_token_133|>",
1133
+ "lstrip": false,
1134
+ "normalized": false,
1135
+ "rstrip": false,
1136
+ "single_word": false,
1137
+ "special": true
1138
+ },
1139
+ "128142": {
1140
+ "content": "<|reserved_special_token_134|>",
1141
+ "lstrip": false,
1142
+ "normalized": false,
1143
+ "rstrip": false,
1144
+ "single_word": false,
1145
+ "special": true
1146
+ },
1147
+ "128143": {
1148
+ "content": "<|reserved_special_token_135|>",
1149
+ "lstrip": false,
1150
+ "normalized": false,
1151
+ "rstrip": false,
1152
+ "single_word": false,
1153
+ "special": true
1154
+ },
1155
+ "128144": {
1156
+ "content": "<|reserved_special_token_136|>",
1157
+ "lstrip": false,
1158
+ "normalized": false,
1159
+ "rstrip": false,
1160
+ "single_word": false,
1161
+ "special": true
1162
+ },
1163
+ "128145": {
1164
+ "content": "<|reserved_special_token_137|>",
1165
+ "lstrip": false,
1166
+ "normalized": false,
1167
+ "rstrip": false,
1168
+ "single_word": false,
1169
+ "special": true
1170
+ },
1171
+ "128146": {
1172
+ "content": "<|reserved_special_token_138|>",
1173
+ "lstrip": false,
1174
+ "normalized": false,
1175
+ "rstrip": false,
1176
+ "single_word": false,
1177
+ "special": true
1178
+ },
1179
+ "128147": {
1180
+ "content": "<|reserved_special_token_139|>",
1181
+ "lstrip": false,
1182
+ "normalized": false,
1183
+ "rstrip": false,
1184
+ "single_word": false,
1185
+ "special": true
1186
+ },
1187
+ "128148": {
1188
+ "content": "<|reserved_special_token_140|>",
1189
+ "lstrip": false,
1190
+ "normalized": false,
1191
+ "rstrip": false,
1192
+ "single_word": false,
1193
+ "special": true
1194
+ },
1195
+ "128149": {
1196
+ "content": "<|reserved_special_token_141|>",
1197
+ "lstrip": false,
1198
+ "normalized": false,
1199
+ "rstrip": false,
1200
+ "single_word": false,
1201
+ "special": true
1202
+ },
1203
+ "128150": {
1204
+ "content": "<|reserved_special_token_142|>",
1205
+ "lstrip": false,
1206
+ "normalized": false,
1207
+ "rstrip": false,
1208
+ "single_word": false,
1209
+ "special": true
1210
+ },
1211
+ "128151": {
1212
+ "content": "<|reserved_special_token_143|>",
1213
+ "lstrip": false,
1214
+ "normalized": false,
1215
+ "rstrip": false,
1216
+ "single_word": false,
1217
+ "special": true
1218
+ },
1219
+ "128152": {
1220
+ "content": "<|reserved_special_token_144|>",
1221
+ "lstrip": false,
1222
+ "normalized": false,
1223
+ "rstrip": false,
1224
+ "single_word": false,
1225
+ "special": true
1226
+ },
1227
+ "128153": {
1228
+ "content": "<|reserved_special_token_145|>",
1229
+ "lstrip": false,
1230
+ "normalized": false,
1231
+ "rstrip": false,
1232
+ "single_word": false,
1233
+ "special": true
1234
+ },
1235
+ "128154": {
1236
+ "content": "<|reserved_special_token_146|>",
1237
+ "lstrip": false,
1238
+ "normalized": false,
1239
+ "rstrip": false,
1240
+ "single_word": false,
1241
+ "special": true
1242
+ },
1243
+ "128155": {
1244
+ "content": "<|reserved_special_token_147|>",
1245
+ "lstrip": false,
1246
+ "normalized": false,
1247
+ "rstrip": false,
1248
+ "single_word": false,
1249
+ "special": true
1250
+ },
1251
+ "128156": {
1252
+ "content": "<|reserved_special_token_148|>",
1253
+ "lstrip": false,
1254
+ "normalized": false,
1255
+ "rstrip": false,
1256
+ "single_word": false,
1257
+ "special": true
1258
+ },
1259
+ "128157": {
1260
+ "content": "<|reserved_special_token_149|>",
1261
+ "lstrip": false,
1262
+ "normalized": false,
1263
+ "rstrip": false,
1264
+ "single_word": false,
1265
+ "special": true
1266
+ },
1267
+ "128158": {
1268
+ "content": "<|reserved_special_token_150|>",
1269
+ "lstrip": false,
1270
+ "normalized": false,
1271
+ "rstrip": false,
1272
+ "single_word": false,
1273
+ "special": true
1274
+ },
1275
+ "128159": {
1276
+ "content": "<|reserved_special_token_151|>",
1277
+ "lstrip": false,
1278
+ "normalized": false,
1279
+ "rstrip": false,
1280
+ "single_word": false,
1281
+ "special": true
1282
+ },
1283
+ "128160": {
1284
+ "content": "<|reserved_special_token_152|>",
1285
+ "lstrip": false,
1286
+ "normalized": false,
1287
+ "rstrip": false,
1288
+ "single_word": false,
1289
+ "special": true
1290
+ },
1291
+ "128161": {
1292
+ "content": "<|reserved_special_token_153|>",
1293
+ "lstrip": false,
1294
+ "normalized": false,
1295
+ "rstrip": false,
1296
+ "single_word": false,
1297
+ "special": true
1298
+ },
1299
+ "128162": {
1300
+ "content": "<|reserved_special_token_154|>",
1301
+ "lstrip": false,
1302
+ "normalized": false,
1303
+ "rstrip": false,
1304
+ "single_word": false,
1305
+ "special": true
1306
+ },
1307
+ "128163": {
1308
+ "content": "<|reserved_special_token_155|>",
1309
+ "lstrip": false,
1310
+ "normalized": false,
1311
+ "rstrip": false,
1312
+ "single_word": false,
1313
+ "special": true
1314
+ },
1315
+ "128164": {
1316
+ "content": "<|reserved_special_token_156|>",
1317
+ "lstrip": false,
1318
+ "normalized": false,
1319
+ "rstrip": false,
1320
+ "single_word": false,
1321
+ "special": true
1322
+ },
1323
+ "128165": {
1324
+ "content": "<|reserved_special_token_157|>",
1325
+ "lstrip": false,
1326
+ "normalized": false,
1327
+ "rstrip": false,
1328
+ "single_word": false,
1329
+ "special": true
1330
+ },
1331
+ "128166": {
1332
+ "content": "<|reserved_special_token_158|>",
1333
+ "lstrip": false,
1334
+ "normalized": false,
1335
+ "rstrip": false,
1336
+ "single_word": false,
1337
+ "special": true
1338
+ },
1339
+ "128167": {
1340
+ "content": "<|reserved_special_token_159|>",
1341
+ "lstrip": false,
1342
+ "normalized": false,
1343
+ "rstrip": false,
1344
+ "single_word": false,
1345
+ "special": true
1346
+ },
1347
+ "128168": {
1348
+ "content": "<|reserved_special_token_160|>",
1349
+ "lstrip": false,
1350
+ "normalized": false,
1351
+ "rstrip": false,
1352
+ "single_word": false,
1353
+ "special": true
1354
+ },
1355
+ "128169": {
1356
+ "content": "<|reserved_special_token_161|>",
1357
+ "lstrip": false,
1358
+ "normalized": false,
1359
+ "rstrip": false,
1360
+ "single_word": false,
1361
+ "special": true
1362
+ },
1363
+ "128170": {
1364
+ "content": "<|reserved_special_token_162|>",
1365
+ "lstrip": false,
1366
+ "normalized": false,
1367
+ "rstrip": false,
1368
+ "single_word": false,
1369
+ "special": true
1370
+ },
1371
+ "128171": {
1372
+ "content": "<|reserved_special_token_163|>",
1373
+ "lstrip": false,
1374
+ "normalized": false,
1375
+ "rstrip": false,
1376
+ "single_word": false,
1377
+ "special": true
1378
+ },
1379
+ "128172": {
1380
+ "content": "<|reserved_special_token_164|>",
1381
+ "lstrip": false,
1382
+ "normalized": false,
1383
+ "rstrip": false,
1384
+ "single_word": false,
1385
+ "special": true
1386
+ },
1387
+ "128173": {
1388
+ "content": "<|reserved_special_token_165|>",
1389
+ "lstrip": false,
1390
+ "normalized": false,
1391
+ "rstrip": false,
1392
+ "single_word": false,
1393
+ "special": true
1394
+ },
1395
+ "128174": {
1396
+ "content": "<|reserved_special_token_166|>",
1397
+ "lstrip": false,
1398
+ "normalized": false,
1399
+ "rstrip": false,
1400
+ "single_word": false,
1401
+ "special": true
1402
+ },
1403
+ "128175": {
1404
+ "content": "<|reserved_special_token_167|>",
1405
+ "lstrip": false,
1406
+ "normalized": false,
1407
+ "rstrip": false,
1408
+ "single_word": false,
1409
+ "special": true
1410
+ },
1411
+ "128176": {
1412
+ "content": "<|reserved_special_token_168|>",
1413
+ "lstrip": false,
1414
+ "normalized": false,
1415
+ "rstrip": false,
1416
+ "single_word": false,
1417
+ "special": true
1418
+ },
1419
+ "128177": {
1420
+ "content": "<|reserved_special_token_169|>",
1421
+ "lstrip": false,
1422
+ "normalized": false,
1423
+ "rstrip": false,
1424
+ "single_word": false,
1425
+ "special": true
1426
+ },
1427
+ "128178": {
1428
+ "content": "<|reserved_special_token_170|>",
1429
+ "lstrip": false,
1430
+ "normalized": false,
1431
+ "rstrip": false,
1432
+ "single_word": false,
1433
+ "special": true
1434
+ },
1435
+ "128179": {
1436
+ "content": "<|reserved_special_token_171|>",
1437
+ "lstrip": false,
1438
+ "normalized": false,
1439
+ "rstrip": false,
1440
+ "single_word": false,
1441
+ "special": true
1442
+ },
1443
+ "128180": {
1444
+ "content": "<|reserved_special_token_172|>",
1445
+ "lstrip": false,
1446
+ "normalized": false,
1447
+ "rstrip": false,
1448
+ "single_word": false,
1449
+ "special": true
1450
+ },
1451
+ "128181": {
1452
+ "content": "<|reserved_special_token_173|>",
1453
+ "lstrip": false,
1454
+ "normalized": false,
1455
+ "rstrip": false,
1456
+ "single_word": false,
1457
+ "special": true
1458
+ },
1459
+ "128182": {
1460
+ "content": "<|reserved_special_token_174|>",
1461
+ "lstrip": false,
1462
+ "normalized": false,
1463
+ "rstrip": false,
1464
+ "single_word": false,
1465
+ "special": true
1466
+ },
1467
+ "128183": {
1468
+ "content": "<|reserved_special_token_175|>",
1469
+ "lstrip": false,
1470
+ "normalized": false,
1471
+ "rstrip": false,
1472
+ "single_word": false,
1473
+ "special": true
1474
+ },
1475
+ "128184": {
1476
+ "content": "<|reserved_special_token_176|>",
1477
+ "lstrip": false,
1478
+ "normalized": false,
1479
+ "rstrip": false,
1480
+ "single_word": false,
1481
+ "special": true
1482
+ },
1483
+ "128185": {
1484
+ "content": "<|reserved_special_token_177|>",
1485
+ "lstrip": false,
1486
+ "normalized": false,
1487
+ "rstrip": false,
1488
+ "single_word": false,
1489
+ "special": true
1490
+ },
1491
+ "128186": {
1492
+ "content": "<|reserved_special_token_178|>",
1493
+ "lstrip": false,
1494
+ "normalized": false,
1495
+ "rstrip": false,
1496
+ "single_word": false,
1497
+ "special": true
1498
+ },
1499
+ "128187": {
1500
+ "content": "<|reserved_special_token_179|>",
1501
+ "lstrip": false,
1502
+ "normalized": false,
1503
+ "rstrip": false,
1504
+ "single_word": false,
1505
+ "special": true
1506
+ },
1507
+ "128188": {
1508
+ "content": "<|reserved_special_token_180|>",
1509
+ "lstrip": false,
1510
+ "normalized": false,
1511
+ "rstrip": false,
1512
+ "single_word": false,
1513
+ "special": true
1514
+ },
1515
+ "128189": {
1516
+ "content": "<|reserved_special_token_181|>",
1517
+ "lstrip": false,
1518
+ "normalized": false,
1519
+ "rstrip": false,
1520
+ "single_word": false,
1521
+ "special": true
1522
+ },
1523
+ "128190": {
1524
+ "content": "<|reserved_special_token_182|>",
1525
+ "lstrip": false,
1526
+ "normalized": false,
1527
+ "rstrip": false,
1528
+ "single_word": false,
1529
+ "special": true
1530
+ },
1531
+ "128191": {
1532
+ "content": "<|reserved_special_token_183|>",
1533
+ "lstrip": false,
1534
+ "normalized": false,
1535
+ "rstrip": false,
1536
+ "single_word": false,
1537
+ "special": true
1538
+ },
1539
+ "128192": {
1540
+ "content": "<|reserved_special_token_184|>",
1541
+ "lstrip": false,
1542
+ "normalized": false,
1543
+ "rstrip": false,
1544
+ "single_word": false,
1545
+ "special": true
1546
+ },
1547
+ "128193": {
1548
+ "content": "<|reserved_special_token_185|>",
1549
+ "lstrip": false,
1550
+ "normalized": false,
1551
+ "rstrip": false,
1552
+ "single_word": false,
1553
+ "special": true
1554
+ },
1555
+ "128194": {
1556
+ "content": "<|reserved_special_token_186|>",
1557
+ "lstrip": false,
1558
+ "normalized": false,
1559
+ "rstrip": false,
1560
+ "single_word": false,
1561
+ "special": true
1562
+ },
1563
+ "128195": {
1564
+ "content": "<|reserved_special_token_187|>",
1565
+ "lstrip": false,
1566
+ "normalized": false,
1567
+ "rstrip": false,
1568
+ "single_word": false,
1569
+ "special": true
1570
+ },
1571
+ "128196": {
1572
+ "content": "<|reserved_special_token_188|>",
1573
+ "lstrip": false,
1574
+ "normalized": false,
1575
+ "rstrip": false,
1576
+ "single_word": false,
1577
+ "special": true
1578
+ },
1579
+ "128197": {
1580
+ "content": "<|reserved_special_token_189|>",
1581
+ "lstrip": false,
1582
+ "normalized": false,
1583
+ "rstrip": false,
1584
+ "single_word": false,
1585
+ "special": true
1586
+ },
1587
+ "128198": {
1588
+ "content": "<|reserved_special_token_190|>",
1589
+ "lstrip": false,
1590
+ "normalized": false,
1591
+ "rstrip": false,
1592
+ "single_word": false,
1593
+ "special": true
1594
+ },
1595
+ "128199": {
1596
+ "content": "<|reserved_special_token_191|>",
1597
+ "lstrip": false,
1598
+ "normalized": false,
1599
+ "rstrip": false,
1600
+ "single_word": false,
1601
+ "special": true
1602
+ },
1603
+ "128200": {
1604
+ "content": "<|reserved_special_token_192|>",
1605
+ "lstrip": false,
1606
+ "normalized": false,
1607
+ "rstrip": false,
1608
+ "single_word": false,
1609
+ "special": true
1610
+ },
1611
+ "128201": {
1612
+ "content": "<|reserved_special_token_193|>",
1613
+ "lstrip": false,
1614
+ "normalized": false,
1615
+ "rstrip": false,
1616
+ "single_word": false,
1617
+ "special": true
1618
+ },
1619
+ "128202": {
1620
+ "content": "<|reserved_special_token_194|>",
1621
+ "lstrip": false,
1622
+ "normalized": false,
1623
+ "rstrip": false,
1624
+ "single_word": false,
1625
+ "special": true
1626
+ },
1627
+ "128203": {
1628
+ "content": "<|reserved_special_token_195|>",
1629
+ "lstrip": false,
1630
+ "normalized": false,
1631
+ "rstrip": false,
1632
+ "single_word": false,
1633
+ "special": true
1634
+ },
1635
+ "128204": {
1636
+ "content": "<|reserved_special_token_196|>",
1637
+ "lstrip": false,
1638
+ "normalized": false,
1639
+ "rstrip": false,
1640
+ "single_word": false,
1641
+ "special": true
1642
+ },
1643
+ "128205": {
1644
+ "content": "<|reserved_special_token_197|>",
1645
+ "lstrip": false,
1646
+ "normalized": false,
1647
+ "rstrip": false,
1648
+ "single_word": false,
1649
+ "special": true
1650
+ },
1651
+ "128206": {
1652
+ "content": "<|reserved_special_token_198|>",
1653
+ "lstrip": false,
1654
+ "normalized": false,
1655
+ "rstrip": false,
1656
+ "single_word": false,
1657
+ "special": true
1658
+ },
1659
+ "128207": {
1660
+ "content": "<|reserved_special_token_199|>",
1661
+ "lstrip": false,
1662
+ "normalized": false,
1663
+ "rstrip": false,
1664
+ "single_word": false,
1665
+ "special": true
1666
+ },
1667
+ "128208": {
1668
+ "content": "<|reserved_special_token_200|>",
1669
+ "lstrip": false,
1670
+ "normalized": false,
1671
+ "rstrip": false,
1672
+ "single_word": false,
1673
+ "special": true
1674
+ },
1675
+ "128209": {
1676
+ "content": "<|reserved_special_token_201|>",
1677
+ "lstrip": false,
1678
+ "normalized": false,
1679
+ "rstrip": false,
1680
+ "single_word": false,
1681
+ "special": true
1682
+ },
1683
+ "128210": {
1684
+ "content": "<|reserved_special_token_202|>",
1685
+ "lstrip": false,
1686
+ "normalized": false,
1687
+ "rstrip": false,
1688
+ "single_word": false,
1689
+ "special": true
1690
+ },
1691
+ "128211": {
1692
+ "content": "<|reserved_special_token_203|>",
1693
+ "lstrip": false,
1694
+ "normalized": false,
1695
+ "rstrip": false,
1696
+ "single_word": false,
1697
+ "special": true
1698
+ },
1699
+ "128212": {
1700
+ "content": "<|reserved_special_token_204|>",
1701
+ "lstrip": false,
1702
+ "normalized": false,
1703
+ "rstrip": false,
1704
+ "single_word": false,
1705
+ "special": true
1706
+ },
1707
+ "128213": {
1708
+ "content": "<|reserved_special_token_205|>",
1709
+ "lstrip": false,
1710
+ "normalized": false,
1711
+ "rstrip": false,
1712
+ "single_word": false,
1713
+ "special": true
1714
+ },
1715
+ "128214": {
1716
+ "content": "<|reserved_special_token_206|>",
1717
+ "lstrip": false,
1718
+ "normalized": false,
1719
+ "rstrip": false,
1720
+ "single_word": false,
1721
+ "special": true
1722
+ },
1723
+ "128215": {
1724
+ "content": "<|reserved_special_token_207|>",
1725
+ "lstrip": false,
1726
+ "normalized": false,
1727
+ "rstrip": false,
1728
+ "single_word": false,
1729
+ "special": true
1730
+ },
1731
+ "128216": {
1732
+ "content": "<|reserved_special_token_208|>",
1733
+ "lstrip": false,
1734
+ "normalized": false,
1735
+ "rstrip": false,
1736
+ "single_word": false,
1737
+ "special": true
1738
+ },
1739
+ "128217": {
1740
+ "content": "<|reserved_special_token_209|>",
1741
+ "lstrip": false,
1742
+ "normalized": false,
1743
+ "rstrip": false,
1744
+ "single_word": false,
1745
+ "special": true
1746
+ },
1747
+ "128218": {
1748
+ "content": "<|reserved_special_token_210|>",
1749
+ "lstrip": false,
1750
+ "normalized": false,
1751
+ "rstrip": false,
1752
+ "single_word": false,
1753
+ "special": true
1754
+ },
1755
+ "128219": {
1756
+ "content": "<|reserved_special_token_211|>",
1757
+ "lstrip": false,
1758
+ "normalized": false,
1759
+ "rstrip": false,
1760
+ "single_word": false,
1761
+ "special": true
1762
+ },
1763
+ "128220": {
1764
+ "content": "<|reserved_special_token_212|>",
1765
+ "lstrip": false,
1766
+ "normalized": false,
1767
+ "rstrip": false,
1768
+ "single_word": false,
1769
+ "special": true
1770
+ },
1771
+ "128221": {
1772
+ "content": "<|reserved_special_token_213|>",
1773
+ "lstrip": false,
1774
+ "normalized": false,
1775
+ "rstrip": false,
1776
+ "single_word": false,
1777
+ "special": true
1778
+ },
1779
+ "128222": {
1780
+ "content": "<|reserved_special_token_214|>",
1781
+ "lstrip": false,
1782
+ "normalized": false,
1783
+ "rstrip": false,
1784
+ "single_word": false,
1785
+ "special": true
1786
+ },
1787
+ "128223": {
1788
+ "content": "<|reserved_special_token_215|>",
1789
+ "lstrip": false,
1790
+ "normalized": false,
1791
+ "rstrip": false,
1792
+ "single_word": false,
1793
+ "special": true
1794
+ },
1795
+ "128224": {
1796
+ "content": "<|reserved_special_token_216|>",
1797
+ "lstrip": false,
1798
+ "normalized": false,
1799
+ "rstrip": false,
1800
+ "single_word": false,
1801
+ "special": true
1802
+ },
1803
+ "128225": {
1804
+ "content": "<|reserved_special_token_217|>",
1805
+ "lstrip": false,
1806
+ "normalized": false,
1807
+ "rstrip": false,
1808
+ "single_word": false,
1809
+ "special": true
1810
+ },
1811
+ "128226": {
1812
+ "content": "<|reserved_special_token_218|>",
1813
+ "lstrip": false,
1814
+ "normalized": false,
1815
+ "rstrip": false,
1816
+ "single_word": false,
1817
+ "special": true
1818
+ },
1819
+ "128227": {
1820
+ "content": "<|reserved_special_token_219|>",
1821
+ "lstrip": false,
1822
+ "normalized": false,
1823
+ "rstrip": false,
1824
+ "single_word": false,
1825
+ "special": true
1826
+ },
1827
+ "128228": {
1828
+ "content": "<|reserved_special_token_220|>",
1829
+ "lstrip": false,
1830
+ "normalized": false,
1831
+ "rstrip": false,
1832
+ "single_word": false,
1833
+ "special": true
1834
+ },
1835
+ "128229": {
1836
+ "content": "<|reserved_special_token_221|>",
1837
+ "lstrip": false,
1838
+ "normalized": false,
1839
+ "rstrip": false,
1840
+ "single_word": false,
1841
+ "special": true
1842
+ },
1843
+ "128230": {
1844
+ "content": "<|reserved_special_token_222|>",
1845
+ "lstrip": false,
1846
+ "normalized": false,
1847
+ "rstrip": false,
1848
+ "single_word": false,
1849
+ "special": true
1850
+ },
1851
+ "128231": {
1852
+ "content": "<|reserved_special_token_223|>",
1853
+ "lstrip": false,
1854
+ "normalized": false,
1855
+ "rstrip": false,
1856
+ "single_word": false,
1857
+ "special": true
1858
+ },
1859
+ "128232": {
1860
+ "content": "<|reserved_special_token_224|>",
1861
+ "lstrip": false,
1862
+ "normalized": false,
1863
+ "rstrip": false,
1864
+ "single_word": false,
1865
+ "special": true
1866
+ },
1867
+ "128233": {
1868
+ "content": "<|reserved_special_token_225|>",
1869
+ "lstrip": false,
1870
+ "normalized": false,
1871
+ "rstrip": false,
1872
+ "single_word": false,
1873
+ "special": true
1874
+ },
1875
+ "128234": {
1876
+ "content": "<|reserved_special_token_226|>",
1877
+ "lstrip": false,
1878
+ "normalized": false,
1879
+ "rstrip": false,
1880
+ "single_word": false,
1881
+ "special": true
1882
+ },
1883
+ "128235": {
1884
+ "content": "<|reserved_special_token_227|>",
1885
+ "lstrip": false,
1886
+ "normalized": false,
1887
+ "rstrip": false,
1888
+ "single_word": false,
1889
+ "special": true
1890
+ },
1891
+ "128236": {
1892
+ "content": "<|reserved_special_token_228|>",
1893
+ "lstrip": false,
1894
+ "normalized": false,
1895
+ "rstrip": false,
1896
+ "single_word": false,
1897
+ "special": true
1898
+ },
1899
+ "128237": {
1900
+ "content": "<|reserved_special_token_229|>",
1901
+ "lstrip": false,
1902
+ "normalized": false,
1903
+ "rstrip": false,
1904
+ "single_word": false,
1905
+ "special": true
1906
+ },
1907
+ "128238": {
1908
+ "content": "<|reserved_special_token_230|>",
1909
+ "lstrip": false,
1910
+ "normalized": false,
1911
+ "rstrip": false,
1912
+ "single_word": false,
1913
+ "special": true
+     },
+     "128239": {
+       "content": "<|reserved_special_token_231|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128240": {
+       "content": "<|reserved_special_token_232|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128241": {
+       "content": "<|reserved_special_token_233|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128242": {
+       "content": "<|reserved_special_token_234|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128243": {
+       "content": "<|reserved_special_token_235|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128244": {
+       "content": "<|reserved_special_token_236|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128245": {
+       "content": "<|reserved_special_token_237|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128246": {
+       "content": "<|reserved_special_token_238|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128247": {
+       "content": "<|reserved_special_token_239|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128248": {
+       "content": "<|reserved_special_token_240|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128249": {
+       "content": "<|reserved_special_token_241|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128250": {
+       "content": "<|reserved_special_token_242|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128251": {
+       "content": "<|reserved_special_token_243|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128252": {
+       "content": "<|reserved_special_token_244|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128253": {
+       "content": "<|reserved_special_token_245|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128254": {
+       "content": "<|reserved_special_token_246|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "128255": {
+       "content": "<|reserved_special_token_247|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|begin_of_text|>",
+   "chat_template": "{{- bos_token }}\n{%- if custom_tools is defined %}\n    {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n    {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n    {%- if strftime_now is defined %}\n        {%- set date_string = strftime_now(\"%d %b %Y\") %}\n    {%- else %}\n        {%- set date_string = \"26 Jul 2024\" %}\n    {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n    {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content']|trim %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{#- System message #}\n{{- \"<|start_header_id|>system<|end_header_id|>\\n\\n\" }}\n{%- if tools is not none %}\n    {{- \"Environment: ipython\\n\" }}\n{%- endif %}\n{{- \"Cutting Knowledge Date: December 2023\\n\" }}\n{{- \"Today Date: \" + date_string + \"\\n\\n\" }}\n{%- if tools is not none and not tools_in_user_message %}\n    {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n{%- endif %}\n{{- system_message }}\n{{- \"<|eot_id|>\" }}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n    {#- Extract the first user message so we can plug it in here #}\n    {%- if messages | length != 0 %}\n        {%- set first_user_message = messages[0]['content']|trim %}\n        {%- set messages = messages[1:] %}\n    {%- else %}\n        {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n    {{- '<|start_header_id|>user<|end_header_id|>\\n\\n' -}}\n    {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n    {{- \"with its proper arguments that best answers the given prompt.\\n\\n\" }}\n    {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n    {{- \"Do not use variables.\\n\\n\" }}\n    {%- for t in tools %}\n        {{- t | tojson(indent=4) }}\n        {{- \"\\n\\n\" }}\n    {%- endfor %}\n    {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n    {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n        {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n\\n'+ message['content'] | trim + '<|eot_id|>' }}\n    {%- elif 'tool_calls' in message %}\n        {%- if not message.tool_calls|length == 1 %}\n            {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n        {%- endif %}\n        {%- set tool_call = message.tool_calls[0].function %}\n        {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' -}}\n        {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n        {{- '\"parameters\": ' }}\n        {{- tool_call.arguments | tojson }}\n        {{- \"}\" }}\n        {{- \"<|eot_id|>\" }}\n    {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n        {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\\n\" }}\n        {%- if message.content is mapping or message.content is iterable %}\n            {{- message.content | tojson }}\n        {%- else %}\n            {{- message.content }}\n        {%- endif %}\n        {{- \"<|eot_id|>\" }}\n    {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>assistant<|end_header_id|>\\n\\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|eot_id|>",
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 131072,
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
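
For context, this tokenizer_config.json is what transformers' AutoTokenizer.from_pretrained reads when the uploaded folder is loaded: the added_tokens_decoder entries register the reserved special tokens, and the chat_template Jinja string drives tokenizer.apply_chat_template. A minimal sketch of how a client would consume it, assuming the files sit in a local directory ("path/to/model" is a placeholder, not a path from this repo):

from transformers import AutoTokenizer

# "path/to/model" is a placeholder; point it at the directory that holds
# this tokenizer_config.json and the matching tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("path/to/model")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# apply_chat_template renders the Jinja chat_template above, wrapping each
# turn in <|start_header_id|>...<|end_header_id|> blocks terminated by
# <|eot_id|>, and appending the assistant header when
# add_generation_prompt=True so the model continues as the assistant.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
)

# Generation should stop on the configured eos_token, "<|eot_id|>".
print(tokenizer.decode(input_ids[0]))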