matt-here committed on
Commit 92023e0 · verified · 1 Parent(s): 4e936c9

Upload 16 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,54 @@
- ---
- license: apache-2.0
- ---
+ ---
+ language:
+ - en
+ - zh
+ - ko
+ license: apache-2.0
+ base_model: Jackrong/Qwopus3.5-27B-v3
+ tags:
+ - unsloth
+ - qwen
+ - qwen3.5
+ - reasoning
+ - chain-of-thought
+ - lora
+ - competitive-programming
+ - mlx
+ pipeline_tag: image-text-to-text
+ library_name: mlx
+ ---
+ # MLX-Qwopus3.5-27B-v3-vision-6bit
+
+ A **6-bit MLX** quantization of [Jackrong/Qwopus3.5-27B-v3](https://huggingface.co/Jackrong/Qwopus3.5-27B-v3), with modifications to restore the multimodal capabilities.
+
+ ---
+ ## Quantization Details
+
+ | Property | Value |
+ |----------|-------|
+ | Method | 6-bit (6.661 bits per weight) |
+ | Tool | `mlx-vlm 0.4.2` via `mlx-vlm.convert` |
+ | Size | ~22.9 GB |
+
+ ---
+ ## Other Available Quants
+
+ | Model | Size | Quantization | Bits per weight | Multimodal |
+ |--------|--------|--------|--------|--------|
+ | [Jackrong/MLX-Qwopus3.5-27B-v3-4bit](https://huggingface.co/Jackrong/MLX-Qwopus3.5-27B-v3-4bit) | 15.15 GB | 4-bit | 4.501 | ✗ |
+ | [matt-here/MLX-Qwopus3.5-27B-v3-vision-4bit](https://huggingface.co/matt-here/MLX-Qwopus3.5-27B-v3-vision-4bit) | 16.08 GB | 4-bit | 4.695 | ✓ (Vision) |
+ | [matt-here/MLX-Qwopus3.5-27B-v3-5bit](https://huggingface.co/matt-here/MLX-Qwopus3.5-27B-v3-5bit) | 18.56 GB | 5-bit | 5.501 | ✗ |
+ | [matt-here/MLX-Qwopus3.5-27B-v3-vision-5bit](https://huggingface.co/matt-here/MLX-Qwopus3.5-27B-v3-vision-5bit) | 19.46 GB | 5-bit | 5.678 | ✓ (Vision) |
+ | [Jackrong/MLX-Qwopus3.5-27B-v3-6bit](https://huggingface.co/Jackrong/MLX-Qwopus3.5-27B-v3-6bit) | 21.88 GB | 6-bit | 6.501 | ✗ |
+ | [(This model)](https://huggingface.co/matt-here/MLX-Qwopus3.5-27B-v3-vision-6bit) | 22.85 GB | 6-bit | 6.661 | ✓ (Vision) |
+ | [Jackrong/MLX-Qwopus3.5-27B-v3-bf16](https://huggingface.co/Jackrong/MLX-Qwopus3.5-27B-v3-bf16) | 53.81 GB | bf16 | 16 | ✗ |
+
+ > GGUF quants: [Jackrong/Qwopus3.5-27B-v3-GGUF](https://huggingface.co/Jackrong/Qwopus3.5-27B-v3-GGUF)
+
+ ---
+ ## Credits
+
+ - [**Alibaba Qwen Team**](https://huggingface.co/Qwen) - [Qwen 3.5 27B](https://huggingface.co/Qwen/Qwen3.5-27B) dense model
+ - [**Jackrong**](https://huggingface.co/Jackrong) - Claude 4.6 Opus v3 distillation work
+ - [**Unsloth**](https://unsloth.ai/) - Training framework
+ - **Apple MLX Team** - High-speed local inference on Apple Silicon
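
Editor's note: the README documents the quantization tool but not how to run the model. Below is a minimal sketch of loading this quant and running a vision prompt with the mlx-vlm Python helpers (`load`, `load_config`, `apply_chat_template`, `generate`). The repo id matches this upload; the image path and the exact argument names/order of `generate` are assumptions, as they vary between mlx-vlm releases, and sampling defaults should follow `generation_config.json` once mapped to your runtime's keyword names.

```python
# Minimal sketch, assuming the mlx-vlm Python API (load / generate helpers);
# argument names and order differ between mlx-vlm releases, so adjust as needed.
from mlx_vlm import load, generate
from mlx_vlm.prompt_utils import apply_chat_template
from mlx_vlm.utils import load_config

repo = "matt-here/MLX-Qwopus3.5-27B-v3-vision-6bit"
model, processor = load(repo)      # fetches ~22.9 GB of shards on first use
config = load_config(repo)

images = ["example.jpg"]           # hypothetical local image path
prompt = apply_chat_template(
    processor, config, "Describe this image.", num_images=len(images)
)

# Prompt first, then images, per the mlx-vlm examples; some versions return a
# plain string, others a result object -- printing works either way.
output = generate(model, processor, prompt, images, max_tokens=512, verbose=False)
print(output)
```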
chat_template.jinja ADDED
@@ -0,0 +1,149 @@
+ {%- set image_count = namespace(value=0) %}
+ {%- set video_count = namespace(value=0) %}
+ {%- macro render_content(content, do_vision_count, is_system_content=false) %}
+ {%- if content is string %}
+ {{- content }}
+ {%- elif content is iterable and content is not mapping %}
+ {%- for item in content %}
+ {%- if 'image' in item or 'image_url' in item or item.type == 'image' %}
+ {%- if is_system_content %}
+ {{- raise_exception('System message cannot contain images.') }}
+ {%- endif %}
+ {%- if do_vision_count %}
+ {%- set image_count.value = image_count.value + 1 %}
+ {%- endif %}
+ {%- if add_vision_id %}
+ {{- 'Picture ' ~ image_count.value ~ ': ' }}
+ {%- endif %}
+ {{- '<|vision_start|><|image_pad|><|vision_end|>' }}
+ {%- elif 'video' in item or item.type == 'video' %}
+ {%- if is_system_content %}
+ {{- raise_exception('System message cannot contain videos.') }}
+ {%- endif %}
+ {%- if do_vision_count %}
+ {%- set video_count.value = video_count.value + 1 %}
+ {%- endif %}
+ {%- if add_vision_id %}
+ {{- 'Video ' ~ video_count.value ~ ': ' }}
+ {%- endif %}
+ {{- '<|vision_start|><|video_pad|><|vision_end|>' }}
+ {%- elif 'text' in item %}
+ {{- item.text }}
+ {%- else %}
+ {{- raise_exception('Unexpected item type in content.') }}
+ {%- endif %}
+ {%- endfor %}
+ {%- elif content is none or content is undefined %}
+ {{- '' }}
+ {%- else %}
+ {{- raise_exception('Unexpected content type.') }}
+ {%- endif %}
+ {%- endmacro %}
+ {%- if not messages %}
+ {{- raise_exception('No messages provided.') }}
+ {%- endif %}
+ {%- if tools and tools is iterable and tools is not mapping %}
+ {{- '<|im_start|>system\n' }}
+ {{- "# Tools\n\nYou have access to the following functions:\n\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>" }}
+ {{- '\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call>' }}
+ {%- if messages[0].role == 'system' %}
+ {%- set content = render_content(messages[0].content, false, true)|trim %}
+ {%- if content %}
+ {{- '\n\n' + content }}
+ {%- endif %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- else %}
+ {%- if messages[0].role == 'system' %}
+ {%- set content = render_content(messages[0].content, false, true)|trim %}
+ {{- '<|im_start|>system\n' + content + '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+ {%- set index = (messages|length - 1) - loop.index0 %}
+ {%- if ns.multi_step_tool and message.role == "user" %}
+ {%- set content = render_content(message.content, false)|trim %}
+ {%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}
+ {%- set ns.multi_step_tool = false %}
+ {%- set ns.last_query_index = index %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if ns.multi_step_tool %}
+ {{- raise_exception('No user query found in messages.') }}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- set content = render_content(message.content, true)|trim %}
+ {%- if message.role == "system" %}
+ {%- if not loop.first %}
+ {{- raise_exception('System message must be at the beginning.') }}
+ {%- endif %}
+ {%- elif message.role == "user" %}
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {%- set reasoning_content = '' %}
+ {%- if message.reasoning_content is string %}
+ {%- set reasoning_content = message.reasoning_content %}
+ {%- else %}
+ {%- if '</think>' in content %}
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
+ {%- endif %}
+ {%- endif %}
+ {%- set reasoning_content = reasoning_content|trim %}
+ {%- if loop.index0 > ns.last_query_index %}
+ {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content + '\n</think>\n\n' + content }}
+ {%- else %}
+ {{- '<|im_start|>' + message.role + '\n' + content }}
+ {%- endif %}
+ {%- if message.tool_calls and message.tool_calls is iterable and message.tool_calls is not mapping %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {%- if loop.first %}
+ {%- if content|trim %}
+ {{- '\n\n<tool_call>\n{"name": "' }}
+ {%- else %}
+ {{- '<tool_call>\n{"name": "' }}
+ {%- endif %}
+ {%- else %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {%- endif %}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {%- endif %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if loop.previtem and loop.previtem.role != "tool" %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- content }}
+ {{- '\n</tool_response>' }}
+ {%- if not loop.last and loop.nextitem.role != "tool" %}
+ {{- '<|im_end|>\n' }}
+ {%- elif loop.last %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- else %}
+ {{- raise_exception('Unexpected message role.') }}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- if enable_thinking is defined and enable_thinking is false %}
+ {{- '<think>\n\n</think>\n\n' }}
+ {%- else %}
+ {{- '<think>\n' }}
+ {%- endif %}
+ {%- endif %}
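
Editor's note: the template above handles text/image/video parts, tool calls, and a `<think>` reasoning block on the generation prompt. A minimal sketch of rendering it with the standard `transformers` `AutoProcessor.apply_chat_template` API; the repo id and the local image filename are assumptions.

```python
# Minimal sketch, assuming this repo loads with transformers' AutoProcessor.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("matt-here/MLX-Qwopus3.5-27B-v3-vision-6bit")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"type": "image", "image": "example.jpg"},   # hypothetical local file
        {"type": "text", "text": "What is in this picture?"},
    ]},
]

# Per the template's generation-prompt branch, this appends
# '<|im_start|>assistant\n<think>\n'; passing enable_thinking=False instead
# emits an empty '<think>\n\n</think>' block.
prompt = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# The image part is rendered as '<|vision_start|><|image_pad|><|vision_end|>'.
```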
config.json ADDED
@@ -0,0 +1,155 @@
+ {
+ "architectures": [
+ "Qwen3_5ForConditionalGeneration"
+ ],
+ "eos_token_id": [
+ 248046,
+ 248044
+ ],
+ "image_token_id": 248056,
+ "model_name": "unsloth/Qwen3.5-27B",
+ "model_type": "qwen3_5",
+ "quantization": {
+ "group_size": 64,
+ "bits": 6,
+ "mode": "affine"
+ },
+ "quantization_config": {
+ "group_size": 64,
+ "bits": 6,
+ "mode": "affine"
+ },
+ "text_config": {
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "attn_output_gate": true,
+ "torch_dtype": "bfloat16",
+ "eos_token_id": 248044,
+ "full_attention_interval": 4,
+ "head_dim": 256,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 17408,
+ "layer_types": [
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention",
+ "linear_attention",
+ "linear_attention",
+ "linear_attention",
+ "full_attention"
+ ],
+ "linear_conv_kernel_dim": 4,
+ "linear_key_head_dim": 128,
+ "linear_num_key_heads": 16,
+ "linear_num_value_heads": 48,
+ "linear_value_head_dim": 128,
+ "mamba_ssm_dtype": "float32",
+ "max_position_embeddings": 262144,
+ "mlp_only_layers": [],
+ "model_type": "qwen3_5_text",
+ "mtp_num_hidden_layers": 1,
+ "mtp_use_dedicated_embeddings": false,
+ "num_attention_heads": 24,
+ "num_hidden_layers": 64,
+ "num_key_value_heads": 4,
+ "partial_rotary_factor": 0.25,
+ "rms_norm_eps": 1e-06,
+ "rope_parameters": {
+ "mrope_interleaved": true,
+ "mrope_section": [
+ 11,
+ 11,
+ 10
+ ],
+ "partial_rotary_factor": 0.25,
+ "rope_theta": 10000000,
+ "rope_type": "default"
+ },
+ "tie_word_embeddings": false,
+ "use_cache": true,
+ "vocab_size": 248320
+ },
+ "tie_word_embeddings": false,
+ "unsloth_fixed": true,
+ "unsloth_version": "2026.3.18",
+ "use_cache": false,
+ "video_token_id": 248057,
+ "vision_config": {
+ "deepstack_visual_indexes": [],
+ "depth": 27,
+ "torch_dtype": "bfloat16",
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "in_channels": 3,
+ "initializer_range": 0.02,
+ "intermediate_size": 4304,
+ "model_type": "qwen3_5",
+ "num_heads": 16,
+ "num_position_embeddings": 2304,
+ "out_hidden_size": 5120,
+ "patch_size": 16,
+ "spatial_merge_size": 2,
+ "temporal_patch_size": 2
+ },
+ "vision_end_token_id": 248054,
+ "vision_start_token_id": 248053
+ }
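
Editor's note: the text config declares 64 layers with `full_attention_interval: 4`, i.e. every fourth layer uses full attention and the rest linear attention. A stdlib-only sketch that checks `layer_types` against that interval (assumes `config.json` has been downloaded locally).

```python
# Sanity-check the layer_types pattern against full_attention_interval.
import json

with open("config.json") as f:
    cfg = json.load(f)

text_cfg = cfg["text_config"]
layer_types = text_cfg["layer_types"]
interval = text_cfg["full_attention_interval"]          # 4

assert len(layer_types) == text_cfg["num_hidden_layers"]  # 64
for i, kind in enumerate(layer_types):
    expected = "full_attention" if (i + 1) % interval == 0 else "linear_attention"
    assert kind == expected, f"layer {i}: {kind} != {expected}"

full = layer_types.count("full_attention")
print(f"{full} full-attention layers, {len(layer_types) - full} linear-attention layers")
# -> 16 full-attention layers, 48 linear-attention layers
```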
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "bos_token_id": 248044,
+ "do_sample": true,
+ "eos_token_id": [
+ 248046,
+ 248044
+ ],
+ "pad_token_id": 248044,
+ "temperature": 0.6,
+ "top_k": 20,
+ "top_p": 0.95,
+ "transformers_version": "5.4.0"
+ }
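
Editor's note: this file carries the recommended sampling defaults (temperature 0.6, top_p 0.95, top_k 20). A small stdlib sketch that collects them into a kwargs dict; the keyword names your runtime expects (mlx-vlm, mlx-lm, transformers) may differ, so map them accordingly.

```python
# Read the recommended sampling defaults from generation_config.json.
import json

with open("generation_config.json") as f:
    gen = json.load(f)

sampling = {
    "temperature": gen["temperature"],  # 0.6
    "top_p": gen["top_p"],              # 0.95
    "top_k": gen["top_k"],              # 20
}
print(sampling)
```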
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4871fc619a8f6101944cc44e61504544934cd93bb8f3c7d5c4f41b0d13d03064
+ size 5362726174
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:202ecc122ef7bab0ef38e30c66be0378ccb0b23609896aa1b6ae3a047d1e0278
+ size 5344282130
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ddda4aded9e2c7c37cc9ebf3caf579ead0721e3298d56d7709931b887c12ec6
+ size 5331479350
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e37f9aa96e3a422d8a9aac9134edfb62c76f0f82218df453c812a2b30e1b6b48
+ size 5331479370
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c442a0e16d5dd786601efde0e43b43c7d8aab054f382f8942131236d459f6595
+ size 1407912945
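
Editor's note: each `model-*.safetensors` entry above is a Git LFS pointer (spec version, sha256 oid, byte size), not the weights themselves. A sketch that verifies a downloaded shard against its pointer, using the oid and size shown for shard 1:

```python
# Verify a downloaded shard against its Git LFS pointer (oid = sha256, size = bytes).
import hashlib
import os

shard = "model-00001-of-00005.safetensors"
expected_oid = "4871fc619a8f6101944cc44e61504544934cd93bb8f3c7d5c4f41b0d13d03064"
expected_size = 5362726174

h = hashlib.sha256()
with open(shard, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(shard) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print(f"{shard}: OK")
```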
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "size": {
+ "longest_edge": 16777216,
+ "shortest_edge": 65536
+ },
+ "patch_size": 16,
+ "temporal_patch_size": 2,
+ "merge_size": 2,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "processor_class": "Qwen3VLProcessor",
+ "image_processor_type": "Qwen2VLImageProcessorFast"
+ }
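
Editor's note: a rough sketch of the visual-token budget implied by this preprocessor, assuming the usual Qwen-VL scheme in which `shortest_edge`/`longest_edge` here act as total-pixel bounds, images are split into 16x16 patches, and 2x2 patches merge into one token. The real processor's resize rounding may differ slightly.

```python
# Rough visual-token estimate under this preprocessor config (assumed
# Qwen-VL-style area bounds + patch/merge behavior; rounding is approximate).
import math

PATCH = 16            # patch_size
MERGE = 2             # merge_size
MIN_PIXELS = 65536    # "shortest_edge" (treated as an area lower bound)
MAX_PIXELS = 16777216 # "longest_edge" (treated as an area upper bound)

def estimate_image_tokens(height: int, width: int) -> int:
    area = height * width
    scale = 1.0
    if area > MAX_PIXELS:
        scale = math.sqrt(MAX_PIXELS / area)
    elif area < MIN_PIXELS:
        scale = math.sqrt(MIN_PIXELS / area)
    step = PATCH * MERGE  # resized H/W rounded to multiples of 32
    h = max(step, round(height * scale / step) * step)
    w = max(step, round(width * scale / step) * step)
    return (h // PATCH) * (w // PATCH) // (MERGE * MERGE)

print(estimate_image_tokens(1080, 1920))  # 1080p frame -> ~2040 tokens
```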
processor_config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "image_processor": {
+ "data_format": "channels_first",
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "Qwen2VLImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "merge_size": 2,
+ "patch_size": 16,
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "longest_edge": 16777216,
+ "shortest_edge": 65536
+ },
+ "temporal_patch_size": 2
+ },
+ "processor_class": "Qwen3VLProcessor"
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87a7830d63fcf43bf241c3c5242e96e62dd3fdc29224ca26fed8ea333db72de4
+ size 19989343
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "add_prefix_space": false,
+ "audio_bos_token": "<|audio_start|>",
+ "audio_eos_token": "<|audio_end|>",
+ "audio_token": "<|audio_pad|>",
+ "backend": "tokenizers",
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "image_token": "<|image_pad|>",
+ "is_local": true,
+ "model_max_length": 262144,
+ "model_specific_special_tokens": {
+ "audio_bos_token": "<|audio_start|>",
+ "audio_eos_token": "<|audio_end|>",
+ "audio_token": "<|audio_pad|>",
+ "image_token": "<|image_pad|>",
+ "video_token": "<|video_pad|>",
+ "vision_bos_token": "<|vision_start|>",
+ "vision_eos_token": "<|vision_end|>"
+ },
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "pretokenize_regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?[\\p{L}\\p{M}]+|\\p{N}| ?[^\\s\\p{L}\\p{M}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+ "processor_class": "Qwen3VLProcessor",
+ "split_special_tokens": false,
+ "tokenizer_class": "TokenizersBackend",
+ "unk_token": null,
+ "video_token": "<|video_pad|>",
+ "vision_bos_token": "<|vision_start|>",
+ "vision_eos_token": "<|vision_end|>"
+ }
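
Editor's note: tokenizer_config.json names the vision/audio placeholder tokens and sets `<|im_end|>` as EOS. A sketch that cross-checks those tokens against the ids declared in config.json above; it assumes this repo loads with transformers' `AutoTokenizer`.

```python
# Cross-check the declared special tokens against the ids from config.json
# (image_token_id 248056, video_token_id 248057, vision start/end 248053/248054).
from transformers import AutoTokenizer

repo = "matt-here/MLX-Qwopus3.5-27B-v3-vision-6bit"
tok = AutoTokenizer.from_pretrained(repo)

checks = {
    "<|image_pad|>": 248056,
    "<|video_pad|>": 248057,
    "<|vision_start|>": 248053,
    "<|vision_end|>": 248054,
}
for token, expected_id in checks.items():
    got = tok.convert_tokens_to_ids(token)
    print(f"{token}: {got} (expected {expected_id})")

print("eos:", tok.eos_token)  # '<|im_end|>' per tokenizer_config.json
```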
video_preprocessor_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "size": {
+ "longest_edge": 25165824,
+ "shortest_edge": 4096
+ },
+ "patch_size": 16,
+ "temporal_patch_size": 2,
+ "merge_size": 2,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "processor_class": "Qwen3VLProcessor",
+ "video_processor_type": "Qwen3VLVideoProcessor"
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff