Mungert committed · Commit e85c698 · verified · 0 Parent(s)

Super-squash history to reclaim storage

.gitattributes ADDED
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-f16.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-f16-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-bf16-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-f16-q6_k.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-bf16-q6_k.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-f16-q4_k.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-bf16-q4_k.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_k_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_k_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q6_k_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_k_s.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_1.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_0_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q4_1_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_0.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_1.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_0_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-q5_1_l.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-iq3_s.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF.imatrix filter=lfs diff=lfs merge=lfs -text
RWKV7-Goose-World3-2.9B-HF-bf16.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
---
license: apache-2.0
language:
- en
- zh
- ja
- ko
- fr
- ar
- es
- pt
metrics:
- accuracy
base_model:
- BlinkDL/rwkv-7-world
pipeline_tag: text-generation
---

# <span style="color: #7FFF7F;">RWKV7-Goose-World3-2.9B-HF GGUF Models</span>

## **Choosing the Right Model Format**

Selecting the correct model format depends on your **hardware capabilities** and **memory constraints**.

### **BF16 (Brain Float 16) – Use if BF16 acceleration is available**
- A 16-bit floating-point format designed for **faster computation** while retaining good precision.
- Provides a **dynamic range similar to FP32** with **lower memory usage**.
- Recommended if your hardware supports **BF16 acceleration** (check your device's specs).
- Ideal for **high-performance inference** with a **reduced memory footprint** compared to FP32.

📌 **Use BF16 if:**
✔ Your hardware has native **BF16 support** (e.g., newer GPUs, TPUs).
✔ You want **higher precision** while saving memory.
✔ You plan to **requantize** the model into another format.

📌 **Avoid BF16 if:**
❌ Your hardware does **not** support BF16 (it may fall back to FP32 and run slower).
❌ You need compatibility with older devices that lack BF16 optimization.

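A quick way to check for BF16 support before picking a file is a one-line PyTorch query. This is only a sketch: it assumes a CUDA build of PyTorch is installed, and it reports on the active GPU, not on CPU BF16 paths.

```bash
# Prints True only if a CUDA device is present and supports native BF16 compute
python -c "import torch; print(torch.cuda.is_available() and torch.cuda.is_bf16_supported())"
```
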
---

### **F16 (Float 16) – More widely supported than BF16**
- A 16-bit floating-point format with **high precision**, but a **smaller range of values** than BF16.
- Works on most devices with **FP16 acceleration support** (including many GPUs and some CPUs).
- Slightly lower numerical precision than BF16, but generally sufficient for inference.

📌 **Use F16 if:**
✔ Your hardware supports **FP16** but **not BF16**.
✔ You need a **balance between speed, memory usage, and accuracy**.
✔ You are running on a **GPU** or another device optimized for FP16 computation.

📌 **Avoid F16 if:**
❌ Your device lacks **native FP16 support** (it may run slower than expected).
❌ You have tight memory limitations (a quantized model may be a better fit).

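If you are unsure about FP16 support, you can inspect the CUDA compute capability. As a rough guideline (an assumption, not a guarantee), NVIDIA GPUs from compute capability 5.3 onward have native FP16 arithmetic:

```bash
# Prints the (major, minor) compute capability of the default CUDA device
python -c "import torch; print(torch.cuda.get_device_capability(0))"
```
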
---

### **Quantized Models (Q4_K, Q6_K, Q8, etc.) – For CPU & Low-VRAM Inference**
Quantization reduces model size and memory usage while preserving as much accuracy as possible.
- **Lower-bit models (Q4_K)** → **Best for minimal memory usage**; may have lower precision.
- **Higher-bit models (Q6_K, Q8_0)** → **Better accuracy**; require more memory.

📌 **Use quantized models if:**
✔ You are running inference on a **CPU** and need an optimized model.
✔ Your device has **low VRAM** and cannot load full-precision models.
✔ You want to reduce the **memory footprint** while keeping reasonable accuracy.

📌 **Avoid quantized models if:**
❌ You need **maximum accuracy** (full-precision models are better for this).
❌ Your hardware has enough VRAM for higher-precision formats (BF16/F16).

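To make this concrete, here is a minimal sketch of CPU inference with llama.cpp. It assumes a recent llama.cpp build (which provides the `llama-cli` binary) and one of the quantized files from this repo; the file name, thread count, and token budget are illustrative:

```bash
# Run a Q4_K_M quant on 6 CPU threads, generating up to 128 tokens
./llama-cli -m RWKV7-Goose-World3-2.9B-HF-q4_k_m.gguf \
  -p "What is a large language model?" \
  -t 6 -n 128
```
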
---

### **Very Low-Bit Quantization (IQ3_XS, IQ3_S, IQ3_M, Q4_K, Q4_0)**
These models are optimized for **extreme memory efficiency**, making them ideal for **low-power devices** or **large-scale deployments** where memory is a critical constraint.

- **IQ3_XS**: Ultra-low-bit quantization (3-bit) with **extreme memory efficiency**.
  - **Use case**: Best for **ultra-low-memory devices** where even Q4_K is too large.
  - **Trade-off**: Lower accuracy compared to higher-bit quantizations.

- **IQ3_S**: Small block size for **maximum memory efficiency**.
  - **Use case**: Best for **low-memory devices** where **IQ3_XS** is too aggressive.

- **IQ3_M**: Medium block size for better accuracy than **IQ3_S**.
  - **Use case**: Suitable for **low-memory devices** where **IQ3_S** is too limiting.

- **Q4_K**: 4-bit quantization with **block-wise optimization** for better accuracy.
  - **Use case**: Best for **low-memory devices** where **Q6_K** is too large.

- **Q4_0**: Pure 4-bit quantization, optimized for **ARM devices**.
  - **Use case**: Best for **ARM-based devices** or **low-memory environments**.

---

### **Summary Table: Model Format Selection**

| Model Format | Precision | Memory Usage | Device Requirements | Best Use Case |
|--------------|-----------|--------------|---------------------|---------------|
| **BF16** | Highest | High | BF16-supported GPUs/CPUs | High-speed inference with reduced memory |
| **F16** | High | High | FP16-supported devices | GPU inference when BF16 isn't available |
| **Q4_K** | Medium-Low | Low | CPU or low-VRAM devices | Memory-constrained environments |
| **Q6_K** | Medium | Moderate | CPU with more memory | Better accuracy while still quantized |
| **Q8_0** | High | Moderate | CPU or GPU with enough VRAM | Best accuracy among quantized models |
| **IQ3_XS** | Very Low | Very Low | Ultra-low-memory devices | Extreme memory efficiency, lower accuracy |
| **Q4_0** | Low | Low | ARM or low-memory devices | llama.cpp can optimize for ARM |

---

## **Included Files & Details**

### `RWKV7-Goose-World3-2.9B-HF-bf16.gguf`
- Model weights preserved in **BF16**.
- Use this if you want to **requantize** the model into a different format (see the sketch below).
- Best if your device supports **BF16 acceleration**.

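For requantizing from the BF16 master, a minimal sketch using llama.cpp's `llama-quantize` tool looks like this (the `Q5_K_M` target is just an example; use any quant type your llama.cpp build supports):

```bash
# Requantize the BF16 GGUF into a Q5_K_M GGUF
./llama-quantize RWKV7-Goose-World3-2.9B-HF-bf16.gguf \
  RWKV7-Goose-World3-2.9B-HF-q5_k_m.gguf Q5_K_M
```
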
### `RWKV7-Goose-World3-2.9B-HF-f16.gguf`
- Model weights stored in **F16**.
- Use if your device supports **FP16**, especially if BF16 is not available.

### `RWKV7-Goose-World3-2.9B-HF-bf16-q8_0.gguf`
- **Output & embeddings** remain in **BF16**.
- All other layers quantized to **Q8_0**.
- Use if your device supports **BF16** and you want a quantized version.

### `RWKV7-Goose-World3-2.9B-HF-f16-q8_0.gguf`
- **Output & embeddings** remain in **F16**.
- All other layers quantized to **Q8_0**.

### `RWKV7-Goose-World3-2.9B-HF-q4_k.gguf`
- **Output & embeddings** quantized to **Q8_0**.
- All other layers quantized to **Q4_K**.
- Good for **CPU inference** with limited memory.

### `RWKV7-Goose-World3-2.9B-HF-q4_k_s.gguf`
- Smallest **Q4_K** variant, using less memory at the cost of accuracy.
- Best for **very low-memory setups**.

### `RWKV7-Goose-World3-2.9B-HF-q6_k.gguf`
- **Output & embeddings** quantized to **Q8_0**.
- All other layers quantized to **Q6_K**.

### `RWKV7-Goose-World3-2.9B-HF-q8_0.gguf`
- Fully **Q8_0**-quantized model for better accuracy.
- Requires **more memory** but offers higher precision.

### `RWKV7-Goose-World3-2.9B-HF-iq3_xs.gguf`
- **IQ3_XS** quantization, optimized for **extreme memory efficiency**.
- Best for **ultra-low-memory devices**.

### `RWKV7-Goose-World3-2.9B-HF-iq3_m.gguf`
- **IQ3_M** quantization, offering a **medium block size** for better accuracy.
- Suitable for **low-memory devices**.

### `RWKV7-Goose-World3-2.9B-HF-q4_0.gguf`
- Pure **Q4_0** quantization, optimized for **ARM devices**.
- Best for **low-memory environments**.
- Prefer **IQ4_NL** if you need better accuracy.

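To grab a single quant instead of cloning the whole repository, `huggingface-cli` works. Note that the repo id below is an assumption based on this card's title; substitute the actual repo path:

```bash
# Download only the Q4_K_M file into the current directory
# (repo id is hypothetical; replace it with the real one)
huggingface-cli download Mungert/RWKV7-Goose-World3-2.9B-HF-GGUF \
  RWKV7-Goose-World3-2.9B-HF-q4_k_m.gguf --local-dir .
```
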
# <span id="testllm" style="color: #7F7FFF;">🚀 If you find these models useful</span>
❤ **Please click "Like" if you find this useful!**
Help me test my **AI-Powered Network Monitor Assistant** with **quantum-ready security checks**:
👉 [Quantum Network Monitor](https://readyforquantum.com)

💬 **How to test**:
1. Click the **chat icon** (bottom right on any page)
2. Choose an **AI assistant type**:
   - `TurboLLM` (GPT-4-mini)
   - `FreeLLM` (Open-source)
   - `TestLLM` (Experimental CPU-only)

### **What I’m Testing**
I’m pushing the limits of **small open-source models for AI network monitoring**, specifically:
- **Function calling** against live network services
- **How small can a model go** while still handling:
  - Automated **Nmap scans**
  - **Quantum-readiness checks**
  - **Metasploit integration**

🟡 **TestLLM** – Current experimental model (llama.cpp on 6 CPU threads):
- ✅ **Zero-configuration setup**
- ⏳ 30s load time (slow inference but **no API costs**)
- 🔧 **Help wanted!** If you’re into **edge-device AI**, let’s collaborate!

### **Other Assistants**
🟢 **TurboLLM** – Uses **gpt-4-mini** for:
- **Real-time network diagnostics**
- **Automated penetration testing** (Nmap/Metasploit)
- 🔑 Get more tokens by [downloading our Quantum Network Monitor Agent](https://readyforquantum.com/download/?utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme)

🔵 **HugLLM** – Open-source models (≈8B params):
- **2x more tokens** than TurboLLM
- **AI-powered log analysis**
- 🌐 Runs on the Hugging Face Inference API

### 💡 **Example AI Commands to Test**:
1. `"Give me info on my website's SSL certificate"`
2. `"Check if my server is using quantum-safe encryption for communication"`
3. `"Run a quick Nmap vulnerability test"`
4. `"Create a cmd processor to .. (whatever you want)"` (note: you need to install a Quantum Network Monitor Agent to run the .NET code; this is a very flexible and powerful feature, so use it with caution!)

### Final word
I fund the servers used to create the model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAI, all out of my own pocket. All of the code for creating the models, and the work I have done with Quantum Network Monitor, is [open source](https://github.com/Mungert69). Feel free to use whatever you find helpful. Please support my work and consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva).
This will help me pay for the services and increase the token limits for everyone.

Thank you :)

# rwkv7-2.9B-world

<!-- Provide a quick summary of what the model is/does. -->

This is an RWKV-7 model in the flash-linear-attention format.

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** Bo Peng, Yu Zhang, Songlin Yang, Ruichong Zhang
- **Funded by:** RWKV Project (under the LF AI & Data Foundation)
- **Model type:** RWKV7
- **Language(s) (NLP):** English
- **License:** Apache-2.0
- **Parameter count:** 2.9B
- **Tokenizer:** RWKV World tokenizer
- **Vocabulary size:** 65,536

### Model Sources

<!-- Provide the basic links for the model. -->

- **Repository:** https://github.com/fla-org/flash-linear-attention ; https://github.com/BlinkDL/RWKV-LM
- **Paper:** Work in progress

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
Install `flash-linear-attention` and the latest version of `transformers` before using this model:

```bash
pip install git+https://github.com/fla-org/flash-linear-attention
pip install 'transformers>=4.48.0'
```

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
You can use this model just like any other Hugging Face model:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained('fla-hub/rwkv7-2.9B-world', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('fla-hub/rwkv7-2.9B-world', trust_remote_code=True)
model = model.cuda()
prompt = "What is a large language model?"
messages = [
    {"role": "user", "content": "Who are you?"},
    {"role": "assistant", "content": "I am a GPT-3 based model."},
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=1024,
)
# Strip the prompt tokens so only the newly generated text remains
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=False)[0]
print(response)
```

### Training Data

This model was trained on World v3, a corpus of 3.119 trillion tokens in total.

#### Training Hyperparameters

- **Training regime:** bfloat16; learning rate 4e-4 to 1e-5 with a "delayed" cosine decay; weight decay 0.1 (with batch size increases during the middle of training)
- **Final loss:** 1.8745
- **Token count:** 3.119 trillion

## FAQ
Q: The safetensors metadata is none.

A: Upgrade `transformers` to >= 4.48.0: `pip install 'transformers>=4.48.0'`
The remaining files were each added as a Git LFS pointer (`version https://git-lfs.github.com/spec/v1`); their object hashes and sizes are:

| File ADDED | LFS oid (sha256) | Size (bytes) |
|---|---|---|
| RWKV7-Goose-World3-2.9B-HF-bf16-q4_k.gguf | 26961a54710462ff432c1a8f480f97f13e6ff83ee4cb983bd2e09ffa155bc962 | 2314884576 |
| RWKV7-Goose-World3-2.9B-HF-bf16-q6_k.gguf | 76578cf72cc393aadf2f0c3db2b04a6ada099b116eb1b8c3532559db84c222c2 | 2963690976 |
| RWKV7-Goose-World3-2.9B-HF-bf16-q8_0.gguf | b6b4b5777d368b93f755aef6ae644fc3745167bf2903cfb7695f3f445284ec85 | 3573175488 |
| RWKV7-Goose-World3-2.9B-HF-bf16.gguf | 725ce1974a9c22fa7a128dac55721ec3aa7931f18c17c328d7a3c5232ebb3357 | 5932471680 |
| RWKV7-Goose-World3-2.9B-HF-f16-q4_k.gguf | bfcb5b239cf99ac4a590d7163266bbe3bb14f41d221ad6f7a2bf602b610211b0 | 2314884576 |
| RWKV7-Goose-World3-2.9B-HF-f16-q6_k.gguf | bf7a218e8bde564f575e669e6e3082ba74025d79ee17257e990ff2b59a5cf6e0 | 2963690976 |
| RWKV7-Goose-World3-2.9B-HF-f16-q8_0.gguf | fe0e9dce99135a3cc02ea54cd64234ceaa5002dcea7e2b37c486d2d642aeaa1d | 3573175488 |
| RWKV7-Goose-World3-2.9B-HF-iq3_m.gguf | f0228923dc8481a9c2a3025a3ef4cded14edff77eab9e61542eed1fe484a0a4d | 1540249248 |
| RWKV7-Goose-World3-2.9B-HF-iq3_s.gguf | b477a1ac344fe0842908bb0047f41afea6849223fbad505112a1e6089c205595 | 1540249248 |
| RWKV7-Goose-World3-2.9B-HF-iq3_xs.gguf | df9b151bf1a0375f65a5357666937e0196b773e5f70a9fd6e23b73fc5fead062 | 1540249248 |
| RWKV7-Goose-World3-2.9B-HF-iq3_xxs.gguf | cd6f645d6455fb1a9d7e349706de3a395d6285c3dfd8313c2d28f55a67ee04d5 | 1422284448 |
| RWKV7-Goose-World3-2.9B-HF-iq4_nl.gguf | 534973a7cad9b58a7ab7e542aacb57ab6ce5c54b96654da3be651ca4b6858eb8 | 1875793376 |
| RWKV7-Goose-World3-2.9B-HF-iq4_xs.gguf | 03de5463c716fa915aeb54813e2c8d82cbd2b71f8e4a01036c9b87ee8dd6e4de | 1791907296 |
| RWKV7-Goose-World3-2.9B-HF-q3_k_m.gguf | 139521a4403e855c9e7fc57d5f37e92f641b1081a2bd329404a828d37db83f88 | 1540249248 |
| RWKV7-Goose-World3-2.9B-HF-q3_k_s.gguf | 84d53ac05a35dd998103f2bef94dddb02001a758bdbde6a006f418ff7e61e16f | 1540249248 |
| RWKV7-Goose-World3-2.9B-HF-q4_0.gguf | 2d8963e8c60481a4a05bb28fda8e5ab676caef6b760af51a43103d873d6d526f | 1832539616 |
| RWKV7-Goose-World3-2.9B-HF-q4_1.gguf | 9ec57488be3f6a1d159cf6930d10cfa779dbc619cd73b22ede4a4d4f24e49bdd | 2010797536 |
| RWKV7-Goose-World3-2.9B-HF-q4_k_m.gguf | 6a29d349bcf456a166406d2d836c8abd83b574d7b4549799318e919071dd99ae | 1919047328 |
| RWKV7-Goose-World3-2.9B-HF-q4_k_s.gguf | bcba58fcaaf6276f9f19a2878e220cdbf6850fa0bd9ff88e5b89c64dd22c16ac | 1919047328 |
| RWKV7-Goose-World3-2.9B-HF-q5_0.gguf | dd121993abdd98194e78e1e26a0c76342c8f40baa37ce52293d18e6d6ceec2bd | 2189055456 |
| RWKV7-Goose-World3-2.9B-HF-q5_1.gguf | b93bd3144eda2fbd6aee4fea59bce8bbe2d1e340c0f4d47d1a67bcfbfebd931d | 2367313376 |
| RWKV7-Goose-World3-2.9B-HF-q5_k_m.gguf | 447435304b02c4cb1939f24632ae1eea733a3957f8855cc12633ace279b7bde1 | 2233620128 |
| RWKV7-Goose-World3-2.9B-HF-q5_k_s.gguf | 68ca31e776112881d7dd59c413490a08a491c5011697be84039c9b726954b9e7 | 2233620128 |
| RWKV7-Goose-World3-2.9B-HF-q6_k_m.gguf | 96659c50fb4b1ec53035556989c8194b3366a2c2f32ae8b0b9a278f760e48437 | 2567853536 |
| RWKV7-Goose-World3-2.9B-HF-q8_0.gguf | 022b3cec12b3e6b7ea0a8592c951a5c2548b74dbe729b265a01c90257ca421a2 | 3258602688 |
| RWKV7-Goose-World3-2.9B-HF.imatrix | d921dc804cda2fab4f468b0141d543c83b832505444ae7f5812b19efe4a9841c | 4340297 |