Datasets:
Upload folder using huggingface_hub
Browse files- .gitattributes +3 -59
- .gitignore +18 -0
- ARCHITECTURE.md +300 -0
- DATASET_CARD.md +148 -0
- DATA_INTEGRATION_STRATEGY.md +276 -0
- LICENSE +21 -0
- README.md +192 -0
- USAGE_GUIDE.md +372 -0
- archive/AllCountriesListing.csv +254 -0
- archive/AllLanguageListing.csv +0 -0
- archive/AllPeoplesAcrossCountries.csv +0 -0
- archive/AllPeoplesInCountry.csv +0 -0
- archive/FieldDefinitions.csv +240 -0
- archive/PeopleCtryLangListing.csv +0 -0
- archive/UnreachedPeoplesByCountry.csv +0 -0
- archive/analyze_api_data.py +128 -0
- archive/analyze_data.py +145 -0
- archive/api_data_sample.json +0 -0
- archive/extracted_cppi/README.txt +11 -0
- archive/extracted_cppi/jp-cppi-cross-reference.csv +0 -0
- archive/extracted_cppi/jp-cppi-cross-reference.xlsx +3 -0
- archive/joshua_data_summary.md +28 -0
- archive/jp-cppi-cross-reference-csv.zip +3 -0
- create_enriched_datasets.py +311 -0
- data_utilities.py +256 -0
- dataset_metadata.json +30 -0
- enrich_with_coordinates.py +343 -0
- enriched_metadata.json +30 -0
- enrichment_metadata.json +39 -0
- fetch_all_datasets.py +217 -0
- fetch_full_data.py +65 -0
- joshua_project_countries.json +0 -0
- joshua_project_enriched.parquet +3 -0
- joshua_project_full_dump.json +3 -0
- joshua_project_languages.json +0 -0
- joshua_project_totals.json +192 -0
- joshua_project_unreached.parquet +3 -0
- prepare_souls_viz_data.py +214 -0
- process_joshua_data.py +97 -0
.gitattributes
CHANGED
|
@@ -1,60 +1,4 @@
|
|
| 1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
-
*.avro filter=lfs diff=lfs merge=lfs -text
|
| 4 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 5 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 7 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 10 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 11 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 12 |
-
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 13 |
-
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 14 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 15 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
| 16 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 17 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 18 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 19 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 20 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 21 |
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 26 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 27 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 28 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 29 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 31 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 32 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 33 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 36 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 37 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 38 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 39 |
-
# Audio files - uncompressed
|
| 40 |
-
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 41 |
-
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 42 |
-
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 43 |
-
# Audio files - compressed
|
| 44 |
-
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
-
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 46 |
-
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
-
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 48 |
-
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 49 |
-
# Image files - uncompressed
|
| 50 |
-
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 51 |
-
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 52 |
-
*.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
-
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 54 |
-
# Image files - compressed
|
| 55 |
-
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
-
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 57 |
-
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 58 |
-
# Video files - compressed
|
| 59 |
-
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
-
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
joshua_project_full_dump.json filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
archive/extracted_cppi/jp-cppi-cross-reference.xlsx filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
archive/jp-cppi-cross-reference-csv.zip filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
*.pyo
|
| 4 |
+
*.pyd
|
| 5 |
+
.Python
|
| 6 |
+
*.so
|
| 7 |
+
venv/
|
| 8 |
+
env/
|
| 9 |
+
ENV/
|
| 10 |
+
.vscode/
|
| 11 |
+
.idea/
|
| 12 |
+
*.swp
|
| 13 |
+
*.swo
|
| 14 |
+
.claude/
|
| 15 |
+
.DS_Store
|
| 16 |
+
Thumbs.db
|
| 17 |
+
*.tmp
|
| 18 |
+
*.bak
|
ARCHITECTURE.md
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Joshua Project Dataset Architecture
|
| 2 |
+
|
| 3 |
+
## Design Philosophy
|
| 4 |
+
|
| 5 |
+
**Normalized datasets as source of truth** + **Enriched datasets for consumption**
|
| 6 |
+
|
| 7 |
+
This hybrid architecture provides the best of both worlds:
|
| 8 |
+
- ✅ Clean, updatable source data (normalized)
|
| 9 |
+
- ✅ Easy-to-use visualizations (enriched JSON)
|
| 10 |
+
- ✅ High-performance analysis (enriched Parquet)
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## Architecture Diagram
|
| 15 |
+
|
| 16 |
+
```
|
| 17 |
+
┌─────────────────────────────────────────────────────────────┐
|
| 18 |
+
│ JOSHUA PROJECT API │
|
| 19 |
+
│ https://api.joshuaproject.net/v1/ │
|
| 20 |
+
└─────────────────────────────────────────────────────────────┘
|
| 21 |
+
│
|
| 22 |
+
│ fetch_all_datasets.py
|
| 23 |
+
│ (quarterly updates)
|
| 24 |
+
▼
|
| 25 |
+
┌─────────────────────────────────────────────────────────────┐
|
| 26 |
+
│ NORMALIZED DATASETS (Source of Truth) │
|
| 27 |
+
├─────────────────────────────────────────────────────────────┤
|
| 28 |
+
│ 📄 joshua_project_full_dump.json 130 MB 16,382 │
|
| 29 |
+
│ 📄 joshua_project_countries.json 286 KB 238 │
|
| 30 |
+
│ 📄 joshua_project_languages.json 4.9 MB 7,134 │
|
| 31 |
+
│ 📄 joshua_project_totals.json 3.1 KB 38 │
|
| 32 |
+
│ 📄 dataset_metadata.json < 1 KB (tracking) │
|
| 33 |
+
└─────────────────────────────────────────────────────────────┘
|
| 34 |
+
│
|
| 35 |
+
│ create_enriched_datasets.py
|
| 36 |
+
│ (run after API updates)
|
| 37 |
+
▼
|
| 38 |
+
┌─────────────────────────────────────────────────────────────┐
|
| 39 |
+
│ ENRICHED DATASETS (For Consumption) │
|
| 40 |
+
├─────────────────────────────────────────────────────────────┤
|
| 41 |
+
│ FULL DATASET (all people groups with embedded data) │
|
| 42 |
+
│ 📄 joshua_project_enriched.json 139 MB 16,382 │
|
| 43 |
+
│ 📦 joshua_project_enriched.parquet 6.2 MB 16,382 │
|
| 44 |
+
│ │
|
| 45 |
+
│ UNREACHED SUBSET (43.5% of data) │
|
| 46 |
+
│ 📄 joshua_project_unreached.json 72 MB 7,124 │
|
| 47 |
+
│ 📦 joshua_project_unreached.parquet 3.8 MB 7,124 │
|
| 48 |
+
│ │
|
| 49 |
+
│ METADATA │
|
| 50 |
+
│ 📄 enriched_metadata.json < 1 KB (stats) │
|
| 51 |
+
└─────────────────────────────────────────────────────────────┘
|
| 52 |
+
│
|
| 53 |
+
├───────────────┬───────────────┐
|
| 54 |
+
▼ ▼ ▼
|
| 55 |
+
┌──────────────┐ ┌──────────────┐ ┌─────────────┐
|
| 56 |
+
│ D3.js / Web │ │ Python │ │ Hugging Face│
|
| 57 |
+
│ Visualizations│ │ Analysis │ │ Upload │
|
| 58 |
+
│ │ │ │ │ │
|
| 59 |
+
│ .json files │ │ .parquet files│ │ .parquet │
|
| 60 |
+
└──────────────┘ └──────────────┘ └─────────────┘
|
| 61 |
+
```
|
| 62 |
+
|
| 63 |
+
---
|
| 64 |
+
|
| 65 |
+
## File Inventory
|
| 66 |
+
|
| 67 |
+
### Source of Truth (Normalized - 135 MB total)
|
| 68 |
+
```
|
| 69 |
+
joshua_project_full_dump.json 130 MB People groups (PGIC)
|
| 70 |
+
joshua_project_countries.json 286 KB Country statistics
|
| 71 |
+
joshua_project_languages.json 4.9 MB Language details
|
| 72 |
+
joshua_project_totals.json 3.1 KB Global summaries
|
| 73 |
+
dataset_metadata.json < 1 KB Fetch tracking
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
**Update workflow**: `python3 fetch_all_datasets.py` (quarterly)
|
| 77 |
+
|
| 78 |
+
---
|
| 79 |
+
|
| 80 |
+
### Enriched for Consumption (222 MB JSON, 10 MB Parquet)
|
| 81 |
+
```
|
| 82 |
+
joshua_project_enriched.json 139 MB Full dataset (browser viz)
|
| 83 |
+
joshua_project_enriched.parquet 6.2 MB Full dataset (analysis)
|
| 84 |
+
joshua_project_unreached.json 72 MB Unreached subset (browser)
|
| 85 |
+
joshua_project_unreached.parquet 3.8 MB Unreached subset (analysis)
|
| 86 |
+
enriched_metadata.json < 1 KB Enrichment stats
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
**Regenerate workflow**: `python3 create_enriched_datasets.py` (after API updates)
|
| 90 |
+
|
| 91 |
+
---
|
| 92 |
+
|
| 93 |
+
### Scripts & Utilities
|
| 94 |
+
```
|
| 95 |
+
fetch_all_datasets.py 6.8 KB Fetch from API
|
| 96 |
+
create_enriched_datasets.py 11 KB Generate enriched versions
|
| 97 |
+
data_utilities.py 7.4 KB Loading helpers
|
| 98 |
+
```
|
| 99 |
+
|
| 100 |
+
---
|
| 101 |
+
|
| 102 |
+
### Documentation
|
| 103 |
+
```
|
| 104 |
+
README.md Updated Complete inventory
|
| 105 |
+
USAGE_GUIDE.md 9.9 KB How to use datasets
|
| 106 |
+
DATA_INTEGRATION_STRATEGY.md 7.4 KB Technical architecture
|
| 107 |
+
DATASET_CARD.md 6.4 KB Hugging Face ready
|
| 108 |
+
ARCHITECTURE.md This file System overview
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
---
|
| 112 |
+
|
| 113 |
+
## Data Flow
|
| 114 |
+
|
| 115 |
+
### 1. Initial Setup (One-time)
|
| 116 |
+
```bash
|
| 117 |
+
# Fetch all normalized datasets from API
|
| 118 |
+
python3 fetch_all_datasets.py
|
| 119 |
+
|
| 120 |
+
# Generate enriched versions
|
| 121 |
+
python3 create_enriched_datasets.py
|
| 122 |
+
```
|
| 123 |
+
|
| 124 |
+
**Result**: 9 dataset files ready for use
|
| 125 |
+
|
| 126 |
+
---
|
| 127 |
+
|
| 128 |
+
### 2. Quarterly Updates
|
| 129 |
+
```bash
|
| 130 |
+
# Step 1: Update source of truth
|
| 131 |
+
python3 fetch_all_datasets.py
|
| 132 |
+
# Downloads: countries, languages, totals (< 5 seconds)
|
| 133 |
+
|
| 134 |
+
# Step 2: Regenerate enriched datasets
|
| 135 |
+
python3 create_enriched_datasets.py
|
| 136 |
+
# Creates: enriched JSON + Parquet (~ 30 seconds)
|
| 137 |
+
|
| 138 |
+
# Step 3: Update Hugging Face (optional)
|
| 139 |
+
cp joshua_project_enriched.parquet huggingface_dataset/
|
| 140 |
+
cd huggingface_dataset && git push
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
---
|
| 144 |
+
|
| 145 |
+
### 3. Usage Patterns
|
| 146 |
+
|
| 147 |
+
**For D3.js/Observable Visualization**:
|
| 148 |
+
```javascript
|
| 149 |
+
// Load enriched JSON - everything embedded, no joins
|
| 150 |
+
d3.json('joshua_project_enriched.json').then(data => {
|
| 151 |
+
// Country and language data already embedded
|
| 152 |
+
const viz = data.filter(d => d.LeastReached === 'Y')
|
| 153 |
+
.map(d => ({
|
| 154 |
+
name: d.PeopNameInCountry,
|
| 155 |
+
country: d.country_data.name,
|
| 156 |
+
language: d.language_data.name,
|
| 157 |
+
population: d.Population
|
| 158 |
+
}));
|
| 159 |
+
});
|
| 160 |
+
```
|
| 161 |
+
|
| 162 |
+
**For Python/pandas Analysis**:
|
| 163 |
+
```python
|
| 164 |
+
# Load Parquet - 95.5% smaller, 20x faster
|
| 165 |
+
import pandas as pd
|
| 166 |
+
df = pd.read_parquet('joshua_project_enriched.parquet')
|
| 167 |
+
|
| 168 |
+
# Query with embedded data
|
| 169 |
+
result = df[df['LeastReached'] == 'Y'].groupby('ROG3').agg({
|
| 170 |
+
'Population': 'sum',
|
| 171 |
+
'PeopleID3': 'count'
|
| 172 |
+
})
|
| 173 |
+
```
|
| 174 |
+
|
| 175 |
+
**For Simple Queries**:
|
| 176 |
+
```python
|
| 177 |
+
# Use helper functions
|
| 178 |
+
from data_utilities import *
|
| 179 |
+
|
| 180 |
+
india = get_by_country('IN') # All groups in India
|
| 181 |
+
unreached = load_unreached() # Just unreached groups
|
| 182 |
+
hindi = get_by_language('hin') # Hindi speakers
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
---
|
| 186 |
+
|
| 187 |
+
## Key Design Decisions
|
| 188 |
+
|
| 189 |
+
### Why Keep Normalized Datasets?
|
| 190 |
+
|
| 191 |
+
✅ **Source of Truth**: Match API structure exactly
|
| 192 |
+
✅ **Clean Updates**: Refresh individual datasets without rebuilding everything
|
| 193 |
+
✅ **Storage Efficient**: No data duplication in source files
|
| 194 |
+
✅ **API Parity**: Easy to validate against source
|
| 195 |
+
|
| 196 |
+
### Why Create Enriched Datasets?
|
| 197 |
+
|
| 198 |
+
✅ **No Joins Needed**: Single file loading for visualizations
|
| 199 |
+
✅ **Browser Friendly**: JSON works directly in browsers
|
| 200 |
+
✅ **Performance**: Parquet is 95.5% smaller and 10-100x faster
|
| 201 |
+
✅ **Simplicity**: Beginners don't need to understand relational joins
|
| 202 |
+
|
| 203 |
+
### Why Both JSON and Parquet?
|
| 204 |
+
|
| 205 |
+
**JSON** (for visualizations):
|
| 206 |
+
- ✅ Works in browsers
|
| 207 |
+
- ✅ Human readable
|
| 208 |
+
- ✅ No dependencies
|
| 209 |
+
- ✅ D3.js native format
|
| 210 |
+
|
| 211 |
+
**Parquet** (for analysis):
|
| 212 |
+
- ✅ 95.5% smaller (6.2 MB vs 139 MB)
|
| 213 |
+
- ✅ Columnar = efficient filtering
|
| 214 |
+
- ✅ Strongly typed
|
| 215 |
+
- ✅ Industry standard (Hugging Face, Databricks, Snowflake)
|
| 216 |
+
|
| 217 |
+
---
|
| 218 |
+
|
| 219 |
+
## Storage Efficiency
|
| 220 |
+
|
| 221 |
+
| Format | Files | Total Size | Compression |
|
| 222 |
+
|--------|-------|------------|-------------|
|
| 223 |
+
| **Normalized (source)** | 4 | 135 MB | Baseline |
|
| 224 |
+
| **Enriched JSON** | 2 | 211 MB | +56% (embedded data) |
|
| 225 |
+
| **Enriched Parquet** | 2 | 10 MB | **-93% vs source!** |
|
| 226 |
+
|
| 227 |
+
**Parquet magic**: Columnar format + compression = 93% space savings
|
| 228 |
+
|
| 229 |
+
---
|
| 230 |
+
|
| 231 |
+
## Maintenance Schedule
|
| 232 |
+
|
| 233 |
+
### Quarterly (Recommended)
|
| 234 |
+
1. Run `fetch_all_datasets.py` to update source data
|
| 235 |
+
2. Run `create_enriched_datasets.py` to regenerate enriched versions
|
| 236 |
+
3. Update Hugging Face repo (if applicable)
|
| 237 |
+
|
| 238 |
+
### As Needed
|
| 239 |
+
- When Joshua Project announces major updates
|
| 240 |
+
- When adding new enriched dataset variants (e.g., by region)
|
| 241 |
+
- When updating documentation
|
| 242 |
+
|
| 243 |
+
### Version Tracking
|
| 244 |
+
- Use `dataset_metadata.json` fetch dates as version identifiers
|
| 245 |
+
- Example: "2025-12-23" = December 23, 2025 snapshot
|
| 246 |
+
|
| 247 |
+
---
|
| 248 |
+
|
| 249 |
+
## Future Enhancements
|
| 250 |
+
|
| 251 |
+
### Potential Additions
|
| 252 |
+
- Regional subsets (by continent/region)
|
| 253 |
+
- Religion-focused datasets
|
| 254 |
+
- Language family aggregations
|
| 255 |
+
- Time-series data (if historical snapshots saved)
|
| 256 |
+
|
| 257 |
+
### When to Add
|
| 258 |
+
- **Regional subsets**: If visualizations focus on specific regions
|
| 259 |
+
- **Specialized views**: Based on actual usage patterns
|
| 260 |
+
- **Historical data**: If tracking changes over time becomes valuable
|
| 261 |
+
|
| 262 |
+
---
|
| 263 |
+
|
| 264 |
+
## Success Metrics
|
| 265 |
+
|
| 266 |
+
✅ **All 4 API datasets** fetched (people_groups, countries, languages, totals)
|
| 267 |
+
✅ **16,382 people groups** with 99.99% country/language coverage
|
| 268 |
+
✅ **Enriched datasets** created in both JSON and Parquet
|
| 269 |
+
✅ **95.5% compression** achieved with Parquet
|
| 270 |
+
✅ **Complete documentation** for all use cases
|
| 271 |
+
✅ **Python utilities** for easy data access
|
| 272 |
+
✅ **Ready for Hugging Face** upload
|
| 273 |
+
|
| 274 |
+
**Total dataset package**: 357 MB (JSON) or 145 MB (with Parquet replacing JSON)
|
| 275 |
+
|
| 276 |
+
---
|
| 277 |
+
|
| 278 |
+
## Questions & Answers
|
| 279 |
+
|
| 280 |
+
**Q: Which format should I use?**
|
| 281 |
+
- Visualization → Enriched JSON
|
| 282 |
+
- Analysis → Enriched Parquet
|
| 283 |
+
- Database → Normalized JSON
|
| 284 |
+
|
| 285 |
+
**Q: How do I update the data?**
|
| 286 |
+
- Run `fetch_all_datasets.py` then `create_enriched_datasets.py`
|
| 287 |
+
|
| 288 |
+
**Q: Can I delete the JSON enriched files?**
|
| 289 |
+
- Yes, if you only need Parquet for analysis (saves 211 MB)
|
| 290 |
+
- Keep them if you need browser-based visualizations
|
| 291 |
+
|
| 292 |
+
**Q: Should I commit large files to git?**
|
| 293 |
+
- **Commit**: Scripts, docs, metadata
|
| 294 |
+
- **Don't commit**: Large dataset files (use Git LFS or .gitignore)
|
| 295 |
+
- **Alternative**: Upload to Hugging Face, reference in README
|
| 296 |
+
|
| 297 |
+
**Q: How do I share this dataset?**
|
| 298 |
+
- Upload Parquet files to Hugging Face
|
| 299 |
+
- Share documentation and data_utilities.py
|
| 300 |
+
- Reference source API for attribution
|
DATASET_CARD.md
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
task_categories:
|
| 4 |
+
- tabular-classification
|
| 5 |
+
- text-classification
|
| 6 |
+
language:
|
| 7 |
+
- en
|
| 8 |
+
tags:
|
| 9 |
+
- demographics
|
| 10 |
+
- linguistics
|
| 11 |
+
- religion
|
| 12 |
+
- geospatial
|
| 13 |
+
- people-groups
|
| 14 |
+
- languages
|
| 15 |
+
- missions
|
| 16 |
+
pretty_name: Joshua Project Global Peoples
|
| 17 |
+
size_categories:
|
| 18 |
+
- 10K<n<100K
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
# Joshua Project Global Peoples Dataset
|
| 22 |
+
|
| 23 |
+
## Dataset Description
|
| 24 |
+
|
| 25 |
+
- **Homepage:** [joshuaproject.net](https://joshuaproject.net)
|
| 26 |
+
- **Repository:** [github.com/lukeslp/joshua-project-data](https://github.com/lukeslp/joshua-project-data)
|
| 27 |
+
- **Point of Contact:** [Luke Steuber](https://lukesteuber.com)
|
| 28 |
+
- **Part of:** [Data Trove](https://dr.eamer.dev/datavis/data_trove/)
|
| 29 |
+
|
| 30 |
+
### Dataset Summary
|
| 31 |
+
|
| 32 |
+
Comprehensive demographic, linguistic, and religious data for people groups worldwide, sourced from the Joshua Project API v1.
|
| 33 |
+
|
| 34 |
+
- **16,382 people groups** across 238 countries
|
| 35 |
+
- **7,134 languages** with Bible translation status
|
| 36 |
+
- **Enriched Parquet files** for fast analysis (95% smaller than JSON)
|
| 37 |
+
- Updated December 2025
|
| 38 |
+
|
| 39 |
+
### Supported Tasks
|
| 40 |
+
|
| 41 |
+
- Geospatial visualization and mapping
|
| 42 |
+
- Demographic analysis and clustering
|
| 43 |
+
- Linguistic diversity research
|
| 44 |
+
- Bible translation gap analysis
|
| 45 |
+
- Cross-cultural studies
|
| 46 |
+
|
| 47 |
+
### Languages
|
| 48 |
+
|
| 49 |
+
Data covers 7,134 languages identified by ISO 639-3 codes.
|
| 50 |
+
|
| 51 |
+
## Dataset Structure
|
| 52 |
+
|
| 53 |
+
### Data Instances
|
| 54 |
+
|
| 55 |
+
Each record in the enriched dataset looks like:
|
| 56 |
+
|
| 57 |
+
```json
|
| 58 |
+
{
|
| 59 |
+
"PeopleID3": 10208,
|
| 60 |
+
"PeopNameInCountry": "Tuareg, Air",
|
| 61 |
+
"Population": 517000,
|
| 62 |
+
"LeastReached": "Y",
|
| 63 |
+
"JPScale": 1,
|
| 64 |
+
"PrimaryReligion": "Islam",
|
| 65 |
+
"country_data": {
|
| 66 |
+
"name": "Niger",
|
| 67 |
+
"percent_christianity": 1.62,
|
| 68 |
+
"total_peoples": 36
|
| 69 |
+
},
|
| 70 |
+
"language_data": {
|
| 71 |
+
"name": "Tamajeq, Tayart",
|
| 72 |
+
"bible_status": 4,
|
| 73 |
+
"has_jesus_film": "N"
|
| 74 |
+
}
|
| 75 |
+
}
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
### Data Fields
|
| 79 |
+
|
| 80 |
+
| Field | Type | Description |
|
| 81 |
+
|-------|------|-------------|
|
| 82 |
+
| `PeopleID3` | int | Unique people-group identifier |
|
| 83 |
+
| `PeopNameInCountry` | str | Name within country context |
|
| 84 |
+
| `ROG3` | str | 2-letter country code |
|
| 85 |
+
| `ROL3` | str | 3-letter language code (ISO 639-3) |
|
| 86 |
+
| `Population` | int | Estimated population |
|
| 87 |
+
| `LeastReached` | str | `Y` / `N` — under 2% evangelical |
|
| 88 |
+
| `JPScale` | int | 1-5 gospel access scale |
|
| 89 |
+
| `PrimaryReligion` | str | Predominant religion |
|
| 90 |
+
| `PercentEvangelical` | float | Evangelical Christian % |
|
| 91 |
+
| `BibleStatus` | int | Translation completeness (0-5) |
|
| 92 |
+
|
| 93 |
+
107 total fields per record. See [FieldDefinitions.csv](https://github.com/lukeslp/joshua-project-data/blob/main/archive/FieldDefinitions.csv) for the complete schema.
|
| 94 |
+
|
| 95 |
+
### Data Splits
|
| 96 |
+
|
| 97 |
+
| Split | Records | Description |
|
| 98 |
+
|-------|---------|-------------|
|
| 99 |
+
| Full (enriched) | 16,382 | All people groups with embedded country/language data |
|
| 100 |
+
| Unreached | 7,124 | Least-reached subset (< 2% evangelical) |
|
| 101 |
+
|
| 102 |
+
## Dataset Creation
|
| 103 |
+
|
| 104 |
+
### Source Data
|
| 105 |
+
|
| 106 |
+
- **Provider:** [Joshua Project](https://joshuaproject.net) via [API v1](https://api.joshuaproject.net/)
|
| 107 |
+
- **API maintainer:** [Missional Digerati](https://missionaldigerati.org)
|
| 108 |
+
- **Collection date:** December 21-23, 2025
|
| 109 |
+
- **Method:** Full API dump via Python fetcher scripts (included in repo)
|
| 110 |
+
- **Recommended refresh:** Quarterly
|
| 111 |
+
|
| 112 |
+
### Considerations for Using the Data
|
| 113 |
+
|
| 114 |
+
**Known biases:**
|
| 115 |
+
- Data is collected with a Christian missions focus — religious categorizations reflect that lens
|
| 116 |
+
- Population figures are estimates, not census data
|
| 117 |
+
- Coverage is more detailed for regions with active missions research
|
| 118 |
+
|
| 119 |
+
**Limitations:**
|
| 120 |
+
- Snapshot from December 2025; populations and percentages change over time
|
| 121 |
+
- Religious categories are simplified; they do not capture religious pluralism
|
| 122 |
+
- Some remote groups have sparse information
|
| 123 |
+
|
| 124 |
+
**Ethical use:**
|
| 125 |
+
- Not intended for political targeting or discrimination
|
| 126 |
+
- Population estimates should be cited as approximations
|
| 127 |
+
|
| 128 |
+
## Additional Information
|
| 129 |
+
|
| 130 |
+
### Licensing
|
| 131 |
+
|
| 132 |
+
This packaging is MIT-licensed. The underlying data is provided by Joshua Project for research purposes — see [joshuaproject.net](https://joshuaproject.net) for their terms.
|
| 133 |
+
|
| 134 |
+
### Citation
|
| 135 |
+
|
| 136 |
+
```bibtex
|
| 137 |
+
@dataset{joshua_project_2025,
|
| 138 |
+
title = {Joshua Project Global Peoples Dataset},
|
| 139 |
+
author = {Joshua Project},
|
| 140 |
+
year = {2025},
|
| 141 |
+
url = {https://joshuaproject.net},
|
| 142 |
+
note = {Packaged by Luke Steuber, fetched December 2025 via API v1}
|
| 143 |
+
}
|
| 144 |
+
```
|
| 145 |
+
|
| 146 |
+
### Dataset Card Author
|
| 147 |
+
|
| 148 |
+
[Luke Steuber](https://lukesteuber.com) — [dr.eamer.dev](https://dr.eamer.dev)
|
DATA_INTEGRATION_STRATEGY.md
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Joshua Project Data Integration Strategy
|
| 2 |
+
|
| 3 |
+
## Current State
|
| 4 |
+
|
| 5 |
+
**4 normalized datasets** (database-style structure):
|
| 6 |
+
- `joshua_project_full_dump.json` - 16,382 people groups (130 MB)
|
| 7 |
+
- `joshua_project_countries.json` - 238 countries (286 KB)
|
| 8 |
+
- `joshua_project_languages.json` - 7,134 languages (4.9 MB)
|
| 9 |
+
- `joshua_project_totals.json` - 38 global stats (3.1 KB)
|
| 10 |
+
|
| 11 |
+
**Relationships**: Near-perfect referential integrity
|
| 12 |
+
- All 238 countries in people groups → covered in countries dataset ✅
|
| 13 |
+
- 6,164 languages in people groups → 7,133/7,134 covered (99.99%) ✅
|
| 14 |
+
|
| 15 |
+
## Recommended Approach: Hybrid Architecture
|
| 16 |
+
|
| 17 |
+
### 1. Keep Normalized (Current) ✅
|
| 18 |
+
**Use for**: API-style queries, updates, storage efficiency
|
| 19 |
+
|
| 20 |
+
**Advantages**:
|
| 21 |
+
- Matches source API structure
|
| 22 |
+
- Easy to update individual datasets
|
| 23 |
+
- No data redundancy
|
| 24 |
+
- Clean separation of concerns
|
| 25 |
+
|
| 26 |
+
**Keep as-is**: All 4 JSON files + metadata tracker
|
| 27 |
+
|
| 28 |
+
---
|
| 29 |
+
|
| 30 |
+
### 2. Create Enriched/Denormalized Versions
|
| 31 |
+
|
| 32 |
+
#### A. Full Enriched Dataset
|
| 33 |
+
**File**: `joshua_project_enriched.json` / `.parquet`
|
| 34 |
+
**Purpose**: Complete data for complex visualizations
|
| 35 |
+
|
| 36 |
+
**Structure**: People groups with embedded country + language data
|
| 37 |
+
```json
|
| 38 |
+
{
|
| 39 |
+
"PeopleID3": 10208,
|
| 40 |
+
"PeopNameInCountry": "Tuareg, Air",
|
| 41 |
+
"Population": 517000,
|
| 42 |
+
"LeastReached": "Y",
|
| 43 |
+
"JPScale": 1,
|
| 44 |
+
|
| 45 |
+
// Embedded country data
|
| 46 |
+
"country": {
|
| 47 |
+
"ROG3": "NG",
|
| 48 |
+
"Ctry": "Niger",
|
| 49 |
+
"PercentChristianity": 1.2,
|
| 50 |
+
"CntPeoples": 45,
|
| 51 |
+
"CntPeoplesLR": 38
|
| 52 |
+
},
|
| 53 |
+
|
| 54 |
+
// Embedded language data
|
| 55 |
+
"language": {
|
| 56 |
+
"ROL3": "thz",
|
| 57 |
+
"Language": "Tamajeq, Tayart",
|
| 58 |
+
"BibleStatus": 4,
|
| 59 |
+
"NTYear": "1990-2003",
|
| 60 |
+
"HasJesusFilm": "N"
|
| 61 |
+
}
|
| 62 |
+
}
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
**Size estimate**: ~150-180 MB (adds ~20-50 MB overhead)
|
| 66 |
+
|
| 67 |
+
**Use cases**:
|
| 68 |
+
- D3.js visualizations (maps, networks, charts)
|
| 69 |
+
- Single-file data loading
|
| 70 |
+
- Exploratory analysis
|
| 71 |
+
- Quick prototyping
|
| 72 |
+
|
| 73 |
+
---
|
| 74 |
+
|
| 75 |
+
#### B. Specialized Exports
|
| 76 |
+
|
| 77 |
+
**1. Unreached Peoples Focus**
|
| 78 |
+
- **File**: `joshua_project_unreached.json` / `.parquet`
|
| 79 |
+
- **Filter**: `LeastReached == "Y"` only
|
| 80 |
+
- **Records**: ~7,000 (43% of total)
|
| 81 |
+
- **Use**: Focused visualizations, mission analytics
|
| 82 |
+
|
| 83 |
+
**2. Geographic Clusters**
|
| 84 |
+
- **Files**: `joshua_project_by_region/[region].json`
|
| 85 |
+
- **Split by**: `RegionName` (14 regions)
|
| 86 |
+
- **Use**: Regional dashboards, continent-specific analysis
|
| 87 |
+
|
| 88 |
+
**3. Religion-Focused**
|
| 89 |
+
- **File**: `joshua_project_by_religion.json`
|
| 90 |
+
- **Group by**: `PrimaryReligion`
|
| 91 |
+
- **Use**: Religious demographics, comparative analysis
|
| 92 |
+
|
| 93 |
+
**4. Language Families**
|
| 94 |
+
- **File**: `joshua_project_language_families.json`
|
| 95 |
+
- **Aggregate**: By language with people group arrays
|
| 96 |
+
- **Use**: Bible translation gap analysis
|
| 97 |
+
|
| 98 |
+
---
|
| 99 |
+
|
| 100 |
+
### 3. Hugging Face Dataset Package
|
| 101 |
+
|
| 102 |
+
**Repository structure**:
|
| 103 |
+
```
|
| 104 |
+
joshua-project-dataset/
|
| 105 |
+
├── data/
|
| 106 |
+
│ ├── people_groups.parquet
|
| 107 |
+
│ ├── countries.parquet
|
| 108 |
+
│ ├── languages.parquet
|
| 109 |
+
│ ├── totals.parquet
|
| 110 |
+
│ └── enriched/
|
| 111 |
+
│ ├── full_enriched.parquet
|
| 112 |
+
│ ├── unreached_only.parquet
|
| 113 |
+
│ └── by_region/
|
| 114 |
+
│ ├── africa.parquet
|
| 115 |
+
│ ├── asia.parquet
|
| 116 |
+
│ └── ...
|
| 117 |
+
├── README.md (dataset card)
|
| 118 |
+
├── joshua_project.py (loading script)
|
| 119 |
+
└── metadata.json
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
**Format choice**: **Parquet** (not JSON)
|
| 123 |
+
- ✅ Columnar format: Efficient for analytics
|
| 124 |
+
- ✅ Compressed: 50-70% smaller than JSON
|
| 125 |
+
- ✅ Typed schemas: Better data integrity
|
| 126 |
+
- ✅ Native support: pandas, DuckDB, Polars, Arrow
|
| 127 |
+
- ✅ Hugging Face standard: `datasets` library compatible
|
| 128 |
+
|
| 129 |
+
**Example loading**:
|
| 130 |
+
```python
|
| 131 |
+
from datasets import load_dataset
|
| 132 |
+
|
| 133 |
+
# Load normalized datasets
|
| 134 |
+
ds = load_dataset("your-username/joshua-project")
|
| 135 |
+
people = ds["people_groups"]
|
| 136 |
+
countries = ds["countries"]
|
| 137 |
+
|
| 138 |
+
# Or load enriched version
|
| 139 |
+
enriched = load_dataset("your-username/joshua-project", "enriched")
|
| 140 |
+
```
|
| 141 |
+
|
| 142 |
+
---
|
| 143 |
+
|
| 144 |
+
## Implementation Plan
|
| 145 |
+
|
| 146 |
+
### Phase 1: Build Enrichment Pipeline ✅
|
| 147 |
+
**Script**: `create_enriched_datasets.py`
|
| 148 |
+
|
| 149 |
+
Features:
|
| 150 |
+
- Join people groups + countries + languages
|
| 151 |
+
- Create full enriched version
|
| 152 |
+
- Create specialized subsets (unreached, by region, etc.)
|
| 153 |
+
- Export as both JSON and Parquet
|
| 154 |
+
- Validate data integrity
|
| 155 |
+
- Generate summary statistics
|
| 156 |
+
|
| 157 |
+
### Phase 2: Hugging Face Preparation
|
| 158 |
+
**Script**: `prepare_huggingface_dataset.py`
|
| 159 |
+
|
| 160 |
+
Tasks:
|
| 161 |
+
- Convert all datasets to Parquet
|
| 162 |
+
- Create dataset card (README.md)
|
| 163 |
+
- Generate metadata.json
|
| 164 |
+
- Create loading script
|
| 165 |
+
- Add data fields documentation
|
| 166 |
+
- Include license and citation info
|
| 167 |
+
|
| 168 |
+
### Phase 3: Visualization Utilities
|
| 169 |
+
**Script**: `data_utilities.py`
|
| 170 |
+
|
| 171 |
+
Functions:
|
| 172 |
+
- `load_normalized()` - Load separate datasets
|
| 173 |
+
- `load_enriched()` - Load denormalized version
|
| 174 |
+
- `filter_unreached()` - Get unreached peoples
|
| 175 |
+
- `get_by_country(country_code)` - Country-specific data
|
| 176 |
+
- `get_by_language(language_code)` - Language-specific data
|
| 177 |
+
- `get_by_region(region_name)` - Regional data
|
| 178 |
+
|
| 179 |
+
---
|
| 180 |
+
|
| 181 |
+
## File Size Estimates
|
| 182 |
+
|
| 183 |
+
| Dataset | JSON | Parquet | Use Case |
|
| 184 |
+
|---------|------|---------|----------|
|
| 185 |
+
| People Groups | 130 MB | ~50 MB | Base data |
|
| 186 |
+
| Countries | 286 KB | ~100 KB | Lookups |
|
| 187 |
+
| Languages | 4.9 MB | ~2 MB | Lookups |
|
| 188 |
+
| Totals | 3 KB | ~1 KB | Stats |
|
| 189 |
+
| **Full Enriched** | ~180 MB | ~70 MB | Viz, analysis |
|
| 190 |
+
| **Unreached Only** | ~80 MB | ~30 MB | Focused viz |
|
| 191 |
+
| **By Region (14 files)** | ~10-15 MB each | ~4-6 MB each | Regional dash |
|
| 192 |
+
|
| 193 |
+
---
|
| 194 |
+
|
| 195 |
+
## Recommendations by Use Case
|
| 196 |
+
|
| 197 |
+
### For Visualizations (D3.js, Observable, etc.)
|
| 198 |
+
✅ **Use enriched JSON** - Single file, easy browser loading
|
| 199 |
+
- `joshua_project_enriched.json` for full dataset
|
| 200 |
+
- `joshua_project_unreached.json` for focused view
|
| 201 |
+
- Regional splits for continent-specific maps
|
| 202 |
+
|
| 203 |
+
### For Analysis (Python/R/Julia)
|
| 204 |
+
✅ **Use Parquet files** - Fast loading, efficient storage
|
| 205 |
+
- Load with pandas/polars/DuckDB
|
| 206 |
+
- Columnar operations are 10-100x faster
|
| 207 |
+
- Can load subsets without reading entire file
|
| 208 |
+
|
| 209 |
+
### For Hugging Face Upload
|
| 210 |
+
✅ **Use Parquet** - Platform standard
|
| 211 |
+
- Include both normalized and enriched versions
|
| 212 |
+
- Multiple dataset configs (default, enriched, unreached)
|
| 213 |
+
- Comprehensive dataset card
|
| 214 |
+
|
| 215 |
+
### For Web Apps (Flask/Express APIs)
|
| 216 |
+
✅ **Use normalized JSON** - Easy querying
|
| 217 |
+
- Keep current structure
|
| 218 |
+
- Load into SQLite/Postgres for complex queries
|
| 219 |
+
- Or use DuckDB for zero-setup SQL on Parquet
|
| 220 |
+
|
| 221 |
+
### For Mobile/Embedded
|
| 222 |
+
✅ **Use compressed subsets** - Minimize bandwidth
|
| 223 |
+
- Regional splits
|
| 224 |
+
- Unreached only
|
| 225 |
+
- Pre-filtered by criteria
|
| 226 |
+
|
| 227 |
+
---
|
| 228 |
+
|
| 229 |
+
## Next Steps
|
| 230 |
+
|
| 231 |
+
1. **Run**: `python3 create_enriched_datasets.py`
|
| 232 |
+
- Generates all enriched versions
|
| 233 |
+
- Exports JSON and Parquet formats
|
| 234 |
+
- Creates validation reports
|
| 235 |
+
|
| 236 |
+
2. **Run**: `python3 prepare_huggingface_dataset.py`
|
| 237 |
+
- Prepares complete HF-ready package
|
| 238 |
+
- Generates dataset card
|
| 239 |
+
- Creates upload structure
|
| 240 |
+
|
| 241 |
+
3. **Upload to Hugging Face**:
|
| 242 |
+
```bash
|
| 243 |
+
huggingface-cli login
|
| 244 |
+
huggingface-cli repo create joshua-project --type dataset
|
| 245 |
+
cd huggingface_dataset/
|
| 246 |
+
git add . && git commit -m "Initial commit"
|
| 247 |
+
git push
|
| 248 |
+
```
|
| 249 |
+
|
| 250 |
+
4. **Update visualizations**:
|
| 251 |
+
- Use enriched JSON for browser-based viz
|
| 252 |
+
- Reference HF dataset in documentation
|
| 253 |
+
- Add loading examples to README
|
| 254 |
+
|
| 255 |
+
---
|
| 256 |
+
|
| 257 |
+
## Maintenance Strategy
|
| 258 |
+
|
| 259 |
+
**When to update**:
|
| 260 |
+
- Quarterly: Refresh from API to get latest population estimates
|
| 261 |
+
- On-demand: When Joshua Project announces major updates
|
| 262 |
+
|
| 263 |
+
**Update workflow**:
|
| 264 |
+
```bash
|
| 265 |
+
# 1. Fetch latest normalized data
|
| 266 |
+
python3 fetch_all_datasets.py
|
| 267 |
+
|
| 268 |
+
# 2. Regenerate enriched versions
|
| 269 |
+
python3 create_enriched_datasets.py
|
| 270 |
+
|
| 271 |
+
# 3. Update Hugging Face
|
| 272 |
+
python3 prepare_huggingface_dataset.py
|
| 273 |
+
cd huggingface_dataset && git push
|
| 274 |
+
```
|
| 275 |
+
|
| 276 |
+
**Version tracking**: Use `dataset_metadata.json` fetch dates as version identifiers
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Luke Steuber
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Joshua Project Global Peoples Dataset
|
| 2 |
+
|
| 3 |
+
[](LICENSE)
|
| 4 |
+
[](https://joshuaproject.net)
|
| 5 |
+
[](https://huggingface.co/datasets/lukeslp/joshua-project-peoples)
|
| 6 |
+
[](https://www.kaggle.com/datasets/lukeslp/joshua-project-global-peoples)
|
| 7 |
+
|
| 8 |
+
Comprehensive demographic, linguistic, and religious data for **16,382 people groups** across **238 countries** and **7,134 languages**, fetched directly from the [Joshua Project API](https://api.joshuaproject.net/).
|
| 9 |
+
|
| 10 |
+
Part of the [Data Trove](https://dr.eamer.dev/datavis/data_trove/) collection at [dr.eamer.dev](https://dr.eamer.dev).
|
| 11 |
+
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
## What's Inside
|
| 15 |
+
|
| 16 |
+
| File | Records | Size | Format |
|
| 17 |
+
|------|---------|------|--------|
|
| 18 |
+
| `joshua_project_full_dump.json` | 16,382 people groups | 130 MB | JSON (LFS) |
|
| 19 |
+
| `joshua_project_countries.json` | 238 countries | 286 KB | JSON |
|
| 20 |
+
| `joshua_project_languages.json` | 7,134 languages | 4.9 MB | JSON |
|
| 21 |
+
| `joshua_project_totals.json` | 38 global stats | 3 KB | JSON |
|
| 22 |
+
| `joshua_project_enriched.parquet` | 16,382 (denormalized) | 6.2 MB | Parquet (LFS) |
|
| 23 |
+
| `joshua_project_unreached.parquet` | 7,124 unreached | 3.8 MB | Parquet (LFS) |
|
| 24 |
+
|
| 25 |
+
**Enriched** variants embed country and language data directly into each people-group record -- no joins required.
|
| 26 |
+
|
| 27 |
+
**Parquet** variants are 95% smaller than their JSON equivalents and load 10-100x faster in pandas.
|
| 28 |
+
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
## Quick Start
|
| 32 |
+
|
| 33 |
+
### Python / pandas
|
| 34 |
+
|
| 35 |
+
```python
|
| 36 |
+
import pandas as pd
|
| 37 |
+
|
| 38 |
+
# Load the enriched dataset (recommended)
|
| 39 |
+
df = pd.read_parquet("joshua_project_enriched.parquet")
|
| 40 |
+
|
| 41 |
+
# Unreached people groups in Asia
|
| 42 |
+
unreached_sa = df[(df["LeastReached"] == "Y") & (df["ROG3Continent"] == "Asia")]
|
| 43 |
+
print(f"{len(unreached_sa):,} unreached groups in Asia")
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
### D3.js / JavaScript
|
| 47 |
+
|
| 48 |
+
```javascript
|
| 49 |
+
const data = await d3.json("joshua_project_enriched.json");
|
| 50 |
+
|
| 51 |
+
// Top 10 unreached by population
|
| 52 |
+
const top = data
|
| 53 |
+
.filter(d => d.LeastReached === "Y")
|
| 54 |
+
.sort((a, b) => b.Population - a.Population)
|
| 55 |
+
.slice(0, 10);
|
| 56 |
+
```
|
| 57 |
+
|
| 58 |
+
### Command Line
|
| 59 |
+
|
| 60 |
+
```bash
|
| 61 |
+
# Refresh all datasets from the API
|
| 62 |
+
export JOSHUA_PROJECT_API_KEY="your_key_here"
|
| 63 |
+
python3 fetch_all_datasets.py
|
| 64 |
+
|
| 65 |
+
# Regenerate enriched + parquet files
|
| 66 |
+
python3 create_enriched_datasets.py
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
Get an API key free at [joshuaproject.net/api](https://joshuaproject.net/api).
|
| 70 |
+
|
| 71 |
+
---
|
| 72 |
+
|
| 73 |
+
## Dataset Relationships
|
| 74 |
+
|
| 75 |
+
```
|
| 76 |
+
People Groups ──┬── ROG3 ──▶ Countries
|
| 77 |
+
└── ROL3 ──▶ Languages
|
| 78 |
+
|
| 79 |
+
Totals = global aggregates across all people groups
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
- **`ROG3`** — 2–3 character country code (e.g., `IN` = India)
|
| 83 |
+
- **`ROL3`** — 3-letter language code, ISO 639-3 (e.g., `hin` = Hindi)
|
| 84 |
+
- **`PeopleID3`** — unique people-group identifier
|
| 85 |
+
|
| 86 |
+
---
|
| 87 |
+
|
| 88 |
+
## Key Fields
|
| 89 |
+
|
| 90 |
+
| Field | Description |
|
| 91 |
+
|-------|-------------|
|
| 92 |
+
| `PeopNameInCountry` | People group name within a specific country |
|
| 93 |
+
| `Population` | Estimated population |
|
| 94 |
+
| `PrimaryReligion` | Predominant religion |
|
| 95 |
+
| `LeastReached` | `Y` if ≤ 2% evangelical and ≤ 5% Christian adherents |
|
| 96 |
+
| `JPScale` | 1-5 scale of gospel access (1 = least reached) |
|
| 97 |
+
| `BibleStatus` | Bible translation completeness (0-5) |
|
| 98 |
+
| `PercentEvangelical` | Evangelical Christian percentage |
|
| 99 |
+
|
| 100 |
+
Full field definitions: [`archive/FieldDefinitions.csv`](archive/FieldDefinitions.csv)
|
| 101 |
+
|
| 102 |
+
---
|
| 103 |
+
|
| 104 |
+
## Refreshing the Data
|
| 105 |
+
|
| 106 |
+
The Joshua Project updates their data regularly. To pull the latest:
|
| 107 |
+
|
| 108 |
+
```bash
|
| 109 |
+
# 1. Set your API key
|
| 110 |
+
export JOSHUA_PROJECT_API_KEY="your_key_here"
|
| 111 |
+
|
| 112 |
+
# 2. Fetch normalized datasets (~5 seconds)
|
| 113 |
+
python3 fetch_all_datasets.py
|
| 114 |
+
|
| 115 |
+
# 3. Fetch full people groups dump (~30 seconds)
|
| 116 |
+
python3 fetch_full_data.py
|
| 117 |
+
|
| 118 |
+
# 4. Regenerate enriched datasets (~30 seconds)
|
| 119 |
+
python3 create_enriched_datasets.py
|
| 120 |
+
```
|
| 121 |
+
|
| 122 |
+
I recommend refreshing quarterly.
|
| 123 |
+
|
| 124 |
+
---
|
| 125 |
+
|
| 126 |
+
## Project Structure
|
| 127 |
+
|
| 128 |
+
```
|
| 129 |
+
├── joshua_project_full_dump.json # 16,382 people groups (source of truth)
|
| 130 |
+
├── joshua_project_countries.json # 238 countries
|
| 131 |
+
├── joshua_project_languages.json # 7,134 languages
|
| 132 |
+
├── joshua_project_totals.json # 38 global summary stats
|
| 133 |
+
├── joshua_project_enriched.parquet # Denormalized, analysis-ready
|
| 134 |
+
├── joshua_project_unreached.parquet # Unreached subset only
|
| 135 |
+
│
|
| 136 |
+
├── fetch_all_datasets.py # Fetch countries/languages/totals
|
| 137 |
+
├── fetch_full_data.py # Fetch full people groups dump
|
| 138 |
+
├── create_enriched_datasets.py # Generate enriched + parquet
|
| 139 |
+
├── data_utilities.py # Python loading helpers
|
| 140 |
+
│
|
| 141 |
+
├── ARCHITECTURE.md # System design overview
|
| 142 |
+
├── DATASET_CARD.md # HuggingFace dataset card
|
| 143 |
+
├── USAGE_GUIDE.md # Detailed usage examples
|
| 144 |
+
├── LICENSE # MIT
|
| 145 |
+
└── archive/ # Legacy CSVs (2016 era)
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
---
|
| 149 |
+
|
| 150 |
+
## Documentation
|
| 151 |
+
|
| 152 |
+
| Doc | Purpose |
|
| 153 |
+
|-----|---------|
|
| 154 |
+
| [ARCHITECTURE.md](ARCHITECTURE.md) | Normalized vs. enriched design, data flow diagrams |
|
| 155 |
+
| [DATASET_CARD.md](DATASET_CARD.md) | HuggingFace-format dataset card with bias/limitations |
|
| 156 |
+
| [USAGE_GUIDE.md](USAGE_GUIDE.md) | Detailed Python, D3.js, and R usage examples |
|
| 157 |
+
| [DATA_INTEGRATION_STRATEGY.md](DATA_INTEGRATION_STRATEGY.md) | Technical integration and enrichment strategy |
|
| 158 |
+
|
| 159 |
+
---
|
| 160 |
+
|
| 161 |
+
## Data Source & Attribution
|
| 162 |
+
|
| 163 |
+
All data originates from the [Joshua Project](https://joshuaproject.net), a research initiative tracking people groups worldwide. The API is maintained by [Missional Digerati](https://missionaldigerati.org).
|
| 164 |
+
|
| 165 |
+
If you use this dataset, please cite:
|
| 166 |
+
|
| 167 |
+
```bibtex
|
| 168 |
+
@dataset{joshua_project_2025,
|
| 169 |
+
title = {Joshua Project Global Peoples Dataset},
|
| 170 |
+
author = {Joshua Project},
|
| 171 |
+
year = {2025},
|
| 172 |
+
url = {https://joshuaproject.net},
|
| 173 |
+
note = {Packaged by Luke Steuber, fetched December 2025 via API v1}
|
| 174 |
+
}
|
| 175 |
+
```
|
| 176 |
+
|
| 177 |
+
---
|
| 178 |
+
|
| 179 |
+
## Related
|
| 180 |
+
|
| 181 |
+
- [Data Trove](https://dr.eamer.dev/datavis/data_trove/) — full dataset catalog
|
| 182 |
+
- [lukesteuber.com](https://lukesteuber.com) — portfolio
|
| 183 |
+
- [HuggingFace Dataset](https://huggingface.co/datasets/lukeslp/joshua-project-peoples)
|
| 184 |
+
- [Kaggle Dataset](https://www.kaggle.com/datasets/lukeslp/joshua-project-global-peoples)
|
| 185 |
+
|
| 186 |
+
---
|
| 187 |
+
|
| 188 |
+
## License
|
| 189 |
+
|
| 190 |
+
MIT. See [LICENSE](LICENSE).
|
| 191 |
+
|
| 192 |
+
The underlying data is provided by Joshua Project for research purposes. Check [joshuaproject.net](https://joshuaproject.net) for their terms of use.
|
USAGE_GUIDE.md
ADDED
|
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Joshua Project Dataset Usage Guide
|
| 2 |
+
|
| 3 |
+
## 🎯 Quick Start
|
| 4 |
+
|
| 5 |
+
You now have **8 dataset files** ready for visualization and analysis:
|
| 6 |
+
|
| 7 |
+
### Normalized Datasets (For relational queries)
|
| 8 |
+
```
|
| 9 |
+
joshua_project_full_dump.json 130 MB 16,382 people groups
|
| 10 |
+
joshua_project_countries.json 286 KB 238 countries
|
| 11 |
+
joshua_project_languages.json 4.9 MB 7,134 languages
|
| 12 |
+
joshua_project_totals.json 3.1 KB 38 global stats
|
| 13 |
+
```
|
| 14 |
+
|
| 15 |
+
### Enriched Datasets (For visualization - **recommended**)
|
| 16 |
+
```
|
| 17 |
+
joshua_project_enriched.json 139 MB 16,382 people groups (with embedded country/language data)
|
| 18 |
+
joshua_project_enriched.parquet 6.2 MB ↑ Same data, 95.5% smaller ↑
|
| 19 |
+
joshua_project_unreached.json 72 MB 7,124 unreached peoples only
|
| 20 |
+
joshua_project_unreached.parquet 3.8 MB ↑ Same data, compressed ↑
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
---
|
| 24 |
+
|
| 25 |
+
## 📊 Which Format Should I Use?
|
| 26 |
+
|
| 27 |
+
### For D3.js / Observable / Browser Visualizations
|
| 28 |
+
✅ **Use JSON enriched datasets**
|
| 29 |
+
|
| 30 |
+
```javascript
|
| 31 |
+
// Load full enriched data
|
| 32 |
+
d3.json('joshua_project_enriched.json').then(data => {
|
| 33 |
+
// All country and language info embedded - no joins needed!
|
| 34 |
+
const unreached = data.filter(d => d.LeastReached === 'Y');
|
| 35 |
+
|
| 36 |
+
// Create visualization
|
| 37 |
+
svg.selectAll('circle')
|
| 38 |
+
.data(unreached)
|
| 39 |
+
.enter().append('circle')
|
| 40 |
+
.attr('cx', d => projection([d.Longitude, d.Latitude])[0])
|
| 41 |
+
.attr('r', d => Math.sqrt(d.Population) / 100)
|
| 42 |
+
.attr('fill', d => d.country_data.continent === 'Asia' ? 'red' : 'blue');
|
| 43 |
+
});
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
**Why?**
|
| 47 |
+
- ✅ Single file load - no joins needed
|
| 48 |
+
- ✅ All related data embedded (country info, language info)
|
| 49 |
+
- ✅ Works directly in browsers
|
| 50 |
+
- ✅ No dependencies
|
| 51 |
+
|
| 52 |
+
**Size optimization**: Use `joshua_project_unreached.json` (72 MB) if focusing on unreached peoples only.
|
| 53 |
+
|
| 54 |
+
---
|
| 55 |
+
|
| 56 |
+
### For Python Analysis (pandas, polars, etc.)
|
| 57 |
+
✅ **Use Parquet files**
|
| 58 |
+
|
| 59 |
+
```python
|
| 60 |
+
import pandas as pd
|
| 61 |
+
|
| 62 |
+
# Load enriched data (6.2 MB - much faster than 139 MB JSON!)
|
| 63 |
+
df = pd.read_parquet('joshua_project_enriched.parquet')
|
| 64 |
+
|
| 65 |
+
# Query unreached Hindi speakers in India
|
| 66 |
+
unreached_hindi = df[
|
| 67 |
+
(df['ROG3'] == 'IN') &
|
| 68 |
+
(df['ROL3'] == 'hin') &
|
| 69 |
+
(df['LeastReached'] == 'Y')
|
| 70 |
+
]
|
| 71 |
+
|
| 72 |
+
print(f"Found {len(unreached_hindi)} unreached Hindi-speaking groups")
|
| 73 |
+
print(f"Total population: {unreached_hindi['Population'].sum():,}")
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
**Why?**
|
| 77 |
+
- ✅ 95.5% smaller than JSON (6.2 MB vs 139 MB)
|
| 78 |
+
- ✅ 10-100x faster to load
|
| 79 |
+
- ✅ Columnar format = efficient filtering
|
| 80 |
+
- ✅ Strongly typed - no parsing errors
|
| 81 |
+
|
| 82 |
+
---
|
| 83 |
+
|
| 84 |
+
### For Data Utilities (Easy Queries)
|
| 85 |
+
✅ **Use `data_utilities.py`**
|
| 86 |
+
|
| 87 |
+
```python
|
| 88 |
+
from data_utilities import *
|
| 89 |
+
|
| 90 |
+
# Get all people groups in a country
|
| 91 |
+
india = get_by_country('IN')
|
| 92 |
+
print(f"India has {len(india):,} people groups")
|
| 93 |
+
|
| 94 |
+
# Get Hindi speakers
|
| 95 |
+
hindi_speakers = get_by_language('hin')
|
| 96 |
+
|
| 97 |
+
# Get unreached only
|
| 98 |
+
unreached = load_unreached()
|
| 99 |
+
|
| 100 |
+
# Get country details
|
| 101 |
+
india_info = get_country_info('IN')
|
| 102 |
+
print(f"India: {india_info['PercentEvangelical']:.2f}% evangelical")
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
**Why?**
|
| 106 |
+
- ✅ Simple functions for common queries
|
| 107 |
+
- ✅ No need to remember file names
|
| 108 |
+
- ✅ Automatic loading and caching
|
| 109 |
+
- ✅ Works with both JSON and Parquet
|
| 110 |
+
|
| 111 |
+
---
|
| 112 |
+
|
| 113 |
+
## 🗺️ Visualization Examples
|
| 114 |
+
|
| 115 |
+
### Example 1: World Map of Unreached Peoples
|
| 116 |
+
```javascript
|
| 117 |
+
// Load enriched unreached data
|
| 118 |
+
d3.json('joshua_project_unreached.json').then(peoples => {
|
| 119 |
+
// Group by country
|
| 120 |
+
const byCountry = d3.rollup(
|
| 121 |
+
peoples,
|
| 122 |
+
v => ({
|
| 123 |
+
count: v.length,
|
| 124 |
+
population: d3.sum(v, d => d.Population)
|
| 125 |
+
}),
|
| 126 |
+
d => d.ROG3
|
| 127 |
+
);
|
| 128 |
+
|
| 129 |
+
// Color countries by unreached population
|
| 130 |
+
svg.selectAll('.country')
|
| 131 |
+
.data(countries)
|
| 132 |
+
.attr('fill', d => {
|
| 133 |
+
const data = byCountry.get(d.properties.iso_a3);
|
| 134 |
+
return data ? populationScale(data.population) : '#eee';
|
| 135 |
+
});
|
| 136 |
+
});
|
| 137 |
+
```
|
| 138 |
+
|
| 139 |
+
### Example 2: Language Family Tree
|
| 140 |
+
```python
|
| 141 |
+
import pandas as pd
|
| 142 |
+
import plotly.express as px
|
| 143 |
+
|
| 144 |
+
# Load enriched data
|
| 145 |
+
df = pd.read_parquet('joshua_project_enriched.parquet')
|
| 146 |
+
|
| 147 |
+
# Group by language, sum populations
|
| 148 |
+
lang_pop = df.groupby('PrimaryLanguageName')['Population'].sum().reset_index()
|
| 149 |
+
lang_pop = lang_pop.sort_values('Population', ascending=False).head(20)
|
| 150 |
+
|
| 151 |
+
# Create treemap
|
| 152 |
+
fig = px.treemap(
|
| 153 |
+
lang_pop,
|
| 154 |
+
path=['PrimaryLanguageName'],
|
| 155 |
+
values='Population',
|
| 156 |
+
title='Top 20 Languages by People Group Population'
|
| 157 |
+
)
|
| 158 |
+
fig.show()
|
| 159 |
+
```
|
| 160 |
+
|
| 161 |
+
### Example 3: Religion Distribution by Continent
|
| 162 |
+
```python
|
| 163 |
+
import pandas as pd
|
| 164 |
+
import plotly.express as px
|
| 165 |
+
|
| 166 |
+
df = pd.read_parquet('joshua_project_enriched.parquet')
|
| 167 |
+
|
| 168 |
+
# Extract continent from embedded country_data
|
| 169 |
+
df['continent'] = df['country_data'].apply(lambda x: x.get('continent', 'Unknown') if x else 'Unknown')
|
| 170 |
+
|
| 171 |
+
# Count people groups by religion and continent
|
| 172 |
+
religion_by_continent = df.groupby(['continent', 'PrimaryReligion']).size().reset_index(name='count')
|
| 173 |
+
|
| 174 |
+
# Sunburst chart
|
| 175 |
+
fig = px.sunburst(
|
| 176 |
+
religion_by_continent,
|
| 177 |
+
path=['continent', 'PrimaryReligion'],
|
| 178 |
+
values='count',
|
| 179 |
+
title='People Groups by Continent and Religion'
|
| 180 |
+
)
|
| 181 |
+
fig.show()
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
---
|
| 185 |
+
|
| 186 |
+
## 📦 Uploading to Hugging Face
|
| 187 |
+
|
| 188 |
+
### Step 1: Install Hugging Face CLI
|
| 189 |
+
```bash
|
| 190 |
+
pip install huggingface_hub
|
| 191 |
+
huggingface-cli login
|
| 192 |
+
```
|
| 193 |
+
|
| 194 |
+
### Step 2: Create Dataset Repository
|
| 195 |
+
```bash
|
| 196 |
+
huggingface-cli repo create joshua-project --type dataset
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
### Step 3: Prepare Files
|
| 200 |
+
```bash
|
| 201 |
+
mkdir huggingface_dataset
|
| 202 |
+
cd huggingface_dataset
|
| 203 |
+
|
| 204 |
+
# Copy Parquet files (recommended format for HF)
|
| 205 |
+
cp ../joshua_project_enriched.parquet ./data.parquet
|
| 206 |
+
cp ../joshua_project_unreached.parquet ./unreached.parquet
|
| 207 |
+
cp ../joshua_project_countries.json ./countries.json
|
| 208 |
+
cp ../joshua_project_languages.json ./languages.json
|
| 209 |
+
|
| 210 |
+
# Copy dataset card
|
| 211 |
+
cp ../DATASET_CARD.md ./README.md
|
| 212 |
+
|
| 213 |
+
# Create loading script (optional but recommended)
|
| 214 |
+
cat > joshua_project.py << 'EOF'
|
| 215 |
+
import datasets
|
| 216 |
+
|
| 217 |
+
_DESCRIPTION = "Joshua Project global peoples dataset"
|
| 218 |
+
_URLS = {
|
| 219 |
+
"enriched": "data.parquet",
|
| 220 |
+
"unreached": "unreached.parquet",
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
class JoshuaProject(datasets.GeneratorBasedBuilder):
|
| 224 |
+
def _info(self):
|
| 225 |
+
return datasets.DatasetInfo(description=_DESCRIPTION)
|
| 226 |
+
|
| 227 |
+
def _split_generators(self, dl_manager):
|
| 228 |
+
urls = _URLS
|
| 229 |
+
data_dir = dl_manager.download_and_extract(urls)
|
| 230 |
+
return [
|
| 231 |
+
datasets.SplitGenerator(
|
| 232 |
+
name="enriched",
|
| 233 |
+
gen_kwargs={"filepath": data_dir["enriched"]},
|
| 234 |
+
),
|
| 235 |
+
]
|
| 236 |
+
EOF
|
| 237 |
+
```
|
| 238 |
+
|
| 239 |
+
### Step 4: Upload
|
| 240 |
+
```bash
|
| 241 |
+
git add .
|
| 242 |
+
git commit -m "Initial commit: Joshua Project dataset"
|
| 243 |
+
git push
|
| 244 |
+
```
|
| 245 |
+
|
| 246 |
+
### Step 5: Use from Hugging Face
|
| 247 |
+
```python
|
| 248 |
+
from datasets import load_dataset
|
| 249 |
+
|
| 250 |
+
# Load from your HF repo
|
| 251 |
+
ds = load_dataset("your-username/joshua-project", "enriched")
|
| 252 |
+
|
| 253 |
+
# Convert to pandas
|
| 254 |
+
import pandas as pd
|
| 255 |
+
df = ds['enriched'].to_pandas()
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
---
|
| 259 |
+
|
| 260 |
+
## 🔄 Updating the Data
|
| 261 |
+
|
| 262 |
+
### Refresh All Datasets (Quarterly Recommended)
|
| 263 |
+
```bash
|
| 264 |
+
# 1. Fetch latest from API
|
| 265 |
+
python3 fetch_all_datasets.py
|
| 266 |
+
|
| 267 |
+
# 2. Regenerate enriched versions
|
| 268 |
+
python3 create_enriched_datasets.py
|
| 269 |
+
|
| 270 |
+
# 3. Push to Hugging Face (if applicable)
|
| 271 |
+
cd huggingface_dataset
|
| 272 |
+
cp ../joshua_project_enriched.parquet ./data.parquet
|
| 273 |
+
git add . && git commit -m "Update: $(date +%Y-%m-%d)" && git push
|
| 274 |
+
```
|
| 275 |
+
|
| 276 |
+
---
|
| 277 |
+
|
| 278 |
+
## 📚 Data Structure Reference
|
| 279 |
+
|
| 280 |
+
### Enriched Record Structure
|
| 281 |
+
```json
|
| 282 |
+
{
|
| 283 |
+
// Original people group fields (107 fields)
|
| 284 |
+
"PeopleID3": 10208,
|
| 285 |
+
"PeopNameInCountry": "Tuareg, Air",
|
| 286 |
+
"Population": 517000,
|
| 287 |
+
"LeastReached": "Y",
|
| 288 |
+
"JPScale": 1,
|
| 289 |
+
"PrimaryReligion": "Islam",
|
| 290 |
+
"ROG3": "NG",
|
| 291 |
+
"ROL3": "thz",
|
| 292 |
+
|
| 293 |
+
// Embedded country data (9 fields)
|
| 294 |
+
"country_data": {
|
| 295 |
+
"name": "Niger",
|
| 296 |
+
"continent": null,
|
| 297 |
+
"region": "Africa, West and Central",
|
| 298 |
+
"percent_christianity": 1.62,
|
| 299 |
+
"percent_evangelical": 1.02,
|
| 300 |
+
"total_peoples": 36,
|
| 301 |
+
"unreached_peoples": 30,
|
| 302 |
+
"jp_scale": 1
|
| 303 |
+
},
|
| 304 |
+
|
| 305 |
+
// Embedded language data (9 fields)
|
| 306 |
+
"language_data": {
|
| 307 |
+
"name": "Tamajeq, Tayart",
|
| 308 |
+
"hub_country": "Niger",
|
| 309 |
+
"bible_status": 4,
|
| 310 |
+
"bible_year": null,
|
| 311 |
+
"nt_year": "1990-2003",
|
| 312 |
+
"portions_year": "1934-1998",
|
| 313 |
+
"has_jesus_film": "N",
|
| 314 |
+
"has_audio_recordings": "Y",
|
| 315 |
+
"status": "L"
|
| 316 |
+
}
|
| 317 |
+
}
|
| 318 |
+
```
|
| 319 |
+
|
| 320 |
+
### Key Fields Explained
|
| 321 |
+
|
| 322 |
+
| Field | Description | Values |
|
| 323 |
+
|-------|-------------|--------|
|
| 324 |
+
| `LeastReached` | Unreached status | "Y" or "N" |
|
| 325 |
+
| `JPScale` | Gospel access scale | 1 (least) to 5 (most) |
|
| 326 |
+
| `BibleStatus` | Bible translation | 0 (none) to 5 (complete) |
|
| 327 |
+
| `PrimaryReligion` | Predominant religion | "Islam", "Buddhism", "Hinduism", etc. |
|
| 328 |
+
| `Population` | Estimated population | Integer |
|
| 329 |
+
| `PercentEvangelical` | % evangelical Christian | 0.0 to 100.0 |
|
| 330 |
+
|
| 331 |
+
---
|
| 332 |
+
|
| 333 |
+
## 🎨 Recommended Visualizations
|
| 334 |
+
|
| 335 |
+
1. **Choropleth Map**: Countries colored by % unreached peoples
|
| 336 |
+
2. **Bubble Map**: Unreached populations as circles on world map
|
| 337 |
+
3. **Treemap**: Languages by population, colored by Bible translation status
|
| 338 |
+
4. **Sankey Diagram**: Flow from continent → religion → reached status
|
| 339 |
+
5. **Bar Chart**: Top 20 unreached people groups by population
|
| 340 |
+
6. **Network Graph**: Language families and their people groups
|
| 341 |
+
7. **Timeline**: Bible translation progress over time
|
| 342 |
+
8. **Heatmap**: JP Scale by country and religion
|
| 343 |
+
|
| 344 |
+
---
|
| 345 |
+
|
| 346 |
+
## 💡 Tips & Best Practices
|
| 347 |
+
|
| 348 |
+
### Performance
|
| 349 |
+
- ✅ Use Parquet for analysis (95.5% smaller, 10-100x faster)
|
| 350 |
+
- ✅ Use unreached subset when possible (43.5% of data)
|
| 351 |
+
- ✅ Filter by region/continent to reduce data for regional visualizations
|
| 352 |
+
|
| 353 |
+
### Data Quality
|
| 354 |
+
- ⚠️ Population figures are estimates, not exact
|
| 355 |
+
- ⚠️ Some people groups have incomplete language/country data
|
| 356 |
+
- ⚠️ Religious percentages are approximations based on research
|
| 357 |
+
|
| 358 |
+
### Refreshing Data
|
| 359 |
+
- 🔄 Update quarterly to get latest population estimates
|
| 360 |
+
- 🔄 Check Joshua Project announcements for major data updates
|
| 361 |
+
- 🔄 Version your datasets using fetch dates from `dataset_metadata.json`
|
| 362 |
+
|
| 363 |
+
---
|
| 364 |
+
|
| 365 |
+
## 📖 Further Reading
|
| 366 |
+
|
| 367 |
+
- **Strategy Document**: `DATA_INTEGRATION_STRATEGY.md` - Detailed integration architecture
|
| 368 |
+
- **Dataset Card**: `DATASET_CARD.md` - Hugging Face-ready documentation
|
| 369 |
+
- **Main README**: `README.md` - Complete dataset inventory
|
| 370 |
+
- **Metadata**: `dataset_metadata.json` - Fetch dates and record counts
|
| 371 |
+
- **Joshua Project API**: https://api.joshuaproject.net/
|
| 372 |
+
- **Joshua Project Website**: https://joshuaproject.net/
|
archive/AllCountriesListing.csv
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Joshua Project People Group Data
|
| 2 |
+
|
| 3 |
+
ROG3,ISO3,ISO2,Ctry,PoplPeoples,CntPeoples,CntPeoplesLR,PoplPeoplesLR,JPScaleCtry,ROL3OfficialLanguage,OfficialLang,RLG3Primary,ReligionPrimary,PercentChristianity,PercentEvangelical,10_40Window,ROG2,Continent,RegionCode,RegionName,WorkersNeeded
|
| 4 |
+
AF,AFG,AF,Afghanistan,43595000,58,58,43595000,1,pbt,"Pashto, Southern",6,Islam,0.016497872839152,0.015321397584382,Y,ASI,Asia,5,"Asia, Central",900
|
| 5 |
+
AL,ALB,AL,Albania,2721000,10,2,36000,2,aln,"Albanian, Gheg",6,Islam,33.653240520088,0.58951990788792,Y,EUR,Europe,9,"Europe, Eastern and Eurasia",2
|
| 6 |
+
AG,DZA,DZ,Algeria,47390000,37,34,47311000,1,arb,"Arabic, Standard",6,Islam,0.082836679581891,0.049692712755008,Y,AFR,Africa,6,"Africa, North and Middle East",962
|
| 7 |
+
AQ,ASM,AS,"American Samoa",45000,9,2,800,5,smo,Samoan,1,Christianity,95.424546079322,25.325032529434,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 8 |
+
AN,AND,AD,Andorra,82000,9,2,1200,3,cat,Catalan,1,Christianity,90.595275188043,1.3674286474345,N,EUR,Europe,10,"Europe, Western",2
|
| 9 |
+
AO,AGO,AO,Angola,38792000,60,5,354000,5,por,Portuguese,1,Christianity,91.060787785578,23.272612801625,N,AFR,Africa,7,"Africa, East and Southern",8
|
| 10 |
+
AV,AIA,AI,Anguilla,15000,5,,,5,eng,English,1,Christianity,90.603753795263,34.622367272221,N,NAR,"North America",12,"America, North and Caribbean",
|
| 11 |
+
AC,ATG,AG,"Antigua and Barbuda",92000,4,,,5,eng,English,1,Christianity,92.93917590582,25.145831533438,N,NAR,"North America",12,"America, North and Caribbean",
|
| 12 |
+
AR,ARG,AR,Argentina,45455000,78,2,236000,5,spa,Spanish,1,Christianity,93.040134837522,11.067666945651,N,LAM,"South America",11,"America, Latin",5
|
| 13 |
+
AM,ARM,AM,Armenia,2899000,10,3,60000,4,hye,Armenian,1,Christianity,92.224818744642,9.345574016828,N,EUR,Europe,5,"Asia, Central",3
|
| 14 |
+
AA,ABW,AW,Aruba,106000,9,1,,4,nld,Dutch,1,Christianity,95.517956513554,9.9494531033789,N,NAR,"North America",12,"America, North and Caribbean",
|
| 15 |
+
AS,AUS,AU,Australia,26864000,205,38,1779000,5,eng,English,1,Christianity,62.294922072405,13.716889295181,N,AUS,Australia,1,"Australia and Pacific",50
|
| 16 |
+
AU,AUT,AT,Austria,9063000,46,13,411000,3,deu,"German, Standard",1,Christianity,67.36500597223,0.61375890026505,N,EUR,Europe,10,"Europe, Western",15
|
| 17 |
+
AJ,AZE,AZ,Azerbaijan,10272000,34,25,9950000,1,azj,"Azerbaijani, North",6,Islam,2.2342710888352,0.21416827994896,Y,ASI,Asia,5,"Asia, Central",209
|
| 18 |
+
BF,BHS,BS,Bahamas,398000,7,,,5,eng,English,1,Christianity,94.533712557826,40.130268436278,N,NAR,"North America",12,"America, North and Caribbean",
|
| 19 |
+
BA,BHR,BH,Bahrain,1603000,19,10,1204000,4,arb,"Arabic, Standard",6,Islam,8.6677674481377,2.323810140306,Y,ASI,Asia,6,"Africa, North and Middle East",26
|
| 20 |
+
BG,BGD,BD,Bangladesh,174343000,278,256,172407000,1,ben,Bengali,6,Islam,0.50480463828066,,Y,ASI,Asia,4,"Asia, South",3591
|
| 21 |
+
BB,BRB,BB,Barbados,278000,4,1,3900,5,eng,English,1,Christianity,92.571582804209,34.204887130138,N,NAR,"North America",12,"America, North and Caribbean",1
|
| 22 |
+
BO,BLR,BY,Belarus,8928000,16,5,89000,3,bel,Belarusian,1,Christianity,68.972653956349,1.6736402274911,N,EUR,Europe,9,"Europe, Eastern and Eurasia",5
|
| 23 |
+
BE,BEL,BE,Belgium,11601000,54,25,943000,3,vls,"West Flemish",1,Christianity,64.165545739097,1.4434865263031,N,EUR,Europe,10,"Europe, Western",33
|
| 24 |
+
BH,BLZ,BZ,Belize,408000,13,2,8000,5,eng,English,1,Christianity,86.395177233415,24.162920828686,N,NAR,"North America",11,"America, Latin",2
|
| 25 |
+
BN,BEN,BJ,Benin,14608000,62,11,2688000,4,fra,French,4,"Ethnic Religions",31.435675299805,7.5047505427758,Y,AFR,Africa,8,"Africa, West and Central",57
|
| 26 |
+
BD,BMU,BM,Bermuda,63000,6,,,5,eng,English,1,Christianity,91.681532277134,26.843640372347,N,NAR,"North America",12,"America, North and Caribbean",
|
| 27 |
+
BT,BTN,BT,Bhutan,780000,52,51,778000,1,dzo,Dzongkha,2,Buddhism,0.23921040589033,,Y,ASI,Asia,4,"Asia, South",56
|
| 28 |
+
BL,BOL,BO,Bolivia,12456000,43,2,23000,5,spa,Spanish,1,Christianity,91.758350241412,19.42805506425,N,LAM,"South America",11,"America, Latin",2
|
| 29 |
+
BK,BIH,BA,Bosnia-Herzegovina,3091000,8,3,1590000,2,bos,Bosnian,6,Islam,40.663045303825,0.063692294256387,N,EUR,Europe,9,"Europe, Eastern and Eurasia",34
|
| 30 |
+
BC,BWA,BW,Botswana,2505000,45,1,7000,4,eng,English,1,Christianity,65.441913948587,8.3449459917658,N,AFR,Africa,7,"Africa, East and Southern",1
|
| 31 |
+
BR,BRA,BR,Brazil,212153000,321,52,768000,5,por,Portuguese,1,Christianity,89.920709851728,25.158090716103,N,LAM,"South America",11,"America, Latin",62
|
| 32 |
+
IO,IOT,IO,"British Indian Ocean Territory",3400,2,,,5,eng,English,1,Christianity,79.55943039814,33.830281894798,N,ASI,Asia,4,"Asia, South",
|
| 33 |
+
VI,VGB,VG,"British Virgin Islands",39000,4,,,5,eng,English,1,Christianity,90.793911368015,29.206037251124,N,NAR,"North America",12,"America, North and Caribbean",
|
| 34 |
+
BX,BRN,BN,Brunei,459000,24,9,290000,4,zlm,Malay,6,Islam,8.1962162612715,4.1844940987119,Y,ASI,Asia,2,"Asia, Southeast",12
|
| 35 |
+
BU,BGR,BG,Bulgaria,6617000,16,5,607000,3,bul,Bulgarian,1,Christianity,82.158594676585,1.9979172038083,N,EUR,Europe,9,"Europe, Eastern and Eurasia",14
|
| 36 |
+
UV,BFA,BF,"Burkina Faso",23859000,77,27,5680000,5,fra,French,6,Islam,21.097249705317,10.662722014062,Y,AFR,Africa,8,"Africa, West and Central",126
|
| 37 |
+
BY,BDI,BI,Burundi,14214000,6,1,16000,5,run,Rundi,1,Christianity,93.13298043555,30.493582774424,N,AFR,Africa,7,"Africa, East and Southern",1
|
| 38 |
+
CB,KHM,KH,Cambodia,17753000,38,16,17455000,1,khm,Khmer,2,Buddhism,3.3029881999695,1.3846311056069,Y,ASI,Asia,2,"Asia, Southeast",356
|
| 39 |
+
CM,CMR,CM,Cameroon,29719000,292,18,4539000,4,fra,French,1,Christianity,50.248207975232,8.6437022637027,N,AFR,Africa,8,"Africa, West and Central",98
|
| 40 |
+
CA,CAN,CA,Canada,39933000,243,52,2985000,4,eng,English,1,Christianity,70.604284258657,7.6158419075886,N,NAR,"North America",12,"America, North and Caribbean",83
|
| 41 |
+
CV,CPV,CV,"Cape Verde",510000,5,2,67000,4,por,Portuguese,1,Christianity,84.251431883767,8.2109763238309,N,AFR,Africa,8,"Africa, West and Central",2
|
| 42 |
+
CJ,CYM,KY,"Cayman Islands",75000,9,,,5,eng,English,1,Christianity,81.986813142888,21.420122048713,N,NAR,"North America",12,"America, North and Caribbean",
|
| 43 |
+
CT,CAF,CF,"Central African Republic",5447000,80,7,491000,5,fra,French,1,Christianity,70.989152308308,32.198720046378,N,AFR,Africa,8,"Africa, West and Central",12
|
| 44 |
+
CD,TCD,TD,Chad,20853000,141,81,11755000,4,arb,"Arabic, Standard",6,Islam,25.260023850086,7.3174018778335,Y,AFR,Africa,8,"Africa, West and Central",267
|
| 45 |
+
CI,CHL,CL,Chile,19749000,22,2,37000,5,spa,Spanish,1,Christianity,87.074540528418,23.134633493173,N,LAM,"South America",11,"America, Latin",2
|
| 46 |
+
CH,CHN,CN,China,1397676000,547,442,139759000,4,cmn,"Chinese, Mandarin",7,Non-Religious,9.2133827073397,7.5797079114093,Y,ASI,Asia,3,"Asia, Northeast",3020
|
| 47 |
+
HK,HKG,HK,"China, Hong Kong",7340000,19,8,430000,4,cmn,"Chinese, Mandarin",4,"Ethnic Religions",13.062768481909,6.112576809498,Y,ASI,Asia,3,"Asia, Northeast",13
|
| 48 |
+
MC,MAC,MO,"China, Macau",711000,9,3,630000,2,yue,"Chinese, Yue",4,"Ethnic Religions",7.946051588882,1.9420861666247,Y,ASI,Asia,3,"Asia, Northeast",13
|
| 49 |
+
KT,CXR,CX,"Christmas Island",1600,4,2,700,4,eng,English,6,Islam,14.926349206349,2.2177777777778,N,SOP,Oceania,1,"Australia and Pacific",2
|
| 50 |
+
CK,CCK,CC,"Cocos (Keeling) Islands",600,2,1,400,4,eng,English,6,Islam,18.808011303878,2.6868587576968,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 51 |
+
CO,COL,CO,Colombia,53291000,120,15,213000,5,spa,Spanish,1,Christianity,93.303745909752,10.767272235018,N,LAM,"South America",11,"America, Latin",17
|
| 52 |
+
CN,COM,KM,Comoros,858000,7,5,842000,1,swb,"Comorian, Maore",6,Islam,1.5759788221605,0.65666881797813,N,AFR,Africa,7,"Africa, East and Southern",18
|
| 53 |
+
CG,COD,CD,"Congo, Democratic Republic of",111944000,231,4,791000,5,fra,French,1,Christianity,91.0581136989,19.271307740958,N,AFR,Africa,8,"Africa, West and Central",16
|
| 54 |
+
CF,COG,CG,"Congo, Republic of the",6411000,71,3,55000,5,fra,French,1,Christianity,84.846643314738,14.757637434331,N,AFR,Africa,8,"Africa, West and Central",3
|
| 55 |
+
CW,COK,CK,"Cook Islands",13000,8,1,50,5,eng,English,1,Christianity,97.273857580759,13.504133509213,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 56 |
+
CS,CRI,CR,"Costa Rica",5101000,19,2,18000,5,spa,Spanish,1,Christianity,95.392946645474,18.493950492798,N,NAR,"North America",11,"America, Latin",2
|
| 57 |
+
IV,CIV,CI,"Côte d'Ivoire",32161000,105,32,6923000,5,fra,French,6,Islam,37.127028610609,12.725018968352,N,AFR,Africa,8,"Africa, West and Central",149
|
| 58 |
+
HR,HRV,HR,Croatia,3770000,17,3,31000,3,hrv,Croatian,1,Christianity,92.338593156982,0.4026558026884,N,EUR,Europe,9,"Europe, Eastern and Eurasia",3
|
| 59 |
+
CU,CUB,CU,Cuba,10819000,10,2,67000,5,spa,Spanish,1,Christianity,56.004287280157,11.358065995039,N,NAR,"North America",12,"America, North and Caribbean",2
|
| 60 |
+
UC,CUW,CW,Curacao,182000,16,3,1000,4,nld,Dutch,1,Christianity,85.834925032606,6.89947869697,N,NAR,"North America",12,"America, North and Caribbean",2
|
| 61 |
+
CY,CYP,CY,Cyprus,1330000,21,3,264000,3,ell,Greek,1,Christianity,71.232690429816,1.7319360624499,N,EUR,Europe,10,"Europe, Western",7
|
| 62 |
+
EZ,CZE,CZ,Czechia,10452000,16,2,17000,2,ces,Czech,7,Non-Religious,27.906622711011,0.74341833835643,N,EUR,Europe,9,"Europe, Eastern and Eurasia",2
|
| 63 |
+
DA,DNK,DK,Denmark,5870000,50,19,310000,4,dan,Danish,1,Christianity,81.469048947502,3.5897415173045,N,EUR,Europe,10,"Europe, Western",19
|
| 64 |
+
DJ,DJI,DJ,Djibouti,1146000,7,5,1117000,1,arb,"Arabic, Standard",6,Islam,2.2390846707174,0.077659592762469,Y,AFR,Africa,7,"Africa, East and Southern",24
|
| 65 |
+
DO,DMA,DM,Dominica,63000,7,2,1000,5,eng,English,1,Christianity,91.788668823483,18.122026841081,N,NAR,"North America",12,"America, North and Caribbean",2
|
| 66 |
+
DR,DOM,DO,"Dominican Republic",11400000,10,1,32000,5,spa,Spanish,1,Christianity,93.573670335247,10.90740256588,N,NAR,"North America",12,"America, North and Caribbean",1
|
| 67 |
+
EC,ECU,EC,Ecuador,18169000,33,1,50000,5,spa,Spanish,1,Christianity,93.451478446539,10.610093761378,N,LAM,"South America",11,"America, Latin",1
|
| 68 |
+
EG,EGY,EG,Egypt,117980000,45,32,82581000,4,arb,"Arabic, Standard",6,Islam,9.5423494524995,2.4445947895925,Y,AFR,Africa,6,"Africa, North and Middle East",1659
|
| 69 |
+
ES,SLV,SV,"El Salvador",6283000,7,1,26000,5,spa,Spanish,1,Christianity,94.110790046621,45.237356714973,N,NAR,"North America",11,"America, Latin",1
|
| 70 |
+
EK,GNQ,GQ,"Equatorial Guinea",1877000,16,1,39000,4,spa,Spanish,1,Christianity,89.409297536722,4.7766176929991,N,AFR,Africa,8,"Africa, West and Central",1
|
| 71 |
+
ER,ERI,ER,Eritrea,3535000,15,10,1591000,4,tir,Tigrigna,6,Islam,45.958784477648,2.405475582197,Y,AFR,Africa,7,"Africa, East and Southern",35
|
| 72 |
+
EN,EST,EE,Estonia,1295000,10,,,4,ekk,"Estonian, Standard",7,Non-Religious,47.452148281352,4.7251747074177,N,EUR,Europe,9,"Europe, Eastern and Eurasia",
|
| 73 |
+
WZ,SWZ,SZ,Eswatini,1222000,9,2,6700,5,ssw,Swati,1,Christianity,83.690090953407,20.722242558373,N,AFR,Africa,7,"Africa, East and Southern",2
|
| 74 |
+
ET,ETH,ET,Ethiopia,134440000,120,32,30667000,5,amh,Amharic,1,Christianity,58.156069745362,17.898818016202,Y,AFR,Africa,7,"Africa, East and Southern",622
|
| 75 |
+
FK,FLK,FK,"Falkland Islands",2800,2,,,4,eng,English,1,Christianity,64.064606741573,8.9635855776352,N,LAM,"South America",11,"America, Latin",
|
| 76 |
+
FO,FRO,FO,"Faroe Islands",55000,2,,,5,dan,Danish,1,Christianity,90.921529506516,33.205121129312,N,EUR,Europe,10,"Europe, Western",
|
| 77 |
+
FJ,FJI,FJ,Fiji,912000,31,1,4700,5,eng,English,1,Christianity,64.237832893162,26.481553015017,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 78 |
+
FI,FIN,FI,Finland,5511000,26,9,104000,5,fin,Finnish,1,Christianity,80.384401568585,10.235925308609,N,EUR,Europe,10,"Europe, Western",9
|
| 79 |
+
FR,FRA,FR,France,66551000,116,39,4591000,3,fra,French,1,Christianity,62.153365076387,1.25458987074,N,EUR,Europe,10,"Europe, Western",104
|
| 80 |
+
FG,GUF,GF,"French Guiana",303000,21,1,4700,4,fra,French,1,Christianity,80.690905307109,5.7808888418328,N,LAM,"South America",11,"America, Latin",1
|
| 81 |
+
FP,PYF,PF,"French Polynesia",269000,12,,,4,fra,French,1,Christianity,89.380902904047,7.8657532857176,N,SOP,Oceania,1,"Australia and Pacific",
|
| 82 |
+
GB,GAB,GA,Gabon,2535000,52,7,84000,5,fra,French,1,Christianity,73.350169180085,10.775136086625,N,AFR,Africa,8,"Africa, West and Central",7
|
| 83 |
+
GA,GMB,GM,Gambia,2772000,26,14,2354000,1,eng,English,6,Islam,3.4755653379847,0.89067305015235,Y,AFR,Africa,8,"Africa, West and Central",53
|
| 84 |
+
GG,GEO,GE,Georgia,3718000,27,12,288000,3,kat,Georgian,1,Christianity,80.148468262895,1.3900848356138,N,EUR,Europe,9,"Europe, Eastern and Eurasia",16
|
| 85 |
+
GM,DEU,DE,Germany,83991000,104,38,5956000,4,deu,"German, Standard",1,Christianity,61.545217596235,2.1274888081985,N,EUR,Europe,10,"Europe, Western",134
|
| 86 |
+
GH,GHA,GH,Ghana,34891000,108,16,1869000,5,eng,English,1,Christianity,61.967556260874,25.93385795725,N,AFR,Africa,8,"Africa, West and Central",46
|
| 87 |
+
GI,GIB,GI,Gibraltar,39000,6,3,3700,4,eng,English,1,Christianity,79.767660910518,2.187036227606,N,EUR,Europe,10,"Europe, Western",3
|
| 88 |
+
GR,GRC,GR,Greece,9851000,46,13,300000,3,ell,Greek,1,Christianity,87.937600149676,0.48685958208102,N,EUR,Europe,9,"Europe, Eastern and Eurasia",14
|
| 89 |
+
GL,GRL,GL,Greenland,55000,3,,,4,kal,Greenlandic,1,Christianity,95.291735129806,7.681409879752,N,NAR,"North America",12,"America, North and Caribbean",
|
| 90 |
+
GJ,GRD,GD,Grenada,112000,5,,,5,eng,English,1,Christianity,92.32097236821,18.872957902051,N,NAR,"North America",12,"America, North and Caribbean",
|
| 91 |
+
GP,GLP,GP,Guadeloupe,363000,6,1,,4,fra,French,1,Christianity,95.370589257299,4.8044255301544,N,NAR,"North America",12,"America, North and Caribbean",
|
| 92 |
+
GQ,GUM,GU,Guam,163000,14,1,2400,5,eng,English,1,Christianity,92.491027351363,16.056060149636,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 93 |
+
GT,GTM,GT,Guatemala,18516000,57,2,51000,5,spa,Spanish,1,Christianity,95.052067078524,25.089868442008,N,NAR,"North America",11,"America, Latin",2
|
| 94 |
+
GV,GIN,GN,Guinea,14957000,42,28,13004000,1,fra,French,6,Islam,4.1475495050089,0.6760141535836,Y,AFR,Africa,8,"Africa, West and Central",271
|
| 95 |
+
PU,GNB,GW,Guinea-Bissau,2193000,29,16,1059000,4,por,Portuguese,6,Islam,11.706646346468,2.0613370814556,Y,AFR,Africa,8,"Africa, West and Central",30
|
| 96 |
+
GY,GUY,GY,Guyana,813000,17,2,6300,5,eng,English,1,Christianity,40.052438077987,13.2120834137,N,LAM,"South America",11,"America, Latin",2
|
| 97 |
+
HA,HTI,HT,Haiti,11789000,5,1,40000,5,fra,French,1,Christianity,94.524003362086,17.652492599085,N,NAR,"North America",12,"America, North and Caribbean",1
|
| 98 |
+
HO,HND,HN,Honduras,10899000,19,1,40000,5,spa,Spanish,1,Christianity,95.360469747355,27.618395489946,N,NAR,"North America",11,"America, Latin",1
|
| 99 |
+
HU,HUN,HU,Hungary,9524000,17,4,79000,4,hun,Hungarian,1,Christianity,87.008573823822,3.0538073838558,N,EUR,Europe,9,"Europe, Eastern and Eurasia",4
|
| 100 |
+
IC,ISL,IS,Iceland,382000,6,1,1700,4,isl,Icelandic,1,Christianity,90.327194956755,4.3944726993107,N,EUR,Europe,10,"Europe, Western",1
|
| 101 |
+
IN,IND,IN,India,1453714000,2262,2032,1394848000,1,hin,Hindi,5,Hinduism,1.9723787688575,,Y,ASI,Asia,4,"Asia, South",28596
|
| 102 |
+
ID,IDN,ID,Indonesia,284202000,788,234,200484000,4,ind,Indonesian,6,Islam,11.375747867707,3.2541879035488,Y,ASI,Asia,2,"Asia, Southeast",4106
|
| 103 |
+
IR,IRN,IR,Iran,92310000,91,85,91830000,1,pes,"Persian, Iranian",6,Islam,1.7758764951182,1.1998186201441,Y,ASI,Asia,5,"Asia, Central",1868
|
| 104 |
+
IZ,IRQ,IQ,Iraq,46690000,33,27,45995000,1,arb,"Arabic, Standard",6,Islam,0.91426354782943,0.21921306345715,Y,ASI,Asia,6,"Africa, North and Middle East",923
|
| 105 |
+
EI,IRL,IE,Ireland,5189000,36,5,57000,3,eng,English,1,Christianity,90.477230555901,1.6637580209029,N,EUR,Europe,10,"Europe, Western",5
|
| 106 |
+
IM,IMN,IM,"Isle of Man",81000,2,,,4,eng,English,1,Christianity,99.162549084777,7.7906372711942,N,EUR,Europe,10,"Europe, Western",
|
| 107 |
+
IS,ISR,IL,Israel,9460000,47,40,9169000,1,heb,Hebrew,4,"Ethnic Religions",1.4233258654662,0.70162010644136,Y,ASI,Asia,6,"Africa, North and Middle East",200
|
| 108 |
+
IT,ITA,IT,Italy,59062000,102,22,1542000,3,ita,Italian,1,Christianity,81.866606322705,1.4801719234641,N,EUR,Europe,10,"Europe, Western",40
|
| 109 |
+
JM,JAM,JM,Jamaica,2815000,9,1,7500,5,eng,English,1,Christianity,81.122504645975,29.829027308508,N,NAR,"North America",12,"America, North and Caribbean",1
|
| 110 |
+
JA,JPN,JP,Japan,122829000,43,28,120571000,1,jpn,Japanese,2,Buddhism,1.8569865034704,0.44646250126173,Y,ASI,Asia,3,"Asia, Northeast",2424
|
| 111 |
+
JO,JOR,JO,Jordan,11455000,26,20,10658000,1,arb,"Arabic, Standard",6,Islam,2.2315003248031,0.27482207679635,Y,ASI,Asia,6,"Africa, North and Middle East",218
|
| 112 |
+
KZ,KAZ,KZ,Kazakhstan,20670000,53,27,16533000,2,kaz,Kazakh,6,Islam,11.763239046453,0.54359799547943,Y,ASI,Asia,5,"Asia, Central",342
|
| 113 |
+
KE,KEN,KE,Kenya,57389000,105,29,5407000,5,eng,English,1,Christianity,80.260996700658,45.778179032732,N,AFR,Africa,7,"Africa, East and Southern",118
|
| 114 |
+
KR,KIR,KI,"Kiribati (Gilbert)",131000,4,,,4,eng,English,1,Christianity,98.409152651018,8.5822031810507,N,SOP,Oceania,1,"Australia and Pacific",
|
| 115 |
+
KN,PRK,KP,"Korea, North",26493000,4,2,26300000,1,kor,Korean,7,Non-Religious,1.6574525963404,1.5803171178543,Y,ASI,Asia,3,"Asia, Northeast",526
|
| 116 |
+
KS,KOR,KR,"Korea, South",51602000,30,13,488000,5,kor,Korean,7,Non-Religious,29.948420141653,16.282003143986,N,ASI,Asia,3,"Asia, Northeast",15
|
| 117 |
+
KV,XKV,XK,Kosovo,1628000,9,6,1581000,1,aln,"Albanian, Gheg",6,Islam,4.0091331503215,0.20040655649109,N,EUR,Europe,9,"Europe, Eastern and Eurasia",35
|
| 118 |
+
KU,KWT,KW,Kuwait,4925000,31,17,3420000,2,arb,"Arabic, Standard",6,Islam,10.456210697098,1.6087808127848,Y,ASI,Asia,6,"Africa, North and Middle East",73
|
| 119 |
+
KG,KGZ,KG,Kyrgyzstan,7206000,28,22,6781000,1,kir,Kyrgyz,6,Islam,3.684467228685,0.30492568031557,Y,ASI,Asia,5,"Asia, Central",146
|
| 120 |
+
LA,LAO,LA,Laos,7758000,184,142,2225000,4,lao,Lao,2,Buddhism,3.6294631007911,2.5425047526641,Y,ASI,Asia,2,"Asia, Southeast",154
|
| 121 |
+
LG,LVA,LV,Latvia,1796000,10,1,2400,4,lvs,"Latvian, Standard",1,Christianity,60.122876084858,7.6119913149354,N,EUR,Europe,9,"Europe, Eastern and Eurasia",1
|
| 122 |
+
LE,LBN,LB,Lebanon,5782000,26,12,2991000,2,arb,"Arabic, Standard",6,Islam,31.518599065986,0.71609942401596,Y,ASI,Asia,6,"Africa, North and Middle East",64
|
| 123 |
+
LT,LSO,LS,Lesotho,2315000,8,1,13000,5,eng,English,1,Christianity,89.251867822235,13.010712943324,N,AFR,Africa,7,"Africa, East and Southern",1
|
| 124 |
+
LI,LBR,LR,Liberia,5651000,37,6,673000,5,eng,English,4,"Ethnic Religions",39.781534821724,11.73453021691,N,AFR,Africa,8,"Africa, West and Central",14
|
| 125 |
+
LY,LBY,LY,Libya,7373000,45,34,6499000,1,arb,"Arabic, Standard",6,Islam,2.4217160044064,0.17350550628307,Y,AFR,Africa,6,"Africa, North and Middle East",139
|
| 126 |
+
LS,LIE,LI,Liechtenstein,40000,8,2,1600,3,deu,"German, Standard",1,Christianity,77.101611967407,0.55981685628516,N,EUR,Europe,10,"Europe, Western",2
|
| 127 |
+
LH,LTU,LT,Lithuania,2790000,9,3,11000,3,lit,Lithuanian,1,Christianity,84.697430782442,1.3927953977395,N,EUR,Europe,9,"Europe, Eastern and Eurasia",3
|
| 128 |
+
LU,LUX,LU,Luxembourg,643000,15,1,700,3,ltz,Luxembourgish,1,Christianity,80.661309926806,0.99480939736891,N,EUR,Europe,10,"Europe, Western",1
|
| 129 |
+
MA,MDG,MG,Madagascar,32441000,40,11,632000,4,plt,"Malagasy, Merina",1,Christianity,49.419827397068,6.116287253214,N,AFR,Africa,7,"Africa, East and Southern",18
|
| 130 |
+
MI,MWI,MW,Malawi,21970000,24,5,2868000,5,eng,English,1,Christianity,73.748390168966,17.056081048087,N,AFR,Africa,7,"Africa, East and Southern",59
|
| 131 |
+
MY,MYS,MY,Malaysia,35782000,183,78,18066000,4,zlm,Malay,6,Islam,9.7404373735282,3.5820585166669,Y,ASI,Asia,2,"Asia, Southeast",409
|
| 132 |
+
MV,MDV,MV,Maldives,508000,4,4,508000,1,div,Maldivian,6,Islam,0.02788689974892,0.0089077968689739,Y,ASI,Asia,4,"Asia, South",13
|
| 133 |
+
ML,MLI,ML,Mali,24821000,72,43,22703000,1,fra,French,6,Islam,2.7813857753412,0.67102808212664,Y,AFR,Africa,8,"Africa, West and Central",465
|
| 134 |
+
MT,MLT,MT,Malta,526000,4,1,100,3,mlt,Maltese,1,Christianity,96.756300618008,1.551452025103,N,EUR,Europe,10,"Europe, Western",1
|
| 135 |
+
RM,MHL,MH,"Marshall Islands",34000,6,2,400,5,eng,English,1,Christianity,94.627188993821,53.441720938416,N,SOP,Oceania,1,"Australia and Pacific",2
|
| 136 |
+
MB,MTQ,MQ,Martinique,325000,5,1,,4,fra,French,1,Christianity,95.020630628693,6.9979510999069,N,NAR,"North America",12,"America, North and Caribbean",
|
| 137 |
+
MR,MRT,MR,Mauritania,5223000,17,15,5210000,1,arb,"Arabic, Standard",6,Islam,0.20841334260814,0.080143122655534,Y,AFR,Africa,8,"Africa, West and Central",112
|
| 138 |
+
MP,MUS,MU,Mauritius,1224000,12,5,104000,4,eng,English,5,Hinduism,32.559002414616,9.8022849614002,N,AFR,Africa,7,"Africa, East and Southern",5
|
| 139 |
+
MF,MYT,YT,Mayotte,324000,8,6,319000,1,fra,French,6,Islam,1.6255841965791,0.09305054842042,N,AFR,Africa,7,"Africa, East and Southern",10
|
| 140 |
+
MX,MEX,MX,Mexico,131338000,333,4,359000,5,spa,Spanish,1,Christianity,95.125116939794,10.483564173041,N,NAR,"North America",11,"America, Latin",8
|
| 141 |
+
FM,FSM,FM,"Micronesia, Federated States",111000,25,2,1400,5,eng,English,1,Christianity,94.85610762605,22.778819807861,N,SOP,Oceania,1,"Australia and Pacific",2
|
| 142 |
+
MD,MDA,MD,Moldova,2947000,12,2,9400,4,ron,Romanian,1,Christianity,76.381268398036,4.5384726462083,N,EUR,Europe,9,"Europe, Eastern and Eurasia",2
|
| 143 |
+
MN,MCO,MC,Monaco,37000,14,2,700,3,fra,French,1,Christianity,82.29394277625,1.2725081134088,N,EUR,Europe,10,"Europe, Western",1
|
| 144 |
+
MG,MNG,MN,Mongolia,3446000,25,17,3220000,1,khk,"Mongolian, Halh",2,Buddhism,2.3303451354397,1.3311539220695,Y,ASI,Asia,3,"Asia, Northeast",73
|
| 145 |
+
MJ,MNE,ME,Montenegro,614000,12,3,58000,3,srp,Serbian,1,Christianity,73.915533366584,0.27267825600151,N,EUR,Europe,9,"Europe, Eastern and Eurasia",3
|
| 146 |
+
MH,MSR,MS,Montserrat,3900,2,,,5,eng,English,1,Christianity,94.929381443299,27.586141980023,N,NAR,"North America",12,"America, North and Caribbean",
|
| 147 |
+
MO,MAR,MA,Morocco,38103000,29,27,38090000,1,arb,"Arabic, Standard",6,Islam,0.18836065771088,0.10727571474299,Y,AFR,Africa,6,"Africa, North and Middle East",767
|
| 148 |
+
MZ,MOZ,MZ,Mozambique,35294000,52,12,3852000,5,por,Portuguese,1,Christianity,45.843847434597,10.7612767443,N,AFR,Africa,7,"Africa, East and Southern",77
|
| 149 |
+
BM,MMR,MM,"Myanmar (Burma)",54553000,218,59,45405000,4,mya,Burmese,2,Buddhism,8.7409547535155,5.1984265615792,Y,ASI,Asia,2,"Asia, Southeast",929
|
| 150 |
+
WA,NAM,NA,Namibia,3034000,33,1,8300,5,eng,English,1,Christianity,88.292625582263,12.490326101197,N,AFR,Africa,7,"Africa, East and Southern",1
|
| 151 |
+
NR,NRU,NR,Nauru,11000,7,,,5,eng,English,1,Christianity,85.389223123202,12.615533557872,N,SOP,Oceania,1,"Australia and Pacific",
|
| 152 |
+
NP,NPL,NP,Nepal,29218000,195,184,26013000,1,npi,Nepali,5,Hinduism,1.4435514677789,,Y,ASI,Asia,4,"Asia, South",628
|
| 153 |
+
NL,NLD,NL,Netherlands,18209000,71,20,1745000,4,nld,Dutch,1,Christianity,46.740272396328,4.0029961656266,N,EUR,Europe,10,"Europe, Western",40
|
| 154 |
+
NC,NCL,NC,"New Caledonia",286000,43,,,4,fra,French,1,Christianity,78.374823008695,6.210480033787,N,SOP,Oceania,1,"Australia and Pacific",
|
| 155 |
+
NZ,NZL,NZ,"New Zealand",5157000,60,19,443000,5,eng,English,1,Christianity,50.786935539703,17.81644209218,N,SOP,Oceania,1,"Australia and Pacific",22
|
| 156 |
+
NU,NIC,NI,Nicaragua,6895000,16,1,6200,5,spa,Spanish,1,Christianity,96.085514557294,43.348310787134,N,NAR,"North America",11,"America, Latin",1
|
| 157 |
+
NG,NER,NE,Niger,27505000,36,30,27011000,1,fra,French,6,Islam,1.6248082859357,1.0191695347763,Y,AFR,Africa,8,"Africa, West and Central",544
|
| 158 |
+
NI,NGA,NG,Nigeria,237239000,535,49,72318000,5,eng,English,1,Christianity,51.589354736277,26.822768057448,Y,AFR,Africa,8,"Africa, West and Central",1466
|
| 159 |
+
NE,NIU,NU,Niue,1900,2,,,4,eng,English,1,Christianity,95.91908713693,5.3361108559569,N,SOP,Oceania,1,"Australia and Pacific",
|
| 160 |
+
NF,NFK,NF,"Norfolk Island",1700,3,,,5,eng,English,1,Christianity,72.463898916967,20.307460890493,N,SOP,Oceania,1,"Australia and Pacific",
|
| 161 |
+
MK,MKD,MK,"North Macedonia",1793000,16,9,557000,3,mkd,Macedonian,1,Christianity,61.824386516062,0.15990552298302,N,EUR,Europe,9,"Europe, Eastern and Eurasia",17
|
| 162 |
+
CQ,MNP,MP,"Northern Mariana Islands",43000,10,1,700,5,eng,English,1,Christianity,74.969329312481,17.10964232795,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 163 |
+
NO,NOR,NO,Norway,5520000,53,17,314000,4,nor,Norwegian,1,Christianity,86.411123250174,7.9183141805933,N,EUR,Europe,10,"Europe, Western",17
|
| 164 |
+
MU,OMN,OM,Oman,5452000,35,27,4855000,1,arb,"Arabic, Standard",6,Islam,2.8654007309348,0.71563002970579,Y,ASI,Asia,6,"Africa, North and Middle East",107
|
| 165 |
+
PK,PAK,PK,Pakistan,253227000,775,767,250751000,1,urd,Urdu,6,Islam,0.9858495251666,,Y,ASI,Asia,4,"Asia, South",5259
|
| 166 |
+
PS,PLW,PW,Palau,18000,7,1,1100,5,eng,English,1,Christianity,89.669993117306,20.683364994464,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 167 |
+
PM,PAN,PA,Panama,4542000,26,2,20000,5,spa,Spanish,1,Christianity,88.071747120984,21.68887706548,N,NAR,"North America",11,"America, Latin",2
|
| 168 |
+
PP,PNG,PG,"Papua New Guinea",10607000,883,1,30000,5,eng,English,1,Christianity,94.29812051762,23.387234902789,N,AUS,Australia,1,"Australia and Pacific",1
|
| 169 |
+
PA,PRY,PY,Paraguay,6934000,36,6,28000,4,spa,Spanish,1,Christianity,94.606308435745,8.0555754767491,N,LAM,"South America",11,"America, Latin",6
|
| 170 |
+
PE,PER,PE,Peru,34323000,103,15,208000,5,spa,Spanish,1,Christianity,94.072133220896,14.487751747348,N,LAM,"South America",11,"America, Latin",16
|
| 171 |
+
RP,PHL,PH,Philippines,116164000,200,28,6332000,5,tgl,Tagalog,1,Christianity,88.571827013423,11.359479046379,N,ASI,Asia,2,"Asia, Southeast",137
|
| 172 |
+
PC,PCN,PN,"Pitcairn Islands",50,1,,,4,eng,English,1,Christianity,96,10,N,SOP,Oceania,1,"Australia and Pacific",
|
| 173 |
+
PL,POL,PL,Poland,38007000,24,4,52000,3,pol,Polish,1,Christianity,89.632095679023,0.31116349849262,N,EUR,Europe,9,"Europe, Eastern and Eurasia",4
|
| 174 |
+
PO,PRT,PT,Portugal,10324000,35,5,121000,4,por,Portuguese,1,Christianity,92.594564470413,3.5221208409734,N,EUR,Europe,10,"Europe, Western",6
|
| 175 |
+
RQ,PRI,PR,"Puerto Rico",3182000,9,2,12000,5,spa,Spanish,1,Christianity,94.434593234283,33.036055172,N,NAR,"North America",12,"America, North and Caribbean",2
|
| 176 |
+
QA,QAT,QA,Qatar,3081000,25,14,2654000,2,arb,"Arabic, Standard",6,Islam,6.4168279048544,0.95866430550811,Y,ASI,Asia,6,"Africa, North and Middle East",54
|
| 177 |
+
RE,REU,RE,Reunion,857000,15,5,55000,4,fra,French,1,Christianity,86.304196767152,6.9324541338121,N,AFR,Africa,7,"Africa, East and Southern",5
|
| 178 |
+
RO,ROU,RO,Romania,18724000,21,6,87000,4,ron,Romanian,1,Christianity,93.639825309291,6.3914995053846,N,EUR,Europe,9,"Europe, Eastern and Eurasia",6
|
| 179 |
+
RS,RUS,RU,Russia,143850000,171,115,17720000,3,rus,Russian,1,Christianity,57.029700576731,1.420732380504,N,ASI,Asia,9,"Europe, Eastern and Eurasia",413
|
| 180 |
+
RW,RWA,RW,Rwanda,14351000,8,2,40000,5,kin,Kinyarwanda,1,Christianity,90.202314503919,26.7928373773,N,AFR,Africa,7,"Africa, East and Southern",2
|
| 181 |
+
SH,SHN,SH,"Saint Helena",4800,1,,,4,eng,English,1,Christianity,95,8.8,N,AFR,Africa,7,"Africa, East and Southern",
|
| 182 |
+
SC,KNA,KN,"Saint Kitts and Nevis",46000,4,1,,5,eng,English,1,Christianity,92.528501452165,22.124790844857,N,NAR,"North America",12,"America, North and Caribbean",
|
| 183 |
+
ST,LCA,LC,"Saint Lucia",175000,5,1,,5,eng,English,1,Christianity,94.804125360683,18.632373964963,N,NAR,"North America",12,"America, North and Caribbean",
|
| 184 |
+
SB,SPM,PM,"Saint Pierre and Miquelon",5800,2,,,3,fra,French,1,Christianity,96.479958890031,0.63369304556355,N,NAR,"North America",12,"America, North and Caribbean",
|
| 185 |
+
WS,WSM,WS,Samoa,216000,6,2,2100,5,smo,Samoan,1,Christianity,95.241898893391,18.497427033043,N,SOP,Oceania,1,"Australia and Pacific",2
|
| 186 |
+
SM,SMR,SM,"San Marino",33000,2,1,,3,ita,Italian,1,Christianity,84.002902446166,0.074069739732055,N,EUR,Europe,10,"Europe, Western",
|
| 187 |
+
TP,STP,ST,"São Tomé and Príncipe",228000,6,1,1000,4,por,Portuguese,1,Christianity,86.172519540089,5.917739814408,N,AFR,Africa,8,"Africa, West and Central",1
|
| 188 |
+
SA,SAU,SA,"Saudi Arabia",34435000,57,43,31108000,1,arb,"Arabic, Standard",6,Islam,3.6920941811422,0.52317864243651,Y,ASI,Asia,6,"Africa, North and Middle East",631
|
| 189 |
+
SG,SEN,SN,Senegal,18689000,54,28,15303000,1,fra,French,6,Islam,4.5456900804239,0.18826716444847,Y,AFR,Africa,8,"Africa, West and Central",314
|
| 190 |
+
RI,SRB,RS,Serbia,6560000,27,6,168000,3,srp,Serbian,1,Christianity,78.124101839497,0.71328805794689,N,EUR,Europe,9,"Europe, Eastern and Eurasia",8
|
| 191 |
+
SE,SYC,SC,Seychelles,129000,4,,,4,crs,"Seychelles French Creole",1,Christianity,96.179852091231,7.0230384068482,N,AFR,Africa,7,"Africa, East and Southern",
|
| 192 |
+
SL,SLE,SL,"Sierra Leone",8767000,27,12,1669000,4,eng,English,6,Islam,12.778984933523,4.94806195912,N,AFR,Africa,8,"Africa, West and Central",36
|
| 193 |
+
SN,SGP,SG,Singapore,5806000,40,17,1019000,4,cmn,"Chinese, Mandarin",2,Buddhism,12.871017002944,7.1251824073657,N,ASI,Asia,2,"Asia, Southeast",29
|
| 194 |
+
NN,SXM,SX,"Sint Maarten",43000,8,,,4,eng,English,1,Christianity,88.561019120147,7.9470597824821,N,NAR,"North America",12,"America, North and Caribbean",
|
| 195 |
+
LO,SVK,SK,Slovakia,5403000,15,2,18000,3,slk,Slovak,1,Christianity,92.486772879434,1.4560101002986,N,EUR,Europe,9,"Europe, Eastern and Eurasia",2
|
| 196 |
+
SI,SVN,SI,Slovenia,2080000,12,1,90000,3,slv,Slovene,1,Christianity,53.969112701183,0.20672165399578,N,EUR,Europe,9,"Europe, Eastern and Eurasia",2
|
| 197 |
+
BP,SLB,SB,"Solomon Islands",814000,74,1,3000,5,eng,English,1,Christianity,95.40320238694,31.323433895135,N,SOP,Oceania,1,"Australia and Pacific",1
|
| 198 |
+
SO,SOM,SO,Somalia,19468000,22,20,19407000,1,som,Somali,6,Islam,0.47906572893526,0.14544794856861,Y,AFR,Africa,7,"Africa, East and Southern",396
|
| 199 |
+
SF,ZAF,ZA,"South Africa",64347000,62,8,1284000,5,eng,English,1,Christianity,76.804479299365,21.104616980983,N,AFR,Africa,7,"Africa, East and Southern",27
|
| 200 |
+
OD,SSD,SS,"South Sudan",12106000,80,6,682000,5,eng,English,1,Christianity,63.700120480738,19.81730380084,N,AFR,Africa,7,"Africa, East and Southern",16
|
| 201 |
+
SP,ESP,ES,Spain,47786000,77,11,1242000,3,spa,Spanish,1,Christianity,76.990786544628,1.598750617263,N,EUR,Europe,10,"Europe, Western",30
|
| 202 |
+
CE,LKA,LK,"Sri Lanka",23006000,139,55,3652000,2,sin,Sinhala,2,Buddhism,7.5930306172887,,Y,ASI,Asia,4,"Asia, South",106
|
| 203 |
+
VC,VCT,VC,"St Vincent and Grenadines",96000,6,,,5,eng,English,1,Christianity,89.15497080291,41.484004658942,N,NAR,"North America",12,"America, North and Caribbean",
|
| 204 |
+
SU,SDN,SD,Sudan,51216000,198,168,48612000,1,arb,"Arabic, Standard",6,Islam,2.2627202055663,0.48352640178419,Y,AFR,Africa,7,"Africa, East and Southern",1034
|
| 205 |
+
NS,SUR,SR,Suriname,622000,22,2,33000,5,nld,Dutch,1,Christianity,48.612238477834,17.320382647573,N,LAM,"South America",11,"America, Latin",2
|
| 206 |
+
SV,SJM,SJ,Svalbard,2500,2,,,4,nor,Norwegian,1,Christianity,90.903807615231,6.641122244489,N,EUR,Europe,10,"Europe, Western",
|
| 207 |
+
SW,SWE,SE,Sweden,10552000,74,24,578000,4,swe,Swedish,1,Christianity,54.842232525091,5.6689381115349,N,EUR,Europe,10,"Europe, Western",25
|
| 208 |
+
SZ,CHE,CH,Switzerland,8849000,58,17,326000,4,gsw,"German, Swiss",1,Christianity,75.632198655083,3.9558389546282,N,EUR,Europe,10,"Europe, Western",18
|
| 209 |
+
SY,SYR,SY,Syria,25300000,30,19,8967000,2,arb,"Arabic, Standard",6,Islam,5.3567862820684,0.17246219587861,Y,ASI,Asia,6,"Africa, North and Middle East",184
|
| 210 |
+
TW,TWN,TW,Taiwan,23049000,32,8,4353000,4,cmn,"Chinese, Mandarin",4,"Ethnic Religions",6.373509229752,3.325295819292,Y,ASI,Asia,3,"Asia, Northeast",91
|
| 211 |
+
TI,TJK,TJ,Tajikistan,10734000,29,25,10682000,1,tgk,Tajik,6,Islam,0.39644587240147,0.071762166762154,Y,ASI,Asia,5,"Asia, Central",229
|
| 212 |
+
TZ,TZA,TZ,Tanzania,70041000,154,24,6340000,5,swh,Swahili,1,Christianity,50.457469860602,11.044554987714,N,AFR,Africa,7,"Africa, East and Southern",131
|
| 213 |
+
TH,THA,TH,Thailand,71255000,110,73,61267000,1,tha,Thai,2,Buddhism,1.8382541201861,0.76148621123182,Y,ASI,Asia,2,"Asia, Southeast",1262
|
| 214 |
+
TT,TLS,TL,Timor-Leste,1379000,21,1,6000,4,por,Portuguese,1,Christianity,90.667703413759,2.4893315941981,Y,ASI,Asia,2,"Asia, Southeast",1
|
| 215 |
+
TO,TGO,TG,Togo,9584000,54,9,757000,5,fra,French,1,Christianity,45.200361958933,11.09538130643,N,AFR,Africa,8,"Africa, West and Central",18
|
| 216 |
+
TL,TKL,TK,Tokelau,2900,1,,,4,tkl,Tokelauan,1,Christianity,100,3.4,N,SOP,Oceania,1,"Australia and Pacific",
|
| 217 |
+
TN,TON,TO,Tonga,100000,4,,,5,ton,Tongan,1,Christianity,95.861497534778,15.777554979948,N,SOP,Oceania,1,"Australia and Pacific",
|
| 218 |
+
TD,TTO,TT,"Trinidad and Tobago",1463000,9,,,5,eng,English,1,Christianity,66.591956463391,23.907184375309,N,NAR,"North America",12,"America, North and Caribbean",
|
| 219 |
+
TS,TUN,TN,Tunisia,12316000,18,16,12220000,1,arb,"Arabic, Standard",6,Islam,0.45631347459133,0.029827960887887,Y,AFR,Africa,6,"Africa, North and Middle East",254
|
| 220 |
+
TU,TUR,TR,"Türkiye (Turkey)",87515000,85,61,86812000,1,tur,Turkish,6,Islam,0.6452488153655,0.043780415959696,Y,ASI,Asia,5,"Asia, Central",1759
|
| 221 |
+
TX,TKM,TM,Turkmenistan,7506000,28,20,7144000,1,tuk,Turkmen,6,Islam,3.7243695898205,0.10404998331358,Y,ASI,Asia,5,"Asia, Central",151
|
| 222 |
+
TK,TCA,TC,"Turks and Caicos Islands",45000,3,,,5,eng,English,1,Christianity,90.68446644859,32.196585182566,N,NAR,"North America",12,"America, North and Caribbean",
|
| 223 |
+
TV,TUV,TV,Tuvalu,8700,2,,,5,tvl,Tuvaluan,1,Christianity,98,22.942242130368,N,SOP,Oceania,1,"Australia and Pacific",
|
| 224 |
+
UG,UGA,UG,Uganda,51075000,66,2,1071000,5,eng,English,1,Christianity,84.342293565099,34.14466860129,N,AFR,Africa,7,"Africa, East and Southern",22
|
| 225 |
+
UP,UKR,UA,Ukraine,38516000,56,16,450000,4,ukr,Ukrainian,1,Christianity,72.549338939272,3.6566611584598,N,EUR,Europe,9,"Europe, Eastern and Eurasia",20
|
| 226 |
+
AE,ARE,AE,"United Arab Emirates",11239000,44,36,8436000,2,arb,"Arabic, Standard",6,Islam,6.643436604654,1.4523117100436,Y,ASI,Asia,6,"Africa, North and Middle East",174
|
| 227 |
+
UK,GBR,GB,"United Kingdom",69417000,122,40,6221000,4,eng,English,1,Christianity,55.603264537673,7.5087893890687,N,EUR,Europe,10,"Europe, Western",140
|
| 228 |
+
US,USA,US,"United States",345733000,496,90,14792000,5,eng,English,1,Christianity,76.535553387099,26.581540973029,N,NAR,"North America",12,"America, North and Caribbean",328
|
| 229 |
+
UY,URY,UY,Uruguay,3304000,24,2,23000,4,spa,Spanish,1,Christianity,66.314394282826,7.3383329285255,N,LAM,"South America",11,"America, Latin",2
|
| 230 |
+
UZ,UZB,UZ,Uzbekistan,36695000,44,26,35388000,1,uzn,"Uzbek, Northern",6,Islam,2.3305175016903,0.18776204612836,Y,ASI,Asia,5,"Asia, Central",718
|
| 231 |
+
NH,VUT,VU,Vanuatu,321000,109,,,5,bis,Bislama,1,Christianity,91.134034733956,41.574605812228,N,SOP,Oceania,1,"Australia and Pacific",
|
| 232 |
+
VT,VAT,VA,"Vatican City",1000,1,,,4,lat,Latin,1,Christianity,100,2.5,N,EUR,Europe,10,"Europe, Western",
|
| 233 |
+
VE,VEN,VE,Venezuela,28346000,63,4,119000,5,spa,Spanish,1,Christianity,82.361595338315,12.227707750094,N,LAM,"South America",11,"America, Latin",5
|
| 234 |
+
VM,VNM,VN,Vietnam,100766000,116,69,9630000,4,vie,Vietnamese,2,Buddhism,10.185595385353,2.1723101176772,Y,ASI,Asia,2,"Asia, Southeast",231
|
| 235 |
+
VQ,VIR,VI,"Virgin Islands (U.S.)",82000,6,,,5,eng,English,1,Christianity,94.205282776038,24.506437434739,N,NAR,"North America",12,"America, North and Caribbean",
|
| 236 |
+
WF,WLF,WF,"Wallis and Futuna Islands",11000,3,,,3,fra,French,1,Christianity,98.464819776691,1.7733104995397,N,SOP,Oceania,1,"Australia and Pacific",
|
| 237 |
+
WE,PSE,PS,"West Bank / Gaza",5538000,8,6,5485000,1,ajp,,6,Islam,2.4593904456881,1.1057708986915,Y,ASI,Asia,6,"Africa, North and Middle East",113
|
| 238 |
+
WI,ESH,EH,"Western Sahara",578000,10,10,578000,1,ary,"Arabic, Moroccan",6,Islam,0.027811938526662,0.00076596405681288,Y,AFR,Africa,6,"Africa, North and Middle East",16
|
| 239 |
+
YM,YEM,YE,Yemen,41367000,28,20,41244000,1,arb,"Arabic, Standard",6,Islam,0.1969352586869,0.020438467826086,Y,ASI,Asia,6,"Africa, North and Middle East",827
|
| 240 |
+
ZA,ZMB,ZM,Zambia,21729000,75,4,127000,5,eng,English,1,Christianity,86.340922082192,25.081579831499,N,AFR,Africa,7,"Africa, East and Southern",4
|
| 241 |
+
ZI,ZWE,ZW,Zimbabwe,16761000,40,3,102000,5,eng,English,1,Christianity,76.634669664795,25.337914991512,N,AFR,Africa,7,"Africa, East and Southern",3
|
| 242 |
+
|
| 243 |
+
Bible Translation status:
|
| 244 |
+
0,Unspecified
|
| 245 |
+
1,Translation Needed
|
| 246 |
+
2,Translation Started
|
| 247 |
+
3,Portions
|
| 248 |
+
4,New Testament
|
| 249 |
+
5,Complete Bible
|
| 250 |
+
|
| 251 |
+
"Joshua Project welcomes corrections / updates to this data. Please send feedback to:"
|
| 252 |
+
|
| 253 |
+
Email:,info@joshuaproject.net
|
| 254 |
+
Web:,www.joshuaproject.net
|
archive/AllLanguageListing.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/AllPeoplesAcrossCountries.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/AllPeoplesInCountry.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/FieldDefinitions.csv
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Joshua Project People Group Data
|
| 2 |
+
|
| 3 |
+
TableName,FieldName,FieldDescription,FieldType
|
| 4 |
+
jpabsum,AffinityBloc,"Affinity Bloc name",Text
|
| 5 |
+
jpabsum,NbrLR,"Number of least-reached People Groups in Country (PGIC)","Long Integer"
|
| 6 |
+
jpabsum,NbrPC,"Number of People Clusters","Long Integer"
|
| 7 |
+
jpabsum,NbrPCLR,"Number of least-reached People Clusters","Long Integer"
|
| 8 |
+
jpabsum,NbrPGAC,"Number of People Groups Across Countries (PGAC)","Long Integer"
|
| 9 |
+
jpabsum,NbrPGIC,"Number of People Groups In Country (PGIC)","Long Integer"
|
| 10 |
+
jpabsum,PeopleID1,"Affinity Bloc code","Long Integer"
|
| 11 |
+
jpabsum,PercentLR,"Percent least-reached",Single
|
| 12 |
+
jpabsum,PercentPoplLR,"Percent Affinity Bloc population in least-reached peoples",Single
|
| 13 |
+
jpabsum,ROP1,"Registry of Peoples - Affinity Bloc ID",Text
|
| 14 |
+
jpabsum,SumAB,"Affinity Bloc population (summed from people group populations)",Single
|
| 15 |
+
jpabsum,SumABLR,"Affinity Bloc least-reached peoples population",Single
|
| 16 |
+
jpabsum,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 17 |
+
jpcontinentsum,Cnt1040Window,"Count of 10/40 Window countries in continent","Long Integer"
|
| 18 |
+
jpcontinentsum,Continent,"Continent name",Text
|
| 19 |
+
jpcontinentsum,NbrCountries,"Number of countries in continent","Long Integer"
|
| 20 |
+
jpcontinentsum,NbrCountriesLR,"Number of countries considered least-reached in continent","Long Integer"
|
| 21 |
+
jpcontinentsum,NbrLR,"Number of least-reached people groups in continent","Long Integer"
|
| 22 |
+
jpcontinentsum,NbrPGIC,"Number of people groups in continent","Long Integer"
|
| 23 |
+
jpcontinentsum,PercentChristian,"Continent percent Christian Adherents (generated from people group % Christian Adherents)",Double
|
| 24 |
+
jpcontinentsum,PercentEvangelical,"Continent percent Christian Evangelicals (generated from people group % Evangelicals)",Double
|
| 25 |
+
jpcontinentsum,PercentLR,"Percent of least-reached people groups in continent",Double
|
| 26 |
+
jpcontinentsum,PercentPoplLR,"Percent of groups considered least-reached in continent",Double
|
| 27 |
+
jpcontinentsum,PercentUrbanized,"Continent percent urbanized",Double
|
| 28 |
+
jpcontinentsum,ROG2,"Continent code",Text
|
| 29 |
+
jpcontinentsum,SumContinent,"Population of continent (summed from people group populations)",Double
|
| 30 |
+
jpcontinentsum,SumContinentLR,"Population of least-reached people groups in continent",Double
|
| 31 |
+
jpcontinentsum,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 32 |
+
jpcountries,10_40Window,"Part of 10/40 Window according to Window International Network (WIN)",Text
|
| 33 |
+
jpcountries,CntPeoples,"Count of people groups","Long Integer"
|
| 34 |
+
jpcountries,CntPeoplesLR,"Count of people groups considered unreached","Long Integer"
|
| 35 |
+
jpcountries,Continent,Continent,Text
|
| 36 |
+
jpcountries,Ctry,"Country name",Text
|
| 37 |
+
jpcountries,ISO2,"ISO 2 character code for country",Text
|
| 38 |
+
jpcountries,ISO3,"ISO 3 character code for country",Text
|
| 39 |
+
jpcountries,JPScaleCtry,"Joshua Project Progress Scale for overall country",Text
|
| 40 |
+
jpcountries,OfficialLang,"Official language name for this country",Text
|
| 41 |
+
jpcountries,PercentChristianity,"Percent Christian Adherent from summing people group values",Double
|
| 42 |
+
jpcountries,PercentEvangelical,"Percent Evangelical from summing people group values",Double
|
| 43 |
+
jpcountries,PoplPeoples,"Population of people groups, should be very close to field Population",Double
|
| 44 |
+
jpcountries,PoplPeoplesLR,"Population of people groups considered unreached",Double
|
| 45 |
+
jpcountries,RegionCode,"Ethne Regions","Long Integer"
|
| 46 |
+
jpcountries,RegionName,"Region name",Text
|
| 47 |
+
jpcountries,ReligionPrimary,"Primary Religion for this country",Text
|
| 48 |
+
jpcountries,RLG3Primary,"Code for primary Religion for this country","Long Integer"
|
| 49 |
+
jpcountries,ROG2,"Continent code",Text
|
| 50 |
+
jpcountries,ROG3,"2 digit FIPS code for country: FIPS PUB 10-4",Text
|
| 51 |
+
jpcountries,ROL3OfficialLanguage,"Code for official language for this country",Text
|
| 52 |
+
jpcountries,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 53 |
+
jplangpeopctry,Language,"Language name",Text
|
| 54 |
+
jplangpeopctry,LanguageDialect,"Dialect name",Text
|
| 55 |
+
jplangpeopctry,LanguageRank,"P = Primary, S = Secondary",Text
|
| 56 |
+
jplangpeopctry,PeopleID3,"People group code","Long Integer"
|
| 57 |
+
jplangpeopctry,ROG3,"Country code",Text
|
| 58 |
+
jplangpeopctry,ROL3,"Language code",Text
|
| 59 |
+
jplangpeopctry,ROL4,"Dialect code",Text
|
| 60 |
+
jplangpeopctry,Speakers,"World speakers",Number
|
| 61 |
+
jplanguages,AudioRecordings,"Global Recordings Network available",Text
|
| 62 |
+
jplanguages,BibleStatus,"Bible translation status: 0=Questionable translation need; 1=None, definite need; 2=Portions: 3=NT; ","Long Integer"
|
| 63 |
+
jplanguages,BibleYear,"Year of complete Bible translation (BibleStatus=4)",Text
|
| 64 |
+
jplanguages,JF,"Jesus Film available",Text
|
| 65 |
+
jplanguages,JPScale,"Joshua Project Scale",Text
|
| 66 |
+
jplanguages,Language,"Language Name",Text
|
| 67 |
+
jplanguages,LeastReached,"Considered Least-reached / unreached based on people group data",Text
|
| 68 |
+
jplanguages,NbrPGICs,"Number of people groups speaking this as primary language","Long Integer"
|
| 69 |
+
jplanguages,NTYear,"Year of NT translation (BibleStatus=3)",Text
|
| 70 |
+
jplanguages,PortionsYear,"Year of portions translation (BibleStatus=2)",Text
|
| 71 |
+
jplanguages,PrimaryReligion,"Largest religion name based on people groups primary religion",Text
|
| 72 |
+
jplanguages,RLG3,"Largest religion code based on people groups primary religion","Long Integer"
|
| 73 |
+
jplanguages,ROL3,"Language Code (ISO and Ethnologue)",Text
|
| 74 |
+
jplanguages,YouVersion_ID,"YouVersion Bible ID",Text
|
| 75 |
+
jppeopleclusters,AffinityBloc,"Affinity Bloc name",Text
|
| 76 |
+
jppeopleclusters,FrontierPC,"People Cluster considered frontier based on summation of people groups",Text
|
| 77 |
+
jppeopleclusters,JPScale,"People Cluster progress scale level","Long Integer"
|
| 78 |
+
jppeopleclusters,LR,"People Cluster considered unreached based on summation of people groups",Text
|
| 79 |
+
jppeopleclusters,NbrFrontier,"People Cluster number of frontier People Groups In Countries (PGIC)","Long Integer"
|
| 80 |
+
jppeopleclusters,NbrLanguages,"People Cluster number of languages spoken","Long Integer"
|
| 81 |
+
jppeopleclusters,NbrLR,"People Cluster number of least-reached People Groups In Countries (PGIC)","Long Integer"
|
| 82 |
+
jppeopleclusters,NbrPGAC,"People Cluster number of People Groups Across Countries (PGAC)","Long Integer"
|
| 83 |
+
jppeopleclusters,NbrPGIC,"People Cluster number of People Groups In Countries (PGIC)","Long Integer"
|
| 84 |
+
jppeopleclusters,PeopleCluster,"People Cluster name",Text
|
| 85 |
+
jppeopleclusters,PeopleID1,"Affinity Bloc code","Long Integer"
|
| 86 |
+
jppeopleclusters,PeopleID2,"People Cluster code","Long Integer"
|
| 87 |
+
jppeopleclusters,PercentChristianPC,"People Cluster percent Christian Adherents based on summation of people group % Christian Adherents",Single
|
| 88 |
+
jppeopleclusters,PercentEvangelicalPC,"People Cluster percent Evangelical based on summation of people group % Evangelical",Single
|
| 89 |
+
jppeopleclusters,PoplLR,"Population of least-reached people groups in People Cluster","Long Integer"
|
| 90 |
+
jppeopleclusters,Population,"People Cluster population based on summation of people group populations","Long Integer"
|
| 91 |
+
jppeopleclusters,PrimaryLanguage,"People Cluster primary language",Text
|
| 92 |
+
jppeopleclusters,PrimaryReligion,"People Cluster primary religion",Text
|
| 93 |
+
jppeopleclusters,RLG3,"Religion code","Long Integer"
|
| 94 |
+
jppeopleclusters,ROL3,"Language code",Text
|
| 95 |
+
jppeopleclusters,ROP1,"Registry of Peoples - Affinity Bloc ID",Text
|
| 96 |
+
jppeopleclusters,ROP2,"Registry of Peoples - People Cluster ID",Text
|
| 97 |
+
jppeopleclusters,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 98 |
+
jppeoples,10_40Window,"Y = in 10/40 Window",Text
|
| 99 |
+
jppeoples,AffinityBloc,"Affinity Bloc for this people group",Text
|
| 100 |
+
jppeoples,BibleStatus,"Bible status","Long Integer"
|
| 101 |
+
jppeoples,Continent,Continent,Text
|
| 102 |
+
jppeoples,CountOfCountries,"Number of countries of residence","Long Integer"
|
| 103 |
+
jppeoples,Ctry,"Country name",Text
|
| 104 |
+
jppeoples,Frontier,"Frontier People Group",Text
|
| 105 |
+
jppeoples,IndigenousCode,"Is this group indigenous to this country",Text
|
| 106 |
+
jppeoples,JPScale,"See http://www.joshuaproject.net/definitions.php",Text
|
| 107 |
+
jppeoples,Latitude,"Latitude value of language polygon or highest density district centroid, for Google maps colored dot",Double
|
| 108 |
+
jppeoples,LeastReached,"Y = Least Reached / unreached. JPScale < 2.0",Text
|
| 109 |
+
jppeoples,Longitude,"Longitude value of language polygon or highest density district centroid, for Google maps colored dot",Double
|
| 110 |
+
jppeoples,PctChristianRange,"Percent Christian Range based off PercentAdherent value",Text
|
| 111 |
+
jppeoples,PctEvangelicalRange,"Percent Evangelical Range based off PercentEvangelical value",Text
|
| 112 |
+
jppeoples,PeopleCluster,"People cluster",Text
|
| 113 |
+
jppeoples,PeopleID1,"Affinity Bloc code","Long Integer"
|
| 114 |
+
jppeoples,PeopleID2,"People cluster code","Long Integer"
|
| 115 |
+
jppeoples,PeopleID3,"People-Group-Across-Countries ID number","Long Integer"
|
| 116 |
+
jppeoples,PeopNameAcrossCountries,"Name of people group across countries of residence",Text
|
| 117 |
+
jppeoples,PeopNameInCountry,"Name of people group in this country",Text
|
| 118 |
+
jppeoples,PercentAdherents,"% Christian Adherents for this people group",Double
|
| 119 |
+
jppeoples,PercentEvangelical,"% Evangelical for this people group",Double
|
| 120 |
+
jppeoples,Population,"Population in this country","Long Integer"
|
| 121 |
+
jppeoples,PrimaryLanguageName,"Primary language in this country",Text
|
| 122 |
+
jppeoples,PrimaryReligion,"Primary religion in this country",Text
|
| 123 |
+
jppeoples,RegionCode,"Region code for this country","Long Integer"
|
| 124 |
+
jppeoples,RegionName,"Region name",Text
|
| 125 |
+
jppeoples,RLG3,"Primary religion code","Long Integer"
|
| 126 |
+
jppeoples,ROG2,"Registry of Geographic Places continent code",Text
|
| 127 |
+
jppeoples,ROG3,"FIPS-2 country code",Text
|
| 128 |
+
jppeoples,ROL3,"Ethnologue language code, 17th Edition",Text
|
| 129 |
+
jppeoples,ROP1,"Registry of Peoples - Affinity Bloc ID",Text
|
| 130 |
+
jppeoples,ROP2,"Registry of Peoples - People Cluster ID",Text
|
| 131 |
+
jppeoples,ROP3,"Registry of Peoples - People Group ID","Long Integer"
|
| 132 |
+
jppeoples,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 133 |
+
jpregionsum,Cnt1040Window,"Count of countries in 10/40 Window in region","Long Integer"
|
| 134 |
+
jpregionsum,NbrCountries,"Number of countries in region","Long Integer"
|
| 135 |
+
jpregionsum,NbrCountriesLR,"Number of countries considered least-reached in region","Long Integer"
|
| 136 |
+
jpregionsum,NbrLR,"Number of people groups considered least-reached in region","Long Integer"
|
| 137 |
+
jpregionsum,NbrPGIC,"Number of people groups in region","Long Integer"
|
| 138 |
+
jpregionsum,PercentChristian,"Region percent Christian Adherents (generated from people group % Christian Adherents)",Double
|
| 139 |
+
jpregionsum,PercentEvangelical,"Region percent Christian Evangelicals (generated from people group % Evangelicals)",Double
|
| 140 |
+
jpregionsum,PercentLR,"Percent of people groups considered least-reached in region",Double
|
| 141 |
+
jpregionsum,PercentPoplLR,"Percent of population living in least-reached people groups in region",Double
|
| 142 |
+
jpregionsum,PercentUrbanized,"Region percent urbanized",Double
|
| 143 |
+
jpregionsum,RegionCode,"Region code","Long Integer"
|
| 144 |
+
jpregionsum,RegionName,"Region name",Text
|
| 145 |
+
jpregionsum,SumRegion,"Population of region (summed from people group populations)",Double
|
| 146 |
+
jpregionsum,SumRegionLR,"Population of least-reached people groups in region",Double
|
| 147 |
+
jpregionsum,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 148 |
+
jpreligionsum,NbrLR,"Number of least-reached people groups with this primary religion",Double
|
| 149 |
+
jpreligionsum,NbrReligion,"Number of people groups with this primary religion","Long Integer"
|
| 150 |
+
jpreligionsum,PercentLR,"Percent of least-reached people groups with this primary religion",Double
|
| 151 |
+
jpreligionsum,PercentPoplLR,"Percent of least-reached people group populations with this primary religion",Double
|
| 152 |
+
jpreligionsum,PercentReligion,"Percent of world population with this primary religion",Double
|
| 153 |
+
jpreligionsum,PrimaryReligion,"Primary Religion name",Text
|
| 154 |
+
jpreligionsum,RLG3,"Primary Religion code","Long Integer"
|
| 155 |
+
jpreligionsum,SumReligion,"Population with this primary religion",Double
|
| 156 |
+
jpreligionsum,SumReligionLR,"Sum of least-reached people group populations with this primary religion",Double
|
| 157 |
+
jpreligionsum,WorkersNeeded,"Estimated workers needed based on 1:50,000","Long Integer"
|
| 158 |
+
jpresources,Category,"Resource category",Text
|
| 159 |
+
jpresources,CategoryCode,"Resource category code",Text
|
| 160 |
+
jpresources,ID,"Resource ID",Text
|
| 161 |
+
jpresources,ROL3,"Language code",Text
|
| 162 |
+
jpresources,URL,"Resource URL",Text
|
| 163 |
+
jpresources,WebText,"Resource name (web version)",Text
|
| 164 |
+
jpscalesum,CountofPGIC,"Count of people groups at this progress level","Long Integer"
|
| 165 |
+
jpscalesum,JPScale,"Joshua Project Progress Scale value",Text
|
| 166 |
+
jpscalesum,JPScaleAdherents,"Joshua Project Progress Scale percent Christian Adherents criteria",Text
|
| 167 |
+
jpscalesum,JPScaleDescription,"Joshua Project Progress Scale description",Text
|
| 168 |
+
jpscalesum,JPScaleEvangelicals,"Joshua Project Progress Scale percent Evangelicals criteria",Text
|
| 169 |
+
jpscalesum,JPStage,"Joshua Project Progress Scale name",Text
|
| 170 |
+
jpscalesum,PctGlobalPGIC,"Percent of all people groups at this progress level",Double
|
| 171 |
+
jpscalesum,PctGlobalPopulation,"Joshua Project Progress Scale percent of world population",Double
|
| 172 |
+
jpscalesum,TotalPopulation,"Joshua Project Progress Scale population based on sum of people group populations",Double
|
| 173 |
+
jpsouthasia,Adm1Name,"State / Province name",Text
|
| 174 |
+
jpsouthasia,Adm2Name,"District name",Text
|
| 175 |
+
jpsouthasia,Buddhist,"Buddhist population",Double
|
| 176 |
+
jpsouthasia,Christian,"Christian population",Double
|
| 177 |
+
jpsouthasia,FlashMapIDProvince,"Flash Map State / Province ID",Text
|
| 178 |
+
jpsouthasia,Hindu,"Hindu population",Double
|
| 179 |
+
jpsouthasia,JSMapIDProvince,"JS Map State / Province ID",Text
|
| 180 |
+
jpsouthasia,Muslim,"Muslim population",Double
|
| 181 |
+
jpsouthasia,Other,"Other population",Double
|
| 182 |
+
jpsouthasia,PeopleID3,"People code","Long Integer"
|
| 183 |
+
jpsouthasia,PeopleID3ROG3,"Unique key (concatenation of People code and Country code)",Text
|
| 184 |
+
jpsouthasia,Population,"Population in this district",Double
|
| 185 |
+
jpsouthasia,ROG3,"Country code",Text
|
| 186 |
+
jpsouthasia,ROG4,"State / Province code",Text
|
| 187 |
+
jpsouthasia,ROG5,"District code",Text
|
| 188 |
+
jpsouthasia,Sikh,"Sikh population",Double
|
| 189 |
+
jpsouthasiasum,Adm1Name,"State / District name",Text
|
| 190 |
+
jpsouthasiasum,cntTotalPeoples,"Count of people groups","Long Integer"
|
| 191 |
+
jpsouthasiasum,cntTotalROG5,"Count of total districts","Long Integer"
|
| 192 |
+
jpsouthasiasum,FlashMapIDProvince,"Flash Map State / Province ID",Text
|
| 193 |
+
jpsouthasiasum,JSMapIDProvince,"JS Map State / Province ID",Text
|
| 194 |
+
jpsouthasiasum,ROG3,"Country code",Text
|
| 195 |
+
jpsouthasiasum,ROG4,"State / District code",Text
|
| 196 |
+
jpsouthasiasum,SumOfPopulation,"Sum of population of people groups",Double
|
| 197 |
+
jptotals,ID,"Total field",Text
|
| 198 |
+
jptotals,IDValue,"Total value",Double
|
| 199 |
+
jpupgotd,10_40Window,"In the 10/40 Window",Text
|
| 200 |
+
jpupgotd,AudioRecordings,"Global Recordings available",Text
|
| 201 |
+
jpupgotd,Bible,"Bible status",Text
|
| 202 |
+
jpupgotd,BibleYear,"Complete Bible year",Text
|
| 203 |
+
jpupgotd,Continent,"Continent name",Text
|
| 204 |
+
jpupgotd,Ctry,"Country name",Text
|
| 205 |
+
jpupgotd,JF,"Jesus Film available",Text
|
| 206 |
+
jpupgotd,JPScale,"Joshua Project Progress Scale value",Text
|
| 207 |
+
jpupgotd,LRofTheDayDay,"Least-reached of the day day","Long Integer"
|
| 208 |
+
jpupgotd,LRofTheDayMonth,"Least-reached of the day month","Long Integer"
|
| 209 |
+
jpupgotd,LRofTheDaySet,"Least-reached of the day set number","Long Integer"
|
| 210 |
+
jpupgotd,NTOnline,"Is the New Testament online in text and/or audio",Text
|
| 211 |
+
jpupgotd,NTYear,"New Testament year",Text
|
| 212 |
+
jpupgotd,PeopleID3,"People code","Long Integer"
|
| 213 |
+
jpupgotd,PeopleID3ROG3,"Unique key (concatenation of People code and Country code)",Text
|
| 214 |
+
jpupgotd,PeopNameInCountry,"People group name in specific country",Text
|
| 215 |
+
jpupgotd,PercentAdherents,"Percent Christian Adherents",Double
|
| 216 |
+
jpupgotd,PercentEvangelical,"Percent Evangelical",Double
|
| 217 |
+
jpupgotd,Population,"People group population in this country","Long Integer"
|
| 218 |
+
jpupgotd,PortionsYear,"Bible Portions year",Text
|
| 219 |
+
jpupgotd,PrimaryLanguageName,"Primary language name",Text
|
| 220 |
+
jpupgotd,PrimaryReligion,"Primary religion name",Text
|
| 221 |
+
jpupgotd,RegionCode,"Region code",Text
|
| 222 |
+
jpupgotd,RegionName,"Region name",Text
|
| 223 |
+
jpupgotd,RLG3,"Religion code","Long Integer"
|
| 224 |
+
jpupgotd,ROG2,"Continent code",Text
|
| 225 |
+
jpupgotd,ROG3,"Country code",Text
|
| 226 |
+
jpupgotd,ROL3,"Language code",Text
|
| 227 |
+
jpupgotd,WorldPopulation,"Global people group population",Double
|
| 228 |
+
|
| 229 |
+
Bible Translation status:
|
| 230 |
+
0,Unspecified
|
| 231 |
+
1,Translation Needed
|
| 232 |
+
2,Translation Started
|
| 233 |
+
3,Portions
|
| 234 |
+
4,New Testament
|
| 235 |
+
5,Complete Bible
|
| 236 |
+
|
| 237 |
+
"Joshua Project welcomes corrections / updates to this data. Please send feedback to:"
|
| 238 |
+
|
| 239 |
+
Email:,info@joshuaproject.net
|
| 240 |
+
Web:,www.joshuaproject.net
|
archive/PeopleCtryLangListing.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/UnreachedPeoplesByCountry.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/analyze_api_data.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File Purpose: Fetch and analyze Joshua Project API data.
|
| 3 |
+
Primary Functions:
|
| 4 |
+
- Fetch data from API.
|
| 5 |
+
- Analyze structure and quality indicators.
|
| 6 |
+
- Compare with local CSV data.
|
| 7 |
+
Inputs:
|
| 8 |
+
- API Key (hardcoded for this script)
|
| 9 |
+
- joshua-project/AllPeoplesInCountry.csv
|
| 10 |
+
Outputs:
|
| 11 |
+
- api_data_sample.json
|
| 12 |
+
- Console output with analysis
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import requests
|
| 16 |
+
import pandas as pd
|
| 17 |
+
import json
|
| 18 |
+
import os
|
| 19 |
+
|
| 20 |
+
# API configuration.
# SECURITY: the API key was previously hardcoded in this script. Prefer
# supplying it via the JOSHUA_PROJECT_API_KEY environment variable; the old
# literal value is kept as a fallback so existing behavior is unchanged.
API_KEY = os.environ.get("JOSHUA_PROJECT_API_KEY", "143a3df23d27")
BASE_URL = "https://api.joshuaproject.net/v1/people_groups.json"
# Use absolute path or relative to script execution
CSV_PATH = "AllPeoplesInCountry.csv"
OUTPUT_JSON = "api_data_sample.json"
|
| 25 |
+
|
| 26 |
+
def fetch_data(limit=50):
    """Fetch a sample of people-group records from the Joshua Project API.

    Args:
        limit: Maximum number of records to request from the API.

    Returns:
        The parsed JSON payload (a list of record dicts) on success, or
        None if the request failed for any reason.
    """
    url = f"{BASE_URL}?api_key={API_KEY}&limit={limit}"
    print(f"Fetching data from {url}...")
    try:
        # A timeout prevents the script from hanging indefinitely when the
        # API is unreachable (the original call had no timeout at all).
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        # Covers connection errors, timeouts, and non-2xx responses raised
        # by raise_for_status(); callers are expected to handle None.
        print(f"Error fetching data: {e}")
        return None
|
| 36 |
+
|
| 37 |
+
def analyze_structure(data):
    """Print a structural overview of the fetched API records.

    Reports the record count, lists the keys of the first record, and
    highlights keys whose names suggest data-quality / provenance
    metadata (source, date, updated, precision, status).
    """
    # Guard: require a non-empty list of records.
    if not data or not isinstance(data, list):
        print("Invalid data format.")
        return

    print(f"\nFetched {len(data)} records.")

    sample = data[0]
    print("\nKeys in first record:")
    print(list(sample.keys()))

    # Substrings whose presence in a key name hints at quality metadata.
    markers = ['source', 'date', 'updated', 'precision', 'status']
    print("\nPotential Quality Indicators found in keys:")
    for key in sample.keys():
        if any(marker in key.lower() for marker in markers):
            print(f" - {key}: {sample[key]}")
|
| 53 |
+
|
| 54 |
+
def compare_with_csv(api_data, csv_path):
    """Cross-check the API sample against the local CSV on PeopleID3.

    Prints details for the first few matches/misses and a final tally.
    Returns None; all output is console-only. Silently returns early if
    the CSV is missing, unreadable, or lacks a PeopleID3 column.
    """
    if not os.path.exists(csv_path):
        print(f"\nCSV file {csv_path} not found. Skipping comparison.")
        return

    print(f"\nLoading CSV from {csv_path}...")
    try:
        # Skip first 2 lines as per previous analysis script
        df = pd.read_csv(csv_path, skiprows=2)
        print(f"Loaded {len(df)} rows from CSV.")
    except Exception as e:
        print(f"Error reading CSV: {e}")
        return

    # Clean columns just in case
    df.columns = df.columns.str.strip()

    print("\nComparing API sample with CSV data (matching on PeopleID3)...")

    matches = 0
    mismatches = 0

    # Prepare CSV data for matching
    if 'PeopleID3' not in df.columns:
        print("PeopleID3 column missing in CSV.")
        return

    # Create a string column for PeopleID3 to handle float/int discrepancies
    # Handle NaN values and convert float (e.g. 10208.0) to int (10208) then string
    df_clean = df.dropna(subset=['PeopleID3']).copy()
    df_clean['PeopleID3_str'] = df_clean['PeopleID3'].astype(int).astype(str).str.strip()

    for record in api_data:
        # API PeopleID3 might be int or str
        pid_api = record.get("PeopleID3")
        pid_api_str = str(pid_api).strip()

        # Find in DF
        match = df_clean[df_clean['PeopleID3_str'] == pid_api_str]

        if not match.empty:
            matches += 1
            # First matching row wins; duplicates in the CSV are ignored.
            csv_row = match.iloc[0]
            # Compare a few fields
            api_name = record.get("PeopNameInCountry")
            csv_name = csv_row.get("PeopNameInCountry")

            api_pop = record.get("Population")
            csv_pop = csv_row.get("Population")

            if matches <= 5:  # Print first 5 matches details
                print(f"Match found for PeopleID3 {pid_api}:")
                print(f"  Name - API: {api_name}, CSV: {csv_name}")
                print(f"  Pop - API: {api_pop}, CSV: {csv_pop}")
        else:
            mismatches += 1
            if mismatches <= 5:
                print(f"No match in CSV for PeopleID3 {pid_api} (Name: {record.get('PeopNameInCountry')})")

    print(f"\nTotal Matches: {matches}")
    print(f"Total Mismatches (in sample): {mismatches}")
|
| 115 |
+
|
| 116 |
+
def main():
    """Fetch an API sample, analyze it, compare against the CSV, and save it."""
    data = fetch_data(limit=50)
    if not data:
        # Nothing fetched — skip analysis and persistence entirely.
        return

    analyze_structure(data)
    compare_with_csv(data, CSV_PATH)

    print(f"\nSaving fetched data to {OUTPUT_JSON}...")
    with open(OUTPUT_JSON, 'w') as out:
        json.dump(data, out, indent=2)
    print("Done.")
|
| 126 |
+
|
| 127 |
+
# Script entry point: run the fetch/analyze/compare pipeline.
if __name__ == "__main__":
    main()
|
archive/analyze_data.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File Purpose: Analyze and compare Joshua Project data files.
|
| 3 |
+
Primary Functions:
|
| 4 |
+
- Load CSV data into pandas DataFrames.
|
| 5 |
+
- Compare 'master' dataset (AllPeoplesInCountry.csv) with CPPI cross-reference dataset.
|
| 6 |
+
- Identify duplicates, unique records, and data discrepancies.
|
| 7 |
+
- Generate a summary report.
|
| 8 |
+
Inputs:
|
| 9 |
+
- /home/coolhand/html/datavis/joshua-project/AllPeoplesInCountry.csv
|
| 10 |
+
- /home/coolhand/html/datavis/joshua-project/extracted_cppi/jp-cppi-cross-reference.csv
|
| 11 |
+
Outputs:
|
| 12 |
+
- Printed summary of analysis.
|
| 13 |
+
- joshua_data_summary.md (Report file)
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import pandas as pd
|
| 17 |
+
import os
|
| 18 |
+
|
| 19 |
+
# Paths
# NOTE(review): machine-specific absolute path — breaks portability; consider
# deriving from the script location or an environment variable.
BASE_DIR = "/home/coolhand/html/datavis/joshua-project"
MASTER_CSV = os.path.join(BASE_DIR, "AllPeoplesInCountry.csv")
CPPI_CSV = os.path.join(BASE_DIR, "extracted_cppi", "jp-cppi-cross-reference.csv")
OUTPUT_REPORT = os.path.join(BASE_DIR, "joshua_data_summary.md")
|
| 24 |
+
|
| 25 |
+
def load_data():
    """Read the master and CPPI CSVs.

    Returns:
        (master_df, cppi_df) on success, or (None, None) if either
        file cannot be read.
    """
    print("Loading data...")
    try:
        # The master export carries a title line plus a blank line before
        # the real header, hence skiprows=2.
        master_df = pd.read_csv(MASTER_CSV, encoding='utf-8', on_bad_lines='skip', skiprows=2)
        print(f"Loaded Master CSV: {len(master_df)} rows")

        # The CPPI export is not clean UTF-8; latin1 decodes any byte stream.
        cppi_df = pd.read_csv(CPPI_CSV, encoding='latin1', on_bad_lines='skip')
        print(f"Loaded CPPI CSV: {len(cppi_df)} rows")
    except Exception as e:
        print(f"Error loading data: {e}")
        return None, None

    return master_df, cppi_df
|
| 42 |
+
|
| 43 |
+
def clean_columns(df):
    """Strip surrounding whitespace from every column name of *df*.

    Mutates the frame's columns in place and returns the same object,
    so it can be used both fluently and for its side effect.
    """
    df.columns = [name.strip() for name in df.columns]
    return df
|
| 47 |
+
|
| 48 |
+
def analyze(master_df, cppi_df):
    """Compare the two DataFrames.

    Joins master and CPPI on (ROG3, PeopleID3), reports record overlap and
    population discrepancies, writes a markdown report to OUTPUT_REPORT,
    and echoes the same report to the console. Returns None.
    """
    print("\nAnalyzing data...")

    # Clean headers
    master_df = clean_columns(master_df)
    cppi_df = clean_columns(cppi_df)

    # Check for keys
    join_keys = ['ROG3', 'PeopleID3']
    for key in join_keys:
        if key not in master_df.columns:
            print(f"Error: '{key}' not in Master.")
            return
        if key not in cppi_df.columns:
            print(f"Error: '{key}' not in CPPI.")
            return

    # Convert keys to string so int/float/str representations compare equal.
    for key in join_keys:
        master_df[key] = master_df[key].astype(str).str.strip()
        cppi_df[key] = cppi_df[key].astype(str).str.strip()

    # Create a composite key for easier set operations
    master_df['KEY'] = master_df['ROG3'] + "_" + master_df['PeopleID3']
    cppi_df['KEY'] = cppi_df['ROG3'] + "_" + cppi_df['PeopleID3']

    # Sets of Keys
    master_keys = set(master_df['KEY'])
    cppi_keys = set(cppi_df['KEY'])

    # Intersections and differences
    common_keys = master_keys.intersection(cppi_keys)
    only_master_keys = master_keys - cppi_keys
    only_cppi_keys = cppi_keys - master_keys

    # Summarize findings
    summary = []
    summary.append("# Joshua Project Data Analysis Summary\n")
    summary.append(f"## Dataset Overview")
    summary.append(f"- **Master Dataset** (`AllPeoplesInCountry.csv`): {len(master_df)} records")
    summary.append(f"- **CPPI Cross-Ref** (`jp-cppi-cross-reference.csv`): {len(cppi_df)} records")

    summary.append(f"\n## Comparison by ROG3 + PeopleID3")
    summary.append(f"- **Common Records**: {len(common_keys)}")
    summary.append(f"- **Only in Master**: {len(only_master_keys)}")
    summary.append(f"- **Only in CPPI**: {len(only_cppi_keys)}")

    # Data Consistency Check (Population)
    summary.append(f"\n## Data Consistency (Common Records)")

    if 'Population' in master_df.columns and 'JPPopulation' in cppi_df.columns:
        # Merge on Keys
        merged = pd.merge(master_df, cppi_df, on=join_keys, suffixes=('_master', '_cppi'))

        def clean_pop(val):
            # Normalize "1,234"-style strings and blanks to floats.
            if isinstance(val, str):
                val = val.replace(',', '').strip()
                if val == '': return 0.0
                return float(val)
            return float(val)

        merged['Population_master'] = merged['Population'].apply(clean_pop)
        merged['JPPopulation_cppi'] = merged['JPPopulation'].apply(clean_pop)

        merged['diff'] = merged['Population_master'] - merged['JPPopulation_cppi']
        # Consider a match if difference is small (e.g. < 10) just in case
        exact_matches = merged[merged['diff'].abs() < 1]
        discrepancies = merged[merged['diff'].abs() >= 1]

        summary.append(f"- **Population Exact Matches**: {len(exact_matches)} / {len(merged)}")
        summary.append(f"- **Population Discrepancies**: {len(discrepancies)}")

        if not discrepancies.empty:
            summary.append(f"\n### Top 10 Population Discrepancies")
            # Fallback names guard against the merge suffixing these columns.
            name_col_master = 'PeopNameInCountry' if 'PeopNameInCountry' in master_df.columns else 'JPPeopleGroup_master'
            name_col_cppi = 'JPPeopleGroup' if 'JPPeopleGroup' in cppi_df.columns else 'JPPeopleGroup_cppi'

            summary.append(f"| ROG3 | PeopleID3 | Name | Pop (Master) | Pop (CPPI) | Diff |")
            summary.append("|---|---|---|---|---|---|")
            # Largest absolute differences first.
            for _, row in discrepancies.sort_values('diff', key=abs, ascending=False).head(10).iterrows():
                name = row.get(name_col_master, 'N/A')
                summary.append(f"| {row['ROG3']} | {row['PeopleID3']} | {name} | {row['Population_master']:.0f} | {row['JPPopulation_cppi']:.0f} | {row['diff']:.0f} |")

    else:
        summary.append("- Could not compare population (missing columns).")

    # Write report
    with open(OUTPUT_REPORT, 'w') as f:
        f.write('\n'.join(summary))

    print('\n'.join(summary))
    print(f"\nReport saved to: {OUTPUT_REPORT}")
|
| 141 |
+
|
| 142 |
+
# Script entry point: load both CSVs and run the comparison/report
# only if both loads succeeded.
if __name__ == "__main__":
    m_df, c_df = load_data()
    if m_df is not None and c_df is not None:
        analyze(m_df, c_df)
|
archive/api_data_sample.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/extracted_cppi/README.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
CPPI <-> Joshua Project Cross-Reference File
|
| 2 |
+
|
| 3 |
+
The included CSV/Excel and Access files are a cross-reference between IMB CPPI (www.peoplegroups.org) people group data and Joshua Project (www.joshuaproject.net) people group data.
|
| 4 |
+
|
| 5 |
+
The classifications are:
|
| 6 |
+
|
| 7 |
+
Type 1 - This people group is on both Joshua Project and CPPI
|
| 8 |
+
Type 2 - This people group is only on Joshua Project
|
| 9 |
+
Type 3 - This people group is only on CPPI
|
| 10 |
+
|
| 11 |
+
For further information contact info@joshuaproject.net
|
archive/extracted_cppi/jp-cppi-cross-reference.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
archive/extracted_cppi/jp-cppi-cross-reference.xlsx
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eed4dcb27034964be676292acac97dded29735d4d9695dbece839c97de4aeb9c
|
| 3 |
+
size 2164334
|
archive/joshua_data_summary.md
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Joshua Project Data Analysis Summary
|
| 2 |
+
|
| 3 |
+
## Dataset Overview
|
| 4 |
+
- **Master Dataset** (`AllPeoplesInCountry.csv`): 16392 records
|
| 5 |
+
- **CPPI Cross-Ref** (`jp-cppi-cross-reference.csv`): 19375 records
|
| 6 |
+
|
| 7 |
+
## Comparison by ROG3 + PeopleID3
|
| 8 |
+
- **Common Records**: 15957
|
| 9 |
+
- **Only in Master**: 435
|
| 10 |
+
- **Only in CPPI**: 1481
|
| 11 |
+
|
| 12 |
+
## Data Consistency (Common Records)
|
| 13 |
+
- **Population Exact Matches**: 4843 / 15957
|
| 14 |
+
- **Population Discrepancies**: 11086
|
| 15 |
+
|
| 16 |
+
### Top 10 Population Discrepancies
|
| 17 |
+
| ROG3 | PeopleID3 | Name | Pop (Master) | Pop (CPPI) | Diff |
|
| 18 |
+
|---|---|---|---|---|---|
|
| 19 |
+
| IN | 18084.0 | Shaikh unspecified | 100866000 | 81439000 | 19427000 |
|
| 20 |
+
| IN | 16187.0 | Yadav (Hindu traditions) | 57595000 | 46502000 | 11093000 |
|
| 21 |
+
| IN | 16521.0 | Brahmin unspecified | 48723000 | 39339000 | 9384000 |
|
| 22 |
+
| IN | 17554.0 | Mahratta unspecified | 44162000 | 35656000 | 8506000 |
|
| 23 |
+
| NI | 16057.0 | Yoruba | 46817000 | 39123000 | 7694000 |
|
| 24 |
+
| IN | 17928.0 | Rajput (Hindu traditions) | 38905000 | 31412000 | 7493000 |
|
| 25 |
+
| CH | 12051.0 | Han Chinese, Mandarin | 912955000 | 918811000 | -5856000 |
|
| 26 |
+
| EG | 11722.0 | Arab, Egyptian Muslim | 67286000 | 72865000 | -5579000 |
|
| 27 |
+
| IN | 16561.0 | Chamar (Hindu traditions) | 46307000 | 51679000 | -5372000 |
|
| 28 |
+
| IN | 16318.0 | Bania unspecified | 23939000 | 19328000 | 4611000 |
|
archive/jp-cppi-cross-reference-csv.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3753df2f513db5278ca94bf3383cd190035904f45f63c86317cb24825111f277
|
| 3 |
+
size 2773230
|
create_enriched_datasets.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File Purpose: Create enriched/denormalized versions of Joshua Project data.
|
| 3 |
+
Primary Functions:
|
| 4 |
+
- Loads normalized datasets (people_groups, countries, languages)
|
| 5 |
+
- Joins data to create enriched versions with embedded lookups
|
| 6 |
+
- Generates specialized subsets (unreached, by region, etc.)
|
| 7 |
+
- Exports to JSON and Parquet formats
|
| 8 |
+
- Validates data integrity
|
| 9 |
+
|
| 10 |
+
Inputs:
|
| 11 |
+
- joshua_project_full_dump.json (people groups)
|
| 12 |
+
- joshua_project_countries.json
|
| 13 |
+
- joshua_project_languages.json
|
| 14 |
+
|
| 15 |
+
Outputs:
|
| 16 |
+
- joshua_project_enriched.json (full denormalized)
|
| 17 |
+
- joshua_project_enriched.parquet
|
| 18 |
+
- joshua_project_unreached.json (unreached only)
|
| 19 |
+
- joshua_project_unreached.parquet
|
| 20 |
+
- enriched_metadata.json (stats and validation report)
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
import json
|
| 24 |
+
import os
|
| 25 |
+
from datetime import datetime
|
| 26 |
+
|
| 27 |
+
def load_datasets():
    """Load all normalized datasets from their JSON files.

    Returns:
        dict keyed by dataset name ('people_groups', 'countries',
        'languages', 'totals'), or None if any file is missing or
        contains invalid JSON.
    """
    print("\n" + "="*70)
    print("LOADING NORMALIZED DATASETS")
    print("="*70)

    datasets = {}

    files = {
        'people_groups': 'joshua_project_full_dump.json',
        'countries': 'joshua_project_countries.json',
        'languages': 'joshua_project_languages_enriched_geo.json',  # Use geo-enriched version with family names
        'totals': 'joshua_project_totals.json'
    }

    for name, filename in files.items():
        print(f"\nLoading {name}...")
        try:
            with open(filename, 'r', encoding='utf-8') as f:
                data = json.load(f)

            count = len(data) if isinstance(data, list) else len(data.keys())
            # Bug fix: status messages previously printed a literal
            # "(unknown)" placeholder instead of the actual filename.
            print(f"  ✅ Loaded {count:,} records from {filename}")
            datasets[name] = data

        except FileNotFoundError:
            print(f"  ❌ File not found: {filename}")
            return None
        except json.JSONDecodeError as e:
            print(f"  ❌ JSON error in {filename}: {e}")
            return None

    return datasets
|
| 60 |
+
|
| 61 |
+
def create_lookups(datasets):
    """Build O(1) lookup dicts for countries (ROG3), languages (ROL3), and totals (id)."""
    print("\n" + "="*70)
    print("CREATING LOOKUP INDICES")
    print("="*70)

    lookups = {
        'countries': {entry['ROG3']: entry for entry in datasets['countries']},
        'languages': {entry['ROL3']: entry for entry in datasets['languages']},
        'totals': {entry['id']: entry for entry in datasets['totals']},
    }

    print(f"✅ Country lookup: {len(lookups['countries'])} entries")
    print(f"✅ Language lookup: {len(lookups['languages'])} entries")
    print(f"✅ Totals lookup: {len(lookups['totals'])} entries")

    return lookups
|
| 84 |
+
|
| 85 |
+
def enrich_people_group(people_group, lookups):
    """Return a copy of *people_group* with country/language summaries embedded.

    'country_data' is attached when ROG3 resolves via lookups['countries'],
    'language_data' when ROL3 resolves via lookups['languages']; unresolved
    codes simply leave the corresponding key absent. The input dict is
    never mutated.
    """
    # Destination field -> source field in the looked-up record.
    country_field_map = (
        ('name', 'Ctry'),
        ('continent', 'Continent'),
        ('region', 'RegionName'),
        ('percent_christianity', 'PercentChristianity'),
        ('percent_evangelical', 'PercentEvangelical'),
        ('total_peoples', 'CntPeoples'),
        ('unreached_peoples', 'CntPeoplesLR'),
        ('jp_scale', 'JPScaleCtry'),
    )
    language_field_map = (
        ('name', 'Language'),
        ('hub_country', 'HubCountry'),
        ('bible_status', 'BibleStatus'),
        ('bible_year', 'BibleYear'),
        ('nt_year', 'NTYear'),
        ('portions_year', 'PortionsYear'),
        ('has_jesus_film', 'HasJesusFilm'),
        ('has_audio_recordings', 'AudioRecordings'),
        ('status', 'Status'),
        # Geographic enrichment fields from Glottolog
        ('latitude', 'latitude'),
        ('longitude', 'longitude'),
        ('glottocode', 'glottocode'),
        ('family_name', 'family_name'),
        ('family_id', 'family_id'),
        ('macroarea', 'macroarea'),
    )

    enriched = dict(people_group)

    country_code = people_group.get('ROG3')
    if country_code and country_code in lookups['countries']:
        country = lookups['countries'][country_code]
        enriched['country_data'] = {dst: country.get(src) for dst, src in country_field_map}

    language_code = people_group.get('ROL3')
    if language_code and language_code in lookups['languages']:
        language = lookups['languages'][language_code]
        enriched['language_data'] = {dst: language.get(src) for dst, src in language_field_map}

    return enriched
|
| 128 |
+
|
| 129 |
+
def create_full_enriched(datasets, lookups):
    """Enrich every people-group record, logging progress every 1000 rows."""
    print("\n" + "="*70)
    print("CREATING FULL ENRICHED DATASET")
    print("="*70)

    source = datasets['people_groups']
    total = len(source)
    enriched_records = []

    for done, record in enumerate(source, start=1):
        enriched_records.append(enrich_people_group(record, lookups))
        # Periodic progress line so long runs are visibly alive.
        if done % 1000 == 0:
            print(f"  Progress: {done:,}/{total:,} ({100*done/total:.1f}%)")

    print(f"\n✅ Created {len(enriched_records):,} enriched records")
    return enriched_records
|
| 149 |
+
|
| 150 |
+
def create_unreached_subset(enriched_records):
    """Create subset with only unreached people groups (LeastReached == 'Y').

    Args:
        enriched_records: list of enriched people-group dicts.

    Returns:
        New list containing only the least-reached records.
    """
    print("\n" + "="*70)
    print("CREATING UNREACHED SUBSET")
    print("="*70)

    unreached = [r for r in enriched_records if r.get('LeastReached') == 'Y']

    print(f"✅ Filtered to {len(unreached):,} unreached people groups")
    # Bug fix: guard the percentage line — the original divided by
    # len(enriched_records) unconditionally and crashed on empty input.
    if enriched_records:
        print(f"   ({100*len(unreached)/len(enriched_records):.1f}% of total)")

    return unreached
|
| 162 |
+
|
| 163 |
+
def save_json(data, filename, description):
    """Save *data* to *filename* as pretty-printed UTF-8 JSON.

    Args:
        data: JSON-serializable list (len() is used for the status line).
        filename: Destination path.
        description: Human-readable label used in console output.

    Returns:
        True on success, False on any write/serialization error.
    """
    # Bug fix: the status message previously printed a literal "(unknown)"
    # placeholder instead of the destination filename.
    print(f"\nSaving {description} to {filename}...")

    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)

        size_mb = os.path.getsize(filename) / (1024 * 1024)
        print(f"✅ Saved {size_mb:.2f} MB ({len(data):,} records)")
        return True

    except Exception as e:
        print(f"❌ Error saving: {e}")
        return False
|
| 178 |
+
|
| 179 |
+
def save_parquet(data, filename, description):
    """Save *data* (list of dicts) to a snappy-compressed Parquet file.

    PyArrow is imported lazily so the rest of the pipeline works without it.

    Returns:
        True on success; False if PyArrow is missing or writing fails.
    """
    # Bug fix: status messages previously printed a literal "(unknown)"
    # placeholder instead of the destination filename.
    print(f"\nSaving {description} to {filename}...")

    try:
        import pyarrow as pa
        import pyarrow.parquet as pq

        # Convert to PyArrow table
        table = pa.Table.from_pylist(data)

        # Write with compression
        pq.write_table(table, filename, compression='snappy')

        size_mb = os.path.getsize(filename) / (1024 * 1024)
        print(f"✅ Saved {size_mb:.2f} MB ({len(data):,} records)")
        return True

    except ImportError:
        print(f"⚠️  PyArrow not installed. Run: pip install pyarrow")
        print(f"   Skipping Parquet export for {filename}")
        return False
    except Exception as e:
        print(f"❌ Error saving: {e}")
        return False
|
| 204 |
+
|
| 205 |
+
def generate_enrichment_metadata(datasets, enriched, unreached):
    """Assemble a summary dict describing the enrichment inputs and outputs."""
    source_counts = {
        name: len(datasets[name])
        for name in ('people_groups', 'countries', 'languages', 'totals')
    }

    return {
        "generated_at": datetime.now().isoformat(),
        "source_datasets": source_counts,
        "enriched_datasets": {
            "full_enriched": {
                "records": len(enriched),
                "json_file": "joshua_project_enriched.json",
                "parquet_file": "joshua_project_enriched.parquet"
            },
            "unreached_only": {
                "records": len(unreached),
                "json_file": "joshua_project_unreached.json",
                "parquet_file": "joshua_project_unreached.parquet",
                "percentage_of_total": round(100 * len(unreached) / len(enriched), 2)
            }
        },
        "enrichment_details": {
            "added_fields": [
                "country_data (9 fields)",
                "language_data (9 fields)"
            ],
            "original_fields_per_record": 107,
            "enriched_fields_per_record": 109  # 107 + country_data + language_data
        }
    }
|
| 239 |
+
|
| 240 |
+
def main():
    """Main execution function.

    Orchestrates the pipeline: load -> index -> enrich -> subset -> save,
    then prints a summary including a Parquet-vs-JSON size comparison.
    Exits early (returning None) if the source datasets cannot be loaded.
    """
    print("\n" + "="*70)
    print("JOSHUA PROJECT DATA ENRICHMENT PIPELINE")
    print("="*70)

    # Load datasets
    datasets = load_datasets()
    if not datasets:
        print("\n❌ Failed to load datasets. Exiting.")
        return

    # Create lookups
    lookups = create_lookups(datasets)

    # Create full enriched dataset
    enriched = create_full_enriched(datasets, lookups)

    # Create unreached subset
    unreached = create_unreached_subset(enriched)

    # Save outputs
    print("\n" + "="*70)
    print("SAVING ENRICHED DATASETS")
    print("="*70)

    # Each save returns a bool; collected so the summary can show per-file status.
    results = {
        'full_json': save_json(enriched, 'joshua_project_enriched.json', 'full enriched dataset'),
        'full_parquet': save_parquet(enriched, 'joshua_project_enriched.parquet', 'full enriched dataset'),
        'unreached_json': save_json(unreached, 'joshua_project_unreached.json', 'unreached subset'),
        'unreached_parquet': save_parquet(unreached, 'joshua_project_unreached.parquet', 'unreached subset')
    }

    # Generate and save metadata
    metadata = generate_enrichment_metadata(datasets, enriched, unreached)
    save_json(metadata, 'enriched_metadata.json', 'enrichment metadata')

    # Print summary
    print("\n" + "="*70)
    print("ENRICHMENT SUMMARY")
    print("="*70)

    success_count = sum(1 for v in results.values() if v)
    print(f"\nFiles created: {success_count}/{len(results)}")
    for name, success in results.items():
        status = "✅" if success else "❌"
        print(f"  {status} {name}")

    print(f"\nEnriched records: {len(enriched):,}")
    print(f"Unreached subset: {len(unreached):,} ({100*len(unreached)/len(enriched):.1f}%)")

    # Size comparison only makes sense when the Parquet file was written.
    if results['full_parquet']:
        json_size = os.path.getsize('joshua_project_enriched.json') / (1024 * 1024)
        parquet_size = os.path.getsize('joshua_project_enriched.parquet') / (1024 * 1024)
        savings = 100 * (json_size - parquet_size) / json_size
        print(f"\nParquet compression: {savings:.1f}% smaller than JSON")
        print(f"  JSON: {json_size:.2f} MB")
        print(f"  Parquet: {parquet_size:.2f} MB")

    print("\n" + "="*70)
    print("✅ ENRICHMENT COMPLETE")
    print("="*70 + "\n")

    print("Next steps:")
    print("  1. Use joshua_project_enriched.json for visualizations")
    print("  2. Use joshua_project_enriched.parquet for analysis (pandas/polars)")
    print("  3. Use joshua_project_unreached.json for mission-focused visualizations")
    print("  4. Run prepare_huggingface_dataset.py to prepare for HF upload")
    print()
|
| 309 |
+
|
| 310 |
+
# Script entry point: run the full enrichment pipeline.
if __name__ == "__main__":
    main()
|
data_utilities.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Joshua Project Data Utilities
|
| 3 |
+
Easy loading functions for different use cases.
|
| 4 |
+
|
| 5 |
+
Usage examples:
|
| 6 |
+
|
| 7 |
+
# For visualizations (enriched data)
|
| 8 |
+
>>> from data_utilities import load_enriched
|
| 9 |
+
>>> data = load_enriched()
|
| 10 |
+
>>> unreached = [p for p in data if p['LeastReached'] == 'Y']
|
| 11 |
+
|
| 12 |
+
# For analysis (pandas)
|
| 13 |
+
>>> from data_utilities import load_parquet
|
| 14 |
+
>>> import pandas as pd
|
| 15 |
+
>>> df = pd.read_parquet(load_parquet('enriched'))
|
| 16 |
+
|
| 17 |
+
# For specific queries
|
| 18 |
+
>>> from data_utilities import get_by_country
|
| 19 |
+
>>> india_peoples = get_by_country('IN')
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import json
|
| 23 |
+
import os
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
|
| 26 |
+
# Dataset file paths
DATASET_DIR = Path(__file__).parent  # datasets are expected alongside this module

# Logical dataset name -> on-disk path. These names are the valid
# arguments to load_json() and load_parquet().
FILES = {
    'people_groups': DATASET_DIR / 'joshua_project_full_dump.json',
    'countries': DATASET_DIR / 'joshua_project_countries.json',
    'languages': DATASET_DIR / 'joshua_project_languages.json',
    'totals': DATASET_DIR / 'joshua_project_totals.json',
    'enriched': DATASET_DIR / 'joshua_project_enriched.json',
    'unreached': DATASET_DIR / 'joshua_project_unreached.json',
    'enriched_parquet': DATASET_DIR / 'joshua_project_enriched.parquet',
    'unreached_parquet': DATASET_DIR / 'joshua_project_unreached.parquet',
}
|
| 39 |
+
|
| 40 |
+
def load_json(dataset_name):
    """
    Load a JSON dataset by name.

    Args:
        dataset_name: One of 'people_groups', 'countries', 'languages',
            'totals', 'enriched', 'unreached'

    Returns:
        Parsed JSON data (list or dict)

    Raises:
        ValueError: if ``dataset_name`` is not a known dataset key.
        FileNotFoundError: if the backing file is missing on disk.
    """
    if dataset_name not in FILES:
        raise ValueError(f"Unknown dataset: {dataset_name}")

    path = FILES[dataset_name]
    if not path.exists():
        raise FileNotFoundError(f"Dataset not found: {path}")

    with path.open('r', encoding='utf-8') as handle:
        return json.load(handle)
|
| 61 |
+
|
| 62 |
+
def load_normalized():
    """
    Load all normalized datasets.

    Returns:
        dict with keys: people_groups, countries, languages, totals
    """
    names = ('people_groups', 'countries', 'languages', 'totals')
    return {name: load_json(name) for name in names}
|
| 75 |
+
|
| 76 |
+
def load_enriched():
    """
    Load the enriched dataset (people groups with embedded country/language data).

    Returns:
        list of enriched people group records
    """
    # Thin convenience wrapper over the generic JSON loader.
    return load_json('enriched')
|
| 84 |
+
|
| 85 |
+
def load_unreached():
    """
    Load only unreached people groups (LeastReached == 'Y').

    Returns:
        list of unreached people group records (enriched format)
    """
    # Thin convenience wrapper over the generic JSON loader.
    return load_json('unreached')
|
| 93 |
+
|
| 94 |
+
def load_parquet(dataset_name='enriched'):
    """
    Get the path to a Parquet file for loading with pandas/polars.

    Args:
        dataset_name: 'enriched' or 'unreached'

    Returns:
        Path object to the Parquet file

    Raises:
        ValueError: for an unknown dataset name.
        FileNotFoundError: if the Parquet file is missing on disk.

    Example:
        >>> import pandas as pd
        >>> df = pd.read_parquet(load_parquet('enriched'))
    """
    # Parquet variants are registered under a "<name>_parquet" key.
    key = f'{dataset_name}_parquet'
    if key not in FILES:
        raise ValueError(f"Unknown parquet dataset: {dataset_name}")

    path = FILES[key]
    if not path.exists():
        raise FileNotFoundError(f"Parquet file not found: {path}")

    return path
|
| 119 |
+
|
| 120 |
+
def get_by_country(country_code, enriched=True):
    """
    Get all people groups in a specific country.

    Args:
        country_code: Joshua Project country code (ROG3), e.g. 'IN' for India.
            NOTE(review): despite the field's "3" suffix, observed ROG3 values
            such as 'IN' are 2-character codes — confirm against the Joshua
            Project field definitions.
        enriched: If True, use enriched dataset; if False, use normalized

    Returns:
        list of people group records for that country
    """
    dataset = load_enriched() if enriched else load_json('people_groups')
    return [p for p in dataset if p.get('ROG3') == country_code]
|
| 133 |
+
|
| 134 |
+
def get_by_language(language_code, enriched=True):
    """
    Get all people groups speaking a specific language.

    Args:
        language_code: 3-letter language code (ROL3), e.g., 'hin' for Hindi
        enriched: If True, use enriched dataset; if False, use normalized

    Returns:
        list of people group records speaking that language
    """
    source = load_enriched() if enriched else load_json('people_groups')
    matches = []
    for record in source:
        if record.get('ROL3') == language_code:
            matches.append(record)
    return matches
|
| 147 |
+
|
| 148 |
+
def get_by_religion(religion, enriched=True):
    """
    Get all people groups with a specific primary religion.

    Args:
        religion: Religion name, e.g., 'Islam', 'Buddhism', 'Hinduism'
        enriched: If True, use enriched dataset; if False, use normalized

    Returns:
        list of people group records with that primary religion
    """
    source = load_enriched() if enriched else load_json('people_groups')
    return [record for record in source
            if record.get('PrimaryReligion') == religion]
|
| 161 |
+
|
| 162 |
+
def filter_unreached(data=None):
    """
    Filter dataset to only unreached people groups.

    Args:
        data: Dataset to filter (if None, loads enriched dataset)

    Returns:
        list of unreached people group records
    """
    records = load_enriched() if data is None else data
    return [rec for rec in records if rec.get('LeastReached') == 'Y']
|
| 176 |
+
|
| 177 |
+
def get_totals():
    """
    Get global summary statistics.

    Returns:
        dict mapping statistic ID to value
    """
    stats = {}
    for entry in load_json('totals'):
        stats[entry['id']] = entry['Value']
    return stats
|
| 186 |
+
|
| 187 |
+
def get_country_info(country_code):
    """
    Get detailed information about a specific country.

    Args:
        country_code: country code (ROG3)

    Returns:
        dict with country data, or None if not found
    """
    # First matching record wins; None when no country matches.
    return next((c for c in load_json('countries')
                 if c['ROG3'] == country_code), None)
|
| 202 |
+
|
| 203 |
+
def get_language_info(language_code):
    """
    Get detailed information about a specific language.

    Args:
        language_code: 3-letter language code (ROL3)

    Returns:
        dict with language data, or None if not found
    """
    # First matching record wins; None when no language matches.
    return next((lang for lang in load_json('languages')
                 if lang['ROL3'] == language_code), None)
|
| 218 |
+
|
| 219 |
+
# Example usage and tests
|
| 220 |
+
if __name__ == "__main__":
|
| 221 |
+
print("Joshua Project Data Utilities - Examples")
|
| 222 |
+
print("=" * 60)
|
| 223 |
+
|
| 224 |
+
# Example 1: Load enriched data
|
| 225 |
+
print("\n1. Loading enriched dataset...")
|
| 226 |
+
data = load_enriched()
|
| 227 |
+
print(f" Loaded {len(data):,} people groups")
|
| 228 |
+
|
| 229 |
+
# Example 2: Get unreached peoples
|
| 230 |
+
print("\n2. Filtering unreached peoples...")
|
| 231 |
+
unreached = filter_unreached(data)
|
| 232 |
+
print(f" Found {len(unreached):,} unreached people groups")
|
| 233 |
+
|
| 234 |
+
# Example 3: Get by country
|
| 235 |
+
print("\n3. Getting people groups in India (ROG3='IN')...")
|
| 236 |
+
india = get_by_country('IN')
|
| 237 |
+
print(f" Found {len(india):,} people groups in India")
|
| 238 |
+
|
| 239 |
+
# Example 4: Get by language
|
| 240 |
+
print("\n4. Getting Hindi-speaking people groups (ROL3='hin')...")
|
| 241 |
+
hindi = get_by_language('hin')
|
| 242 |
+
print(f" Found {len(hindi):,} Hindi-speaking people groups")
|
| 243 |
+
|
| 244 |
+
# Example 5: Get by religion
|
| 245 |
+
print("\n5. Getting Buddhist people groups...")
|
| 246 |
+
buddhist = get_by_religion('Buddhism')
|
| 247 |
+
print(f" Found {len(buddhist):,} Buddhist people groups")
|
| 248 |
+
|
| 249 |
+
# Example 6: Global statistics
|
| 250 |
+
print("\n6. Global statistics...")
|
| 251 |
+
totals = get_totals()
|
| 252 |
+
print(f" Total countries: {totals.get('CntCountries', 'N/A')}")
|
| 253 |
+
print(f" Buddhist people groups: {totals.get('CntBuddhistPeopGroups', 'N/A')}")
|
| 254 |
+
|
| 255 |
+
print("\n" + "=" * 60)
|
| 256 |
+
print("All examples completed successfully!")
|
dataset_metadata.json
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"people_groups": {
|
| 3 |
+
"file": "joshua_project_full_dump.json",
|
| 4 |
+
"records": 16382,
|
| 5 |
+
"fetched": "2025-12-21",
|
| 6 |
+
"endpoint": "/v1/people_groups.json",
|
| 7 |
+
"description": "People groups in countries (PGIC)"
|
| 8 |
+
},
|
| 9 |
+
"countries": {
|
| 10 |
+
"file": "joshua_project_countries.json",
|
| 11 |
+
"records": 238,
|
| 12 |
+
"fetched": "2025-12-23",
|
| 13 |
+
"endpoint": "/v1/countries.json",
|
| 14 |
+
"description": "Country-level statistics and demographics"
|
| 15 |
+
},
|
| 16 |
+
"languages": {
|
| 17 |
+
"file": "joshua_project_languages.json",
|
| 18 |
+
"records": 7134,
|
| 19 |
+
"fetched": "2025-12-23",
|
| 20 |
+
"endpoint": "/v1/languages.json",
|
| 21 |
+
"description": "Language details and translation status"
|
| 22 |
+
},
|
| 23 |
+
"totals": {
|
| 24 |
+
"file": "joshua_project_totals.json",
|
| 25 |
+
"records": 38,
|
| 26 |
+
"fetched": "2025-12-23",
|
| 27 |
+
"endpoint": "/v1/totals.json",
|
| 28 |
+
"description": "Global summary statistics"
|
| 29 |
+
}
|
| 30 |
+
}
|
enrich_with_coordinates.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Joshua Project Geographic Enrichment
Enriches Joshua Project datasets with geographic coordinates.

Merges:
1. People groups with country centroids (via ISO country codes)
2. Languages with Glottolog coordinates (via ISO 639-3 codes)

Creates valuable IP: Comprehensive people group + language data with full geographic coverage.

Usage:
    python3 enrich_with_coordinates.py

Output:
    - joshua_project_enriched_geo.json
    - joshua_project_languages_enriched_geo.json
    - enrichment_metadata.json
"""

import json
import pandas as pd
import numpy as np  # NOTE(review): appears unused in this module — confirm before removing
from pathlib import Path
from datetime import datetime

# Configuration: directory layout relative to this script.
BASE_DIR = Path(__file__).parent.parent
JOSHUA_DIR = Path(__file__).parent
GEOGRAPHIC_DIR = BASE_DIR / 'data' / 'geographic'
LINGUISTIC_DIR = BASE_DIR / 'data' / 'linguistic'
|
| 32 |
+
|
| 33 |
+
def load_data():
    """Load every input dataset needed for enrichment.

    Reads the Joshua Project dumps plus the geographic/linguistic reference
    data from disk and prints a progress line for each.

    Returns:
        7-tuple: (people_groups, languages, countries_jp, centroids,
        glottolog, glottolog_languoid, iso_codes)
    """
    def _read_json(path):
        # Small helper: parse one JSON file from disk.
        with open(path) as handle:
            return json.load(handle)

    print("=" * 70)
    print("JOSHUA PROJECT GEOGRAPHIC ENRICHMENT")
    print("=" * 70)
    print("\n📂 Loading datasets...")

    # Joshua Project dumps
    people_groups = _read_json(JOSHUA_DIR / 'joshua_project_full_dump.json')
    print(f" ✓ Loaded {len(people_groups):,} people groups")

    languages = _read_json(JOSHUA_DIR / 'joshua_project_languages.json')
    print(f" ✓ Loaded {len(languages):,} languages")

    countries_jp = _read_json(JOSHUA_DIR / 'joshua_project_countries.json')
    print(f" ✓ Loaded {len(countries_jp):,} countries (Joshua Project)")

    # Geographic reference data
    centroids = _read_json(GEOGRAPHIC_DIR / 'country_centroids.json')
    print(f" ✓ Loaded {len(centroids):,} country centroids (Natural Earth)")

    # Glottolog coordinates
    glottolog = _read_json(LINGUISTIC_DIR / 'glottolog_coordinates.json')
    print(f" ✓ Loaded {len(glottolog):,} language coordinates (Glottolog)")

    # Glottolog languoid table, used for language-family lookups
    glottolog_languoid = pd.read_csv(LINGUISTIC_DIR / 'glottolog_languoid.csv')
    print(f" ✓ Loaded {len(glottolog_languoid):,} Glottolog languoid entries")

    # ISO 639-3 codes for reference
    iso_codes = _read_json(LINGUISTIC_DIR / 'iso_639_3.json')
    print(f" ✓ Loaded {len(iso_codes):,} ISO 639-3 language codes")

    return people_groups, languages, countries_jp, centroids, glottolog, glottolog_languoid, iso_codes
|
| 74 |
+
|
| 75 |
+
def build_lookup_tables(centroids, glottolog, glottolog_languoid):
    """Build fast lookup dictionaries for matching.

    Args:
        centroids: list of country dicts carrying 'iso_a2'/'iso_a3' keys.
        glottolog: list of Glottolog geo dicts with an 'isocodes' field.
        glottolog_languoid: DataFrame with 'id', 'level', 'name', 'family_id'.

    Returns:
        4-tuple: (centroid_lookup, glottolog_lookup, family_lookup,
        glottocode_to_family)
    """
    print("\n🔍 Building lookup tables...")

    # Country centroids keyed by both 2- and 3-letter ISO codes.
    centroid_lookup = {}
    for entry in centroids:
        for key in (entry.get('iso_a2'), entry.get('iso_a3')):
            if key:
                centroid_lookup[key] = entry

    print(f" ✓ Indexed {len(centroid_lookup)} country codes")

    # Glottolog by ISO code; one-to-many because a language can have
    # multiple dialect entries, and 'isocodes' may be comma-separated.
    glottolog_lookup = {}
    for lang in glottolog:
        raw = str(lang.get('isocodes', '')).strip()
        if raw and raw != 'nan':  # 'nan' appears when the field was NaN upstream
            for code in (part.strip() for part in raw.split(',')):
                if code:
                    glottolog_lookup.setdefault(code, []).append(lang)

    print(f" ✓ Indexed {len(glottolog_lookup)} ISO language codes")

    # Family lookups derived from the languoid table.
    family_lookup = {}          # family_id -> family_name
    glottocode_to_family = {}   # glottocode -> family_id
    for _, row in glottolog_languoid.iterrows():
        if row['level'] == 'family':
            family_lookup[row['id']] = row['name']
        if pd.notna(row['family_id']):
            glottocode_to_family[row['id']] = row['family_id']

    print(f" ✓ Indexed {len(family_lookup)} language families")
    print(f" ✓ Mapped {len(glottocode_to_family)} glottocodes to families")

    return centroid_lookup, glottolog_lookup, family_lookup, glottocode_to_family
|
| 120 |
+
|
| 121 |
+
def enrich_people_groups(people_groups, centroid_lookup):
    """Enrich people groups with country centroids.

    Args:
        people_groups: list of Joshua Project people-group dicts (keyed by ROG3).
        centroid_lookup: country code -> centroid dict with 'latitude',
            'longitude' and optional 'continent'/'region_un'.

    Returns:
        New list of dicts with country_latitude/longitude, continent,
        region_un and coordinate_source fields added (None when unmatched).
    """
    print("\n🌍 Enriching people groups with coordinates...")

    enriched = []
    matched = 0
    unmatched_countries = set()

    for pg in people_groups:
        pg_enriched = pg.copy()

        # ROG3 is the Joshua Project country code (e.g. 'IN').
        # NOTE(review): observed ROG3 values are 2-char FIPS-style codes while
        # the centroid lookup is keyed by ISO a2/a3 — if match rates are low,
        # a FIPS→ISO mapping step is likely needed. TODO confirm.
        country_code = pg.get('ROG3', '')
        centroid = centroid_lookup.get(country_code)

        if centroid is not None:
            pg_enriched['country_latitude'] = centroid['latitude']
            pg_enriched['country_longitude'] = centroid['longitude']
            pg_enriched['continent'] = centroid.get('continent', '')
            pg_enriched['region_un'] = centroid.get('region_un', '')
            pg_enriched['coordinate_source'] = 'Natural Earth (country centroid)'
            matched += 1
        else:
            pg_enriched['country_latitude'] = None
            pg_enriched['country_longitude'] = None
            pg_enriched['continent'] = None
            pg_enriched['region_un'] = None
            pg_enriched['coordinate_source'] = None
            if country_code:
                unmatched_countries.add(country_code)

        enriched.append(pg_enriched)

    # Guard against ZeroDivisionError on an empty input list.
    match_rate = 100 * matched / len(people_groups) if people_groups else 0.0
    print(f" ✓ Matched {matched:,} / {len(people_groups):,} ({match_rate:.1f}%)")

    if unmatched_countries:
        print(f" ⚠ {len(unmatched_countries)} unmatched country codes: {sorted(unmatched_countries)[:10]}")

    return enriched
|
| 161 |
+
|
| 162 |
+
def enrich_languages(languages, glottolog_lookup, family_lookup, glottocode_to_family):
    """Enrich Joshua Project languages with Glottolog coordinates.

    Args:
        languages: list of JP language dicts keyed by ROL3 (ISO 639-3).
        glottolog_lookup: ISO code -> list of Glottolog geo entries.
        family_lookup: family glottocode -> family name.
        glottocode_to_family: language glottocode -> family glottocode.

    Returns:
        New list of dicts with latitude/longitude, glottocode, family_name,
        family_id, macroarea, coordinate_source and glottolog_match_count added.
    """
    print("\n🗣️ Enriching languages with coordinates...")

    enriched = []
    matched = 0
    unmatched_iso_codes = set()

    for lang in languages:
        lang_enriched = lang.copy()
        iso_code = lang.get('ROL3', '')

        if iso_code and iso_code in glottolog_lookup:
            glotto_entries = glottolog_lookup[iso_code]
            # Take the first entry (usually the main language, not a dialect).
            glotto = glotto_entries[0]

            # Convert NaN to None so JSON serialization emits proper null.
            lat = glotto.get('latitude')
            lng = glotto.get('longitude')
            lang_enriched['latitude'] = None if pd.isna(lat) else lat
            lang_enriched['longitude'] = None if pd.isna(lng) else lng

            glottocode = glotto.get('glottocode', '')
            lang_enriched['glottocode'] = glottocode

            # Family name via 2-step lookup: glottocode -> family_id -> name.
            if glottocode and glottocode in glottocode_to_family:
                family_id = glottocode_to_family[glottocode]
                lang_enriched['family_name'] = family_lookup.get(family_id, '')
                lang_enriched['family_id'] = family_id
            elif glottocode and glottocode in family_lookup:
                # The glottocode itself IS a family-level entry.
                lang_enriched['family_name'] = family_lookup[glottocode]
                lang_enriched['family_id'] = glottocode
            else:
                # No family mapping at all: treat as isolate when we at least
                # have a glottocode, otherwise leave blank.
                lang_enriched['family_name'] = 'Isolate' if glottocode else ''
                lang_enriched['family_id'] = ''

            lang_enriched['macroarea'] = glotto.get('macroarea', '')
            lang_enriched['coordinate_source'] = 'Glottolog'
            lang_enriched['glottolog_match_count'] = len(glotto_entries)
            matched += 1
        else:
            for field in ('latitude', 'longitude', 'glottocode', 'family_name',
                          'family_id', 'macroarea', 'coordinate_source'):
                lang_enriched[field] = None
            lang_enriched['glottolog_match_count'] = 0
            if iso_code:
                unmatched_iso_codes.add(iso_code)

        enriched.append(lang_enriched)

    # Guard against ZeroDivisionError on an empty input list.
    match_rate = 100 * matched / len(languages) if languages else 0.0
    print(f" ✓ Matched {matched:,} / {len(languages):,} ({match_rate:.1f}%)")

    if unmatched_iso_codes:
        print(f" ⚠ {len(unmatched_iso_codes)} unmatched ISO codes (sample): {sorted(unmatched_iso_codes)[:10]}")

    return enriched
|
| 230 |
+
|
| 231 |
+
def save_enriched_data(people_groups_enriched, languages_enriched):
    """Save enriched datasets and write an enrichment metadata summary.

    Writes joshua_project_enriched_geo.json, joshua_project_languages_enriched_geo.json
    and enrichment_metadata.json into JOSHUA_DIR.

    Args:
        people_groups_enriched: output of enrich_people_groups().
        languages_enriched: output of enrich_languages().

    Returns:
        The metadata dict that was written to enrichment_metadata.json.
    """
    print("\n💾 Saving enriched datasets...")

    # Save people groups
    pg_file = JOSHUA_DIR / 'joshua_project_enriched_geo.json'
    with open(pg_file, 'w', encoding='utf-8') as f:
        json.dump(people_groups_enriched, f, indent=2, ensure_ascii=False)

    file_size_mb = pg_file.stat().st_size / (1024 * 1024)
    print(f" ✓ People groups: {pg_file}")
    print(f" Size: {file_size_mb:.1f} MB")

    # Save languages
    lang_file = JOSHUA_DIR / 'joshua_project_languages_enriched_geo.json'
    with open(lang_file, 'w', encoding='utf-8') as f:
        json.dump(languages_enriched, f, indent=2, ensure_ascii=False)

    file_size_mb = lang_file.stat().st_size / (1024 * 1024)
    print(f" ✓ Languages: {lang_file}")
    print(f" Size: {file_size_mb:.1f} MB")

    # Count records that actually carry coordinates.
    # BUGFIX: use `is not None` — a plain truthiness test would wrongly
    # exclude legitimate 0.0 latitudes (records on the equator).
    pg_with_coords = sum(1 for pg in people_groups_enriched
                         if pg.get('country_latitude') is not None)
    lang_with_coords = sum(1 for lang in languages_enriched
                           if lang.get('latitude') is not None)

    # Guard the coverage ratios against empty inputs (ZeroDivisionError).
    pg_total = len(people_groups_enriched) or 1
    lang_total = len(languages_enriched) or 1

    # Create metadata
    metadata = {
        'enrichment_date': datetime.now().isoformat(),
        'source_datasets': {
            'joshua_project': 'Joshua Project API v1',
            'natural_earth': 'Natural Earth 1:10m Admin 0 Label Points',
            'glottolog': 'Glottolog languages_and_dialects_geo.csv'
        },
        'people_groups': {
            'total': len(people_groups_enriched),
            'with_coordinates': pg_with_coords,
            'coverage': f'{100 * pg_with_coords / pg_total:.1f}%'
        },
        'languages': {
            'total': len(languages_enriched),
            'with_coordinates': lang_with_coords,
            'coverage': f'{100 * lang_with_coords / lang_total:.1f}%'
        },
        'new_fields': {
            'people_groups': ['country_latitude', 'country_longitude', 'continent', 'region_un', 'coordinate_source'],
            'languages': ['latitude', 'longitude', 'glottocode', 'family_name', 'family_id', 'macroarea', 'coordinate_source', 'glottolog_match_count']
        },
        'license': 'Compiled dataset - see individual source licenses',
        'description': 'Joshua Project data enriched with geographic coordinates from Natural Earth and Glottolog'
    }

    meta_file = JOSHUA_DIR / 'enrichment_metadata.json'
    with open(meta_file, 'w', encoding='utf-8') as f:
        json.dump(metadata, f, indent=2)

    print(f" ✓ Metadata: {meta_file}")

    return metadata
|
| 290 |
+
|
| 291 |
+
def print_summary(metadata):
    """Print summary statistics.

    Args:
        metadata: dict produced by save_enriched_data() with
            'people_groups', 'languages' and 'new_fields' sections.
    """
    pg = metadata['people_groups']
    langs = metadata['languages']
    fields = metadata['new_fields']

    print("\n" + "=" * 70)
    print("ENRICHMENT SUMMARY")
    print("=" * 70)

    print(f"\n📊 People Groups:")
    print(f" Total: {pg['total']:,}")
    print(f" With coordinates: {pg['with_coordinates']:,}")
    print(f" Coverage: {pg['coverage']}")

    print(f"\n🗣️ Languages:")
    print(f" Total: {langs['total']:,}")
    print(f" With coordinates: {langs['with_coordinates']:,}")
    print(f" Coverage: {langs['coverage']}")

    print("\n✨ New Fields Added:")
    print(f" People groups: {', '.join(fields['people_groups'])}")
    print(f" Languages: {', '.join(fields['languages'])}")

    print("\n" + "=" * 70)
    print("✅ Geographic enrichment complete!")
    print("=" * 70)
|
| 314 |
+
|
| 315 |
+
def main():
    """Main execution function.

    Returns:
        0 on success, 1 on any failure (traceback printed).
    """
    try:
        # Load all inputs, build lookups, enrich, persist, report.
        (people_groups, languages, countries_jp, centroids,
         glottolog, glottolog_languoid, iso_codes) = load_data()

        (centroid_lookup, glottolog_lookup,
         family_lookup, glottocode_to_family) = build_lookup_tables(
            centroids, glottolog, glottolog_languoid)

        people_groups_enriched = enrich_people_groups(people_groups, centroid_lookup)
        languages_enriched = enrich_languages(
            languages, glottolog_lookup, family_lookup, glottocode_to_family)

        metadata = save_enriched_data(people_groups_enriched, languages_enriched)
        print_summary(metadata)
        return 0

    except Exception as e:
        print(f"\n❌ Error: {e}")
        import traceback
        traceback.print_exc()
        return 1
|
| 341 |
+
|
| 342 |
+
if __name__ == "__main__":
|
| 343 |
+
exit(main())
|
enriched_metadata.json
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"generated_at": "2026-01-06T16:08:34.106145",
|
| 3 |
+
"source_datasets": {
|
| 4 |
+
"people_groups": 16382,
|
| 5 |
+
"countries": 238,
|
| 6 |
+
"languages": 7134,
|
| 7 |
+
"totals": 38
|
| 8 |
+
},
|
| 9 |
+
"enriched_datasets": {
|
| 10 |
+
"full_enriched": {
|
| 11 |
+
"records": 16382,
|
| 12 |
+
"json_file": "joshua_project_enriched.json",
|
| 13 |
+
"parquet_file": "joshua_project_enriched.parquet"
|
| 14 |
+
},
|
| 15 |
+
"unreached_only": {
|
| 16 |
+
"records": 7124,
|
| 17 |
+
"json_file": "joshua_project_unreached.json",
|
| 18 |
+
"parquet_file": "joshua_project_unreached.parquet",
|
| 19 |
+
"percentage_of_total": 43.49
|
| 20 |
+
}
|
| 21 |
+
},
|
| 22 |
+
"enrichment_details": {
|
| 23 |
+
"added_fields": [
|
| 24 |
+
"country_data (9 fields)",
|
| 25 |
+
"language_data (9 fields)"
|
| 26 |
+
],
|
| 27 |
+
"original_fields_per_record": 107,
|
| 28 |
+
"enriched_fields_per_record": 109
|
| 29 |
+
}
|
| 30 |
+
}
|
enrichment_metadata.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"enrichment_date": "2026-01-06T16:06:56.385748",
|
| 3 |
+
"source_datasets": {
|
| 4 |
+
"joshua_project": "Joshua Project API v1",
|
| 5 |
+
"natural_earth": "Natural Earth 1:10m Admin 0 Label Points",
|
| 6 |
+
"glottolog": "Glottolog languages_and_dialects_geo.csv"
|
| 7 |
+
},
|
| 8 |
+
"people_groups": {
|
| 9 |
+
"total": 16382,
|
| 10 |
+
"with_coordinates": 0,
|
| 11 |
+
"coverage": "0.0%"
|
| 12 |
+
},
|
| 13 |
+
"languages": {
|
| 14 |
+
"total": 7134,
|
| 15 |
+
"with_coordinates": 7000,
|
| 16 |
+
"coverage": "98.1%"
|
| 17 |
+
},
|
| 18 |
+
"new_fields": {
|
| 19 |
+
"people_groups": [
|
| 20 |
+
"country_latitude",
|
| 21 |
+
"country_longitude",
|
| 22 |
+
"continent",
|
| 23 |
+
"region_un",
|
| 24 |
+
"coordinate_source"
|
| 25 |
+
],
|
| 26 |
+
"languages": [
|
| 27 |
+
"latitude",
|
| 28 |
+
"longitude",
|
| 29 |
+
"glottocode",
|
| 30 |
+
"family_name",
|
| 31 |
+
"family_id",
|
| 32 |
+
"macroarea",
|
| 33 |
+
"coordinate_source",
|
| 34 |
+
"glottolog_match_count"
|
| 35 |
+
]
|
| 36 |
+
},
|
| 37 |
+
"license": "Compiled dataset - see individual source licenses",
|
| 38 |
+
"description": "Joshua Project data enriched with geographic coordinates from Natural Earth and Glottolog"
|
| 39 |
+
}
|
fetch_all_datasets.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File Purpose: Fetch all available Joshua Project datasets from the API.
|
| 3 |
+
Primary Functions:
|
| 4 |
+
- Fetches countries, languages, and totals datasets
|
| 5 |
+
- Saves each dataset to separate JSON files
|
| 6 |
+
- Generates metadata file with fetch timestamps and record counts
|
| 7 |
+
- Provides progress indicators and error handling
|
| 8 |
+
|
| 9 |
+
Inputs:
|
| 10 |
+
- API Key (via JOSHUA_PROJECT_API_KEY env var)
|
| 11 |
+
|
| 12 |
+
Outputs:
|
| 13 |
+
- joshua_project_countries.json
|
| 14 |
+
- joshua_project_languages.json
|
| 15 |
+
- joshua_project_totals.json
|
| 16 |
+
- dataset_metadata.json (metadata tracker)
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import requests
|
| 20 |
+
import json
|
| 21 |
+
import os
|
| 22 |
+
import time
|
| 23 |
+
from datetime import datetime
|
| 24 |
+
|
| 25 |
+
# API key comes from the environment; the placeholder keeps the script
# importable without credentials (requests will simply be rejected by the API).
API_KEY = os.environ.get("JOSHUA_PROJECT_API_KEY", "YOUR_API_KEY_HERE")
BASE_URL = "https://api.joshuaproject.net/v1"

# Dataset definitions: endpoint path, output filename, and a rough expected
# record count used as a post-download sanity check in fetch_dataset().
DATASETS = {
    "countries": {
        "endpoint": "countries.json",
        "output_file": "joshua_project_countries.json",
        "expected_records": 238,
        "description": "Country-level statistics and demographics"
    },
    "languages": {
        "endpoint": "languages.json",
        "output_file": "joshua_project_languages.json",
        "expected_records": 7134,
        "description": "Language details and translation status"
    },
    "totals": {
        "endpoint": "totals.json",
        "output_file": "joshua_project_totals.json",
        "expected_records": 38,
        "description": "Global summary statistics"
    }
}
|
| 49 |
+
|
| 50 |
+
def fetch_dataset(dataset_name, endpoint, expected_records):
    """Fetch one dataset from the Joshua Project API with progress output.

    Args:
        dataset_name: Human-readable name used only for progress messages.
        endpoint: API endpoint path relative to BASE_URL (e.g. "countries.json").
        expected_records: Approximate record count used for a sanity warning.

    Returns:
        The parsed list of records, or None on any network/parse failure.
    """
    # High limit so a single request returns all records (the largest
    # dataset, languages, is ~7k records).
    limit = 20000
    url = f"{BASE_URL}/{endpoint}"
    # Pass query parameters via `params` so requests URL-encodes them;
    # interpolating the api_key into the URL string would break on
    # reserved characters.
    params = {"api_key": API_KEY, "limit": limit}

    print(f"\n{'='*60}")
    print(f"Fetching {dataset_name}...")
    print(f"Endpoint: {endpoint}")
    print(f"Expected records: ~{expected_records}")
    print(f"{'='*60}")

    start_time = time.time()

    try:
        # No stream=True: the body is consumed in full by .json() anyway.
        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()

        # Parse JSON
        data = response.json()
        duration = time.time() - start_time

        count = len(data)
        print(f"✅ Success! Downloaded {count} records in {duration:.2f} seconds.")

        # Warn if record count differs significantly from expected
        if abs(count - expected_records) > 10:
            print(f"⚠️ Warning: Expected ~{expected_records} records, got {count}")

        return data

    except requests.exceptions.Timeout:
        print(f"❌ Error: Request timed out after 30 seconds")
        return None
    except requests.exceptions.RequestException as e:
        print(f"❌ Network error: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"❌ JSON decode error: {e}")
        return None
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        return None
|
| 93 |
+
|
| 94 |
+
def save_dataset(data, filepath, dataset_name):
    """Write *data* to *filepath* as pretty-printed UTF-8 JSON.

    Prints progress and the resulting file size. Returns True on success,
    False if serialization or writing fails for any reason.
    """
    print(f"Saving {dataset_name} to {filepath}...")

    try:
        with open(filepath, 'w', encoding='utf-8') as handle:
            json.dump(data, handle, indent=2, ensure_ascii=False)

        megabytes = os.path.getsize(filepath) / (1024 * 1024)
        print(f"✅ Saved {megabytes:.2f} MB to {filepath}")
        return True

    except Exception as err:
        print(f"❌ Error saving file: {err}")
        return False
|
| 109 |
+
|
| 110 |
+
def create_metadata(results):
    """Write dataset_metadata.json describing every available dataset.

    Combines a record for the pre-existing people-groups dump (if that file
    is present in the working directory) with one record per dataset fetched
    in this run.

    Args:
        results: Mapping of dataset name -> {"success", "records",
            "timestamp"} as produced by main().

    Returns:
        True if the metadata file was written, False otherwise.
    """
    metadata = {}

    # Add existing people_groups data (produced by fetch_full_data.py).
    if os.path.exists("joshua_project_full_dump.json"):
        try:
            with open("joshua_project_full_dump.json", 'r') as f:
                people_data = json.load(f)
            metadata["people_groups"] = {
                "file": "joshua_project_full_dump.json",
                "records": len(people_data),
                "fetched": "2025-12-21",
                "endpoint": "/v1/people_groups.json",
                "description": "People groups in countries (PGIC)"
            }
        except (OSError, json.JSONDecodeError) as e:
            # The dump is optional metadata; skip it but say why instead of
            # silently swallowing the error (the original bare `except: pass`
            # also hid KeyboardInterrupt/SystemExit).
            print(f"⚠️ Skipping people_groups metadata: {e}")

    # Add newly fetched datasets
    for dataset_name, info in results.items():
        if info["success"]:
            metadata[dataset_name] = {
                "file": DATASETS[dataset_name]["output_file"],
                "records": info["records"],
                "fetched": info["timestamp"],
                "endpoint": f"/v1/{DATASETS[dataset_name]['endpoint']}",
                "description": DATASETS[dataset_name]["description"]
            }

    # Save metadata
    metadata_file = "dataset_metadata.json"
    try:
        with open(metadata_file, 'w', encoding='utf-8') as f:
            json.dump(metadata, f, indent=2, ensure_ascii=False)
        print(f"\n✅ Metadata saved to {metadata_file}")
        return True
    except Exception as e:
        print(f"\n❌ Error saving metadata: {e}")
        return False
|
| 150 |
+
|
| 151 |
+
def main():
    """Fetch every configured dataset, save each one, and print a summary."""
    banner = "=" * 60
    print("\n" + banner)
    print("Joshua Project Complete Dataset Fetcher")
    print(banner)
    print(f"Fetching {len(DATASETS)} datasets from API...")

    results = {}
    overall_start = time.time()

    # Fetch each dataset in turn.
    for name, config in DATASETS.items():
        payload = fetch_dataset(name, config["endpoint"], config["expected_records"])

        if payload:
            results[name] = {
                "success": save_dataset(payload, config["output_file"], name),
                "records": len(payload),
                "timestamp": datetime.now().strftime("%Y-%m-%d"),
            }
        else:
            results[name] = {"success": False, "records": 0, "timestamp": None}

        # Brief pause between requests to be polite to the API.
        time.sleep(0.5)

    elapsed = time.time() - overall_start

    # Summary section.
    print("\n" + banner)
    print("FETCH SUMMARY")
    print(banner)

    successes = [r for r in results.values() if r["success"]]
    success_count = len(successes)
    total_records = sum(r["records"] for r in successes)

    print(f"Datasets fetched: {success_count}/{len(DATASETS)}")
    print(f"Total records: {total_records:,}")
    print(f"Total time: {elapsed:.2f} seconds")

    for name, outcome in results.items():
        marker = "✅" if outcome["success"] else "❌"
        detail = f"{outcome['records']:,} records" if outcome["success"] else "FAILED"
        print(f" {marker} {name}: {detail}")

    # Persist metadata only when at least one dataset succeeded.
    if success_count > 0:
        create_metadata(results)

    print("\n" + banner)
    if success_count == len(DATASETS):
        print("🎉 All datasets fetched successfully!")
    else:
        print(f"⚠️ {len(DATASETS) - success_count} dataset(s) failed to fetch")
    print(banner + "\n")
|
| 215 |
+
|
| 216 |
+
# Entry point: run the fetcher only when executed as a script.
if __name__ == "__main__":
    main()
|
fetch_full_data.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File Purpose: Fetch the complete Joshua Project people groups dataset.
|
| 3 |
+
Primary Functions:
|
| 4 |
+
- Fetches all people group records (up to 20k) from the API.
|
| 5 |
+
- Saves the data to a local JSON file.
|
| 6 |
+
- Provides basic stats on the downloaded data.
|
| 7 |
+
Inputs:
|
| 8 |
+
- API Key (via JOSHUA_PROJECT_API_KEY env var)
|
| 9 |
+
Outputs:
|
| 10 |
+
- joshua_project_full_dump.json
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import requests
|
| 14 |
+
import json
|
| 15 |
+
import os
|
| 16 |
+
import time
|
| 17 |
+
|
| 18 |
+
# API key via environment variable; the placeholder keeps the module
# importable without credentials.
API_KEY = os.environ.get("JOSHUA_PROJECT_API_KEY", "YOUR_API_KEY_HERE")
BASE_URL = "https://api.joshuaproject.net/v1/people_groups.json"  # PGIC endpoint
OUTPUT_FILE = "joshua_project_full_dump.json"  # raw dump consumed by downstream scripts
|
| 21 |
+
|
| 22 |
+
def fetch_full_dataset():
    """Download every people-group record in a single API request.

    Returns:
        The parsed list of records, or None on network/parse errors.
    """
    # Based on our check, the total count is ~16k, so 20000 covers it.
    limit = 20000
    # Pass query parameters via `params` so requests URL-encodes them
    # (safer than interpolating the api_key into the URL string).
    params = {"api_key": API_KEY, "limit": limit}

    print(f"Fetching full dataset from {BASE_URL}...")
    print(f"Limit set to: {limit}")

    start_time = time.time()
    try:
        # timeout guards against a hung connection; the sibling
        # fetch_all_datasets.py uses the same 30s budget. stream=True was
        # dropped: .json() consumes the whole body anyway.
        response = requests.get(BASE_URL, params=params, timeout=30)
        response.raise_for_status()

        # Parse JSON
        data = response.json()
        duration = time.time() - start_time

        count = len(data)
        print(f"\nSuccess! Downloaded {count} records in {duration:.2f} seconds.")

        return data

    except requests.exceptions.RequestException as e:
        # Also covers Timeout, which subclasses RequestException.
        print(f"Network error: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f"JSON decode error: {e}")
        return None
|
| 50 |
+
|
| 51 |
+
def save_data(data, filepath):
    """Serialize *data* to *filepath* as indented UTF-8 JSON and report the size."""
    print(f"Saving data to {filepath}...")
    try:
        with open(filepath, 'w', encoding='utf-8') as out:
            json.dump(data, out, indent=2, ensure_ascii=False)

        size = os.path.getsize(filepath) / (1024 * 1024)
        print(f"Saved {size:.2f} MB to {filepath}")
    except Exception as exc:
        print(f"Error saving file: {exc}")
|
| 61 |
+
|
| 62 |
+
# Fetch the complete dump, then persist it only if the fetch succeeded.
if __name__ == "__main__":
    data = fetch_full_dataset()
    if data:
        save_data(data, OUTPUT_FILE)
|
joshua_project_countries.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
joshua_project_enriched.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c51aa4cbabe07be528e4a53089547c8e8c8c333504ba6d75054e24a3cb7c60f1
|
| 3 |
+
size 6672625
|
joshua_project_full_dump.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:298cdb33853fad97aef67b4e72b6965e6c83939356cb307e92dcf255af742c2a
|
| 3 |
+
size 135705435
|
joshua_project_languages.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
joshua_project_totals.json
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"id": "CntBuddhistPeopGroups",
|
| 4 |
+
"Value": 635,
|
| 5 |
+
"RoundPrecision": 0
|
| 6 |
+
},
|
| 7 |
+
{
|
| 8 |
+
"id": "CntChristianPeopGroups",
|
| 9 |
+
"Value": 6459,
|
| 10 |
+
"RoundPrecision": 0
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"id": "CntContinents",
|
| 14 |
+
"Value": 7,
|
| 15 |
+
"RoundPrecision": 0
|
| 16 |
+
},
|
| 17 |
+
{
|
| 18 |
+
"id": "CntCountries",
|
| 19 |
+
"Value": 238,
|
| 20 |
+
"RoundPrecision": 0
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
"id": "CntCountries1040",
|
| 24 |
+
"Value": 68,
|
| 25 |
+
"RoundPrecision": 0
|
| 26 |
+
},
|
| 27 |
+
{
|
| 28 |
+
"id": "CntCountriesLR",
|
| 29 |
+
"Value": 43,
|
| 30 |
+
"RoundPrecision": 0
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"id": "CntCtryChristian",
|
| 34 |
+
"Value": 165,
|
| 35 |
+
"RoundPrecision": 0
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"id": "CntHinduPeopGroups",
|
| 39 |
+
"Value": 2338,
|
| 40 |
+
"RoundPrecision": 0
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"id": "CntLangJesusFilm",
|
| 44 |
+
"Value": 2043,
|
| 45 |
+
"RoundPrecision": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"id": "CntLangNoResources",
|
| 49 |
+
"Value": 2193,
|
| 50 |
+
"RoundPrecision": 0
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"id": "CntLangPortions",
|
| 54 |
+
"Value": 4066,
|
| 55 |
+
"RoundPrecision": 0
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"id": "CntLangRecordings",
|
| 59 |
+
"Value": 5056,
|
| 60 |
+
"RoundPrecision": 0
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"id": "CntMuslimPeopGroups",
|
| 64 |
+
"Value": 3786,
|
| 65 |
+
"RoundPrecision": 0
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"id": "CntPCFPG",
|
| 69 |
+
"Value": 52,
|
| 70 |
+
"RoundPrecision": 0
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"id": "CntPCLR",
|
| 74 |
+
"Value": 124,
|
| 75 |
+
"RoundPrecision": 0
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"id": "CntPeopCtry",
|
| 79 |
+
"Value": 16382,
|
| 80 |
+
"RoundPrecision": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"id": "CntPeopCtry1040",
|
| 84 |
+
"Value": 8572,
|
| 85 |
+
"RoundPrecision": 0
|
| 86 |
+
},
|
| 87 |
+
{
|
| 88 |
+
"id": "CntPeopCtryFrontier",
|
| 89 |
+
"Value": 4767,
|
| 90 |
+
"RoundPrecision": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"id": "CntPeopCtryGreat50KLR",
|
| 94 |
+
"Value": 2893,
|
| 95 |
+
"RoundPrecision": 0
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"id": "CntPeopCtryLR",
|
| 99 |
+
"Value": 7124,
|
| 100 |
+
"RoundPrecision": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"id": "CntPeopCtryLR1040",
|
| 104 |
+
"Value": 5910,
|
| 105 |
+
"RoundPrecision": 0
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"id": "CntPeopleID1",
|
| 109 |
+
"Value": 16,
|
| 110 |
+
"RoundPrecision": 0
|
| 111 |
+
},
|
| 112 |
+
{
|
| 113 |
+
"id": "CntPeopleID2",
|
| 114 |
+
"Value": 267,
|
| 115 |
+
"RoundPrecision": 0
|
| 116 |
+
},
|
| 117 |
+
{
|
| 118 |
+
"id": "CntPeopleID3",
|
| 119 |
+
"Value": 10415,
|
| 120 |
+
"RoundPrecision": 0
|
| 121 |
+
},
|
| 122 |
+
{
|
| 123 |
+
"id": "CntPGACFPG",
|
| 124 |
+
"Value": 3203,
|
| 125 |
+
"RoundPrecision": 0
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"id": "CntPGACLR",
|
| 129 |
+
"Value": 4486,
|
| 130 |
+
"RoundPrecision": 0
|
| 131 |
+
},
|
| 132 |
+
{
|
| 133 |
+
"id": "CntRegions",
|
| 134 |
+
"Value": 12,
|
| 135 |
+
"RoundPrecision": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"id": "CntTotalLanguages",
|
| 139 |
+
"Value": 7132,
|
| 140 |
+
"RoundPrecision": 0
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"id": "CntWorkersNeeded",
|
| 144 |
+
"Value": 74431,
|
| 145 |
+
"RoundPrecision": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"id": "PoplPeopCtry",
|
| 149 |
+
"Value": 8169807000,
|
| 150 |
+
"RoundPrecision": 3
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"id": "PoplPeopCtry1040",
|
| 154 |
+
"Value": 5442072000,
|
| 155 |
+
"RoundPrecision": 3
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"id": "PoplPeopCtryFrontier",
|
| 159 |
+
"Value": 1998449000,
|
| 160 |
+
"RoundPrecision": 3
|
| 161 |
+
},
|
| 162 |
+
{
|
| 163 |
+
"id": "PoplPeopCtryLR",
|
| 164 |
+
"Value": 3572768000,
|
| 165 |
+
"RoundPrecision": 3
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"id": "PoplPeopCtryLR1040",
|
| 169 |
+
"Value": 3451759000,
|
| 170 |
+
"RoundPrecision": 3
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"id": "PoplPGACFPG",
|
| 174 |
+
"Value": 1879606000,
|
| 175 |
+
"RoundPrecision": 3
|
| 176 |
+
},
|
| 177 |
+
{
|
| 178 |
+
"id": "PoplPGACLR",
|
| 179 |
+
"Value": 3519072000,
|
| 180 |
+
"RoundPrecision": 3
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"id": "WorldChristianPct",
|
| 184 |
+
"Value": 30.9313023931891,
|
| 185 |
+
"RoundPrecision": 0
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"id": "WorldEvangelicalPct",
|
| 189 |
+
"Value": 7.78874416089203,
|
| 190 |
+
"RoundPrecision": 0
|
| 191 |
+
}
|
| 192 |
+
]
|
joshua_project_unreached.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ff8d1af897fe7f343550e30f23b57da4aa6e91e67a9a234dcb76df8d63cc9d3b
|
| 3 |
+
size 4005646
|
prepare_souls_viz_data.py
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Prepare compact visualization data for souls visualizations.
|
| 3 |
+
Converts enriched Joshua Project data into minimal format for browser use.
|
| 4 |
+
|
| 5 |
+
Output: souls_enhanced_viz_data.json
|
| 6 |
+
Size target: < 3 MB (compact field names, essential data only)
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
import sys
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
# Compact field mapping
|
| 14 |
+
# Compact field mapping: verbose source field -> one/two-letter key used in
# the browser payload to minimize file size.
# NOTE(review): compact_group() below hard-codes these short keys rather than
# reading this dict — it appears to serve as reference documentation only;
# confirm before relying on it programmatically.
COMPACT_FIELDS = {
    'name': 'n',                  # People group name
    'population': 'p',            # Population
    'jp_scale': 's',              # JP Scale (1-5)
    'percent_evangelical': 'e',   # % Evangelical
    'primary_religion': 'r',      # Religion
    'primary_language': 'l',      # Language name
    'language_code': 'lc',        # ROL3 code
    'country': 'c',               # Country name
    'country_code': 'cc',         # ROG3 code
    'continent': 'cn',            # Continent
    'region': 'rg',               # Region
    'affinity_bloc': 'ab',        # Affinity Bloc
    'people_cluster': 'pc',       # People Cluster
    'bible_status': 'bs',         # Bible translation status
    'has_jesus_film': 'jf',       # Jesus Film Y/N
    'lat_lon': 'll',              # [lat, lng] if available
    'least_reached': 'lr'         # Y/N
}
|
| 33 |
+
|
| 34 |
+
def load_enriched_data():
    """Load joshua_project_enriched.json from this script's directory.

    Exits the process with status 1 if the file has not been generated yet
    (run create_enriched_datasets.py first).
    """
    data_file = Path(__file__).parent / 'joshua_project_enriched.json'

    if not data_file.exists():
        print(f"Error: {data_file} not found")
        print("Run create_enriched_datasets.py first!")
        sys.exit(1)

    print(f"Loading {data_file.name}...")
    with data_file.open('r') as handle:
        records = json.load(handle)

    print(f"Loaded {len(records):,} people groups")
    return records
|
| 49 |
+
|
| 50 |
+
def safe_float(value, default=0.0):
    """Coerce *value* to float; return *default* for None or unparseable input."""
    if value is None:
        return default
    try:
        return float(value)
    except (ValueError, TypeError):
        return default
|
| 56 |
+
|
| 57 |
+
def safe_int(value, default=0):
    """Coerce *value* to int; return *default* for None or unparseable input."""
    if value is None:
        return default
    try:
        return int(value)
    except (ValueError, TypeError):
        return default
|
| 63 |
+
|
| 64 |
+
def compact_group(group):
    """Convert one enriched people-group record to the compact viz format.

    Short keys follow the COMPACT_FIELDS convention; the optional 'll'
    entry is added only when a usable non-zero lat/lon pair is present.
    """
    country_info = group.get('country_data')
    language_info = group.get('language_data')

    compact = {
        'n': group.get('PeopNameInCountry', 'Unknown'),
        'p': safe_int(group.get('Population', 0)),
        's': safe_int(group.get('JPScale', 0)),
        'e': round(safe_float(group.get('PercentEvangelical', 0)), 1),
        'r': group.get('PrimaryReligion', 'Unknown'),
        'l': group.get('PrimaryLanguageName', 'Unknown'),
        'lc': group.get('ROL3', ''),
        'c': country_info.get('name', 'Unknown') if country_info else group.get('Ctry', 'Unknown'),
        'cc': group.get('ROG3', ''),
        'cn': group.get('Continent', ''),
        'rg': group.get('RegionName', ''),
        'ab': group.get('AffinityBloc', ''),
        'pc': group.get('PeopleCluster', ''),
        'bs': safe_int(group.get('BibleStatus', 0)),
        'jf': language_info.get('has_jesus_film', 'N') if language_info else 'N',
        'lr': group.get('LeastReached', 'N'),
    }

    # Coordinates: prefer the group's own lat/lon, falling back to the
    # primary language's. Zero/falsy coordinates are treated as "unknown"
    # (0,0 is a common placeholder), so no 'll' key is emitted for them.
    raw_lat = group.get('Latitude') or group.get('PrimaryLanguageLatitude')
    raw_lon = group.get('Longitude') or group.get('PrimaryLanguageLongitude')

    if raw_lat and raw_lon:
        lat_val = safe_float(raw_lat, None)
        lon_val = safe_float(raw_lon, None)
        if lat_val is not None and lon_val is not None and lat_val != 0 and lon_val != 0:
            compact['ll'] = [lat_val, lon_val]

    return compact
|
| 97 |
+
|
| 98 |
+
def generate_stats(groups):
    """Generate summary statistics over compact people-group records.

    Args:
        groups: List of compact dicts as produced by compact_group().

    Returns:
        Dict with overall totals plus breakdowns by religion, continent,
        affinity bloc, JP Scale, and Bible translation status.
    """
    stats = {
        'total_groups': len(groups),
        'total_population': sum(g['p'] for g in groups),
        'unreached_count': sum(1 for g in groups if g['lr'] == 'Y'),
        'unreached_population': sum(g['p'] for g in groups if g['lr'] == 'Y'),

        # By religion
        'by_religion': {},

        # By continent
        'by_continent': {},

        # By affinity bloc
        'by_affinity_bloc': {},

        # JP Scale is nominally 1-5 and Bible status 0-5; pre-seed those
        # keys so the output always contains the full expected range.
        'by_jp_scale': {str(i): 0 for i in range(1, 6)},
        'by_bible_status': {str(i): 0 for i in range(0, 6)}
    }

    for g in groups:
        # Religion
        religion = stats['by_religion'].setdefault(
            g['r'], {'count': 0, 'population': 0, 'unreached': 0})
        religion['count'] += 1
        religion['population'] += g['p']
        if g['lr'] == 'Y':
            religion['unreached'] += g['p']

        # Continent
        if g['cn']:
            continent = stats['by_continent'].setdefault(
                g['cn'], {'count': 0, 'population': 0})
            continent['count'] += 1
            continent['population'] += g['p']

        # Affinity Bloc
        if g['ab']:
            bloc = stats['by_affinity_bloc'].setdefault(
                g['ab'], {'count': 0, 'population': 0})
            bloc['count'] += 1
            bloc['population'] += g['p']

        # JP Scale — use .get so an out-of-range value is counted instead
        # of raising KeyError against the pre-seeded 1-5 keys.
        if g['s']:
            key = str(g['s'])
            stats['by_jp_scale'][key] = stats['by_jp_scale'].get(key, 0) + 1

        # Bible Status — same KeyError guard for values outside 0-5.
        if g['bs'] is not None:
            key = str(g['bs'])
            stats['by_bible_status'][key] = stats['by_bible_status'].get(key, 0) + 1

    return stats
|
| 154 |
+
|
| 155 |
+
def main():
    """Build souls_enhanced_viz_data.json from the enriched dataset."""
    divider = "=" * 70
    print(divider)
    print("PREPARING SOULS VISUALIZATION DATA")
    print(divider)

    # Load enriched data
    enriched = load_enriched_data()

    # Convert every record to the compact field format.
    print("\nConverting to compact format...")
    compact_groups = []
    for index, record in enumerate(enriched, start=1):
        compact_groups.append(compact_group(record))
        if index % 1000 == 0:
            print(f" Progress: {index:,}/{len(enriched):,}")

    print(f"\n✅ Converted {len(compact_groups):,} groups")

    # Generate stats
    print("\nGenerating statistics...")
    stats = generate_stats(compact_groups)

    output = {
        'groups': compact_groups,
        'stats': stats,
        'generated': '2025-12-23',
        'source': 'Joshua Project API via enriched dataset'
    }

    # Save into the souls visualization directory (three levels up).
    output_file = Path(__file__).parent.parent.parent / 'poems' / 'souls' / 'souls_enhanced_viz_data.json'
    output_file.parent.mkdir(parents=True, exist_ok=True)

    print(f"\nSaving to {output_file}...")
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(output, f, separators=(',', ':'), ensure_ascii=False)

    size_mb = output_file.stat().st_size / (1024 * 1024)

    # Final report.
    print("\n" + divider)
    print("SUMMARY")
    print(divider)
    print(f"Output file: {output_file.name}")
    print(f"File size: {size_mb:.2f} MB")
    print(f"People groups: {len(compact_groups):,}")
    print(f"Total population: {stats['total_population']:,}")
    print(f"Unreached: {stats['unreached_count']:,} groups ({stats['unreached_population']:,} people)")
    print(f"\nReligions: {len(stats['by_religion'])}")
    print(f"Continents: {len(stats['by_continent'])}")
    print(f"Affinity Blocs: {len(stats['by_affinity_bloc'])}")
    print("\n" + divider)
    print("✅ COMPLETE - Ready for visualization!")
    print(divider)
|
| 212 |
+
|
| 213 |
+
# Build the visualization payload when run directly.
if __name__ == '__main__':
    main()
|
process_joshua_data.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import math
|
| 4 |
+
|
| 5 |
+
# Paths
INPUT_FILE = "joshua_project_full_dump.json"  # raw API dump from fetch_full_data.py
OUTPUT_FILE = "../../souls_viz_data.json"  # compact payload consumed by the souls visualization
|
| 8 |
+
|
| 9 |
+
def process_data():
    """Transform the raw people-groups dump into the souls viz payload.

    Reads INPUT_FILE, drops groups with population below 100 (noise
    reduction), and writes a minified JSON file containing per-group
    records plus per-affinity-bloc aggregate stats to OUTPUT_FILE.
    """
    print(f"Loading data from {INPUT_FILE}...")
    try:
        with open(INPUT_FILE, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except FileNotFoundError:
        print(f"Error: {INPUT_FILE} not found.")
        return

    print(f"Loaded {len(data)} records. Processing...")

    bloc_stats = {}
    groups_out = []

    for record in data:
        pop = record.get("Population", 0)
        if pop is None:
            pop = 0
        # Skip very small groups to reduce visual noise.
        if pop < 100:
            continue

        bloc = record.get("AffinityBloc", "Unknown")
        # Treat empty/None religion as "Unknown".
        religion = record.get("PrimaryReligion", "Unknown") or "Unknown"
        evangelical_pct = record.get("PercentEvangelical", 0)

        # JPScale: 1=Unreached ... 5=Significantly Reached. Values may
        # arrive as strings or None; normalize to an int, defaulting to 1.
        try:
            status = int(record.get("JPScale", 1))
        except (ValueError, TypeError):
            status = 1

        lat = record.get("Latitude")
        lon = record.get("Longitude")

        groups_out.append({
            "n": record.get("PeopNameInCountry"),
            "b": bloc,
            "p": pop,
            "r": religion,
            "s": status,  # 1-5 scale; 1 is unreached (rendered dark/red)
            "e": float(evangelical_pct) if evangelical_pct else 0.0,
            "c": record.get("Ctry", "Unknown"),
            "ll": [lat, lon] if lat and lon else None,
            "l": record.get("PrimaryLanguageName", "Unknown")
        })

        # Per-bloc aggregates for the "Cells" view.
        entry = bloc_stats.setdefault(bloc, {"pop": 0, "groups": 0, "unreached_pop": 0})
        entry["pop"] += pop
        entry["groups"] += 1
        if status <= 1:  # Unreached
            entry["unreached_pop"] += pop

    # Largest groups first for friendlier list rendering.
    groups_out.sort(key=lambda g: g['p'], reverse=True)

    output_data = {
        "stats": bloc_stats,
        "groups": groups_out
    }

    print(f"Writing {len(groups_out)} groups to {OUTPUT_FILE}...")
    with open(OUTPUT_FILE, 'w', encoding='utf-8') as f:
        json.dump(output_data, f, separators=(',', ':'))  # minified

    print("Done.")
|
| 95 |
+
|
| 96 |
+
# Run the full transform when executed as a script.
if __name__ == "__main__":
    process_data()
|