Datasets:
Commit ·
a8395d8
0
Parent(s):
Duplicate from Reja1/jee-neet-benchmark
Browse files
Co-authored-by: Md Rejaullah <Reja1@users.noreply.huggingface.co>
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +3 -0
- .gitignore +89 -0
- CLAUDE.md +77 -0
- README.md +348 -0
- configs/benchmark_config.yaml +29 -0
- images.tar.gz +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_01.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_02.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_03.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_04.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_05.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_06.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_07.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_08.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_09.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_10.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_11.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_12.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_13.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_14.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_15.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_16.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_17.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_01.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_02.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_03.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_04.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_05.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_06.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_07.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_08.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_09.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_10.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_11.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_12.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_13.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_14.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_15.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_16.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_17.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_01.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_02.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_03.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_04.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_05.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_06.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_07.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_08.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_09.png +3 -0
- images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_10.png +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
images/** filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
images/**/*.png filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.tar.gz filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python virtual environment
|
| 2 |
+
hf-env/
|
| 3 |
+
venv/
|
| 4 |
+
env/
|
| 5 |
+
*.venv
|
| 6 |
+
|
| 7 |
+
# Python cache files
|
| 8 |
+
__pycache__/
|
| 9 |
+
*.py[cod]
|
| 10 |
+
*$py.class
|
| 11 |
+
|
| 12 |
+
# Distribution / packaging
|
| 13 |
+
.Python
|
| 14 |
+
build/
|
| 15 |
+
develop-eggs/
|
| 16 |
+
dist/
|
| 17 |
+
downloads/
|
| 18 |
+
eggs/
|
| 19 |
+
.eggs/
|
| 20 |
+
lib/
|
| 21 |
+
lib64/
|
| 22 |
+
parts/
|
| 23 |
+
sdist/
|
| 24 |
+
var/
|
| 25 |
+
wheels/
|
| 26 |
+
share/python-wheels/
|
| 27 |
+
*.egg-info/
|
| 28 |
+
.installed.cfg
|
| 29 |
+
*.egg
|
| 30 |
+
MANIFEST
|
| 31 |
+
|
| 32 |
+
# PyInstaller
|
| 33 |
+
# Usually these files are written by a python script from a template
|
| 34 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 35 |
+
*.manifest
|
| 36 |
+
*.spec
|
| 37 |
+
|
| 38 |
+
# Installer logs
|
| 39 |
+
pip-log.txt
|
| 40 |
+
pip-delete-this-directory.txt
|
| 41 |
+
|
| 42 |
+
# Unit test / coverage reports
|
| 43 |
+
htmlcov/
|
| 44 |
+
.tox/
|
| 45 |
+
.nox/
|
| 46 |
+
.coverage
|
| 47 |
+
.coverage.*
|
| 48 |
+
.cache
|
| 49 |
+
nosetests.xml
|
| 50 |
+
coverage.xml
|
| 51 |
+
*.cover
|
| 52 |
+
*.py,cover
|
| 53 |
+
.hypothesis/
|
| 54 |
+
.pytest_cache/
|
| 55 |
+
cover/
|
| 56 |
+
|
| 57 |
+
# Jupyter Notebook
|
| 58 |
+
.ipynb_checkpoints
|
| 59 |
+
|
| 60 |
+
# IDE / Editor specific files
|
| 61 |
+
.idea/
|
| 62 |
+
.vscode/
|
| 63 |
+
*.sublime-project
|
| 64 |
+
*.sublime-workspace
|
| 65 |
+
|
| 66 |
+
# OS generated files
|
| 67 |
+
.DS_Store
|
| 68 |
+
Thumbs.db
|
| 69 |
+
|
| 70 |
+
# Results (optional - uncomment if you don't want to track results in Git)
|
| 71 |
+
# results/
|
| 72 |
+
|
| 73 |
+
# Configuration files containing secrets (uncomment if you use these names)
|
| 74 |
+
# configs/openrouter_config.yaml
|
| 75 |
+
.env*
|
| 76 |
+
.env
|
| 77 |
+
|
| 78 |
+
# Logs
|
| 79 |
+
logs/
|
| 80 |
+
*.log
|
| 81 |
+
npm-debug.log*
|
| 82 |
+
yarn-debug.log*
|
| 83 |
+
yarn-error.log*
|
| 84 |
+
pnpm-debug.log*
|
| 85 |
+
lerna-debug.log*
|
| 86 |
+
|
| 87 |
+
# Other
|
| 88 |
+
*.swp
|
| 89 |
+
*~
|
CLAUDE.md
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CLAUDE.md
|
| 2 |
+
|
| 3 |
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
| 4 |
+
|
| 5 |
+
## Project Overview
|
| 6 |
+
|
| 7 |
+
A benchmark for evaluating vision-capable LLMs on Indian competitive exam questions (JEE Main, JEE Advanced, NEET). Questions are images sent to models via the OpenRouter API; responses are parsed from `<answer>...</answer>` tags and scored using exam-specific marking schemes.
|
| 8 |
+
|
| 9 |
+
## Running the Benchmark
|
| 10 |
+
|
| 11 |
+
```bash
|
| 12 |
+
# Setup
|
| 13 |
+
uv sync
|
| 14 |
+
echo "OPENROUTER_API_KEY=your_key" > .env
|
| 15 |
+
|
| 16 |
+
# Must run from project root (paths are resolved relative to cwd)
|
| 17 |
+
uv run python src/benchmark_runner.py --model "google/gemini-2.5-pro-preview-03-25" --exam_name JEE_ADVANCED --exam_year 2025
|
| 18 |
+
|
| 19 |
+
# Filter by question IDs
|
| 20 |
+
uv run python src/benchmark_runner.py --model "openai/o3" --question_ids "N24T3001,N24T3002"
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
CLI args: `--model` (required), `--exam_name` (all/NEET/JEE_ADVANCED/JEE_MAIN), `--exam_year` (all/2024/2025), `--question_ids`, `--output_dir`, `--config`, `--resume`.
|
| 24 |
+
|
| 25 |
+
## Testing
|
| 26 |
+
|
| 27 |
+
```bash
|
| 28 |
+
# Run the full pytest suite (68 tests)
|
| 29 |
+
uv run pytest tests/ -v
|
| 30 |
+
|
| 31 |
+
# Run individual module self-tests
|
| 32 |
+
uv run python src/utils.py # answer parsing logic
|
| 33 |
+
uv run python src/evaluation.py # scoring logic
|
| 34 |
+
uv run python src/llm_interface.py # API calls (requires .env and network)
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
## Architecture
|
| 38 |
+
|
| 39 |
+
```
|
| 40 |
+
benchmark_runner.py ─── orchestrator / entry point
|
| 41 |
+
├── loads config from configs/benchmark_config.yaml
|
| 42 |
+
├── loads dataset directly from metadata.jsonl (JSONL → HuggingFace Dataset)
|
| 43 |
+
│ ├── metadata.jsonl (question metadata, 578 questions)
|
| 44 |
+
│ └── images/ (question PNGs, stored in Git LFS)
|
| 45 |
+
├── calls llm_interface.py for each question
|
| 46 |
+
│ ├── prompts.py (prompt templates)
|
| 47 |
+
│ └── utils.py (parse_llm_answer extracts from <answer> tags)
|
| 48 |
+
├── scores via evaluation.py (exam-specific marking schemes)
|
| 49 |
+
└── writes results incrementally to results/{model}_{exam}_{year}_{timestamp}/
|
| 50 |
+
├── predictions.jsonl (raw API responses)
|
| 51 |
+
├── summary.jsonl (scored per-question results)
|
| 52 |
+
└── summary.md (human-readable report)
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
### Key data flow
|
| 56 |
+
|
| 57 |
+
1. Dataset loaded directly from `metadata.jsonl` into a HuggingFace `Dataset` object, filtered by exam/year
|
| 58 |
+
2. Each question image is base64-encoded and sent to OpenRouter with a structured prompt
|
| 59 |
+
3. If the response can't be parsed, a re-prompt is sent (text-only, with the bad response)
|
| 60 |
+
4. If the API call fails, the question is queued for retry (up to 3 attempts, exponential backoff via `tenacity`)
|
| 61 |
+
5. Answers are parsed from `<answer>...</answer>` tags by `utils.parse_llm_answer()`
|
| 62 |
+
6. `evaluation.py` scores using JEE/NEET marking schemes (partial credit for MCQ_MULTIPLE_CORRECT in JEE Advanced)
|
| 63 |
+
|
| 64 |
+
## Answer Format Conventions
|
| 65 |
+
|
| 66 |
+
- `MCQ_SINGLE_CORRECT`: `<answer>A</answer>` → `["A"]`
|
| 67 |
+
- `MCQ_MULTIPLE_CORRECT`: `<answer>A,C</answer>` → `["A", "C"]` (sorted, deduplicated)
|
| 68 |
+
- `INTEGER`: `<answer>42</answer>` → `["42"]`
|
| 69 |
+
- `SKIP`: `<answer>SKIP</answer>` → no penalty
|
| 70 |
+
|
| 71 |
+
## Important Notes
|
| 72 |
+
|
| 73 |
+
- **Git LFS**: Images and `metadata.jsonl` are in LFS. Run `git lfs pull` after cloning.
|
| 74 |
+
- **Working directory**: Scripts must be run from project root — config, data, and image paths are resolved relative to cwd.
|
| 75 |
+
- **Python 3.10+**: Uses union type syntax (`list[str] | str | None`).
|
| 76 |
+
- **Models**: Configured in `configs/benchmark_config.yaml` under `openrouter_models`. All must support vision input.
|
| 77 |
+
- **Result directory naming**: `results/{provider}_{model}_{exam}_{year}_{YYYYMMDD_HHMMSS}/` (slashes in model IDs replaced with underscores).
|
README.md
ADDED
|
@@ -0,0 +1,348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: mit
|
| 3 |
+
language:
|
| 4 |
+
- en
|
| 5 |
+
task_categories:
|
| 6 |
+
- visual-question-answering
|
| 7 |
+
- image-text-to-text
|
| 8 |
+
- question-answering
|
| 9 |
+
pretty_name: Indian Competitive Exams (JEE/NEET) LLM Benchmark
|
| 10 |
+
size_categories:
|
| 11 |
+
- n<1K
|
| 12 |
+
tags:
|
| 13 |
+
- education
|
| 14 |
+
- science
|
| 15 |
+
- india
|
| 16 |
+
- competitive-exams
|
| 17 |
+
- llm-benchmark
|
| 18 |
+
configs:
|
| 19 |
+
- config_name: default
|
| 20 |
+
data_files:
|
| 21 |
+
- split: test
|
| 22 |
+
path: "images/**"
|
| 23 |
+
drop_labels: true
|
| 24 |
+
dataset_info:
|
| 25 |
+
features:
|
| 26 |
+
- name: image
|
| 27 |
+
dtype: image
|
| 28 |
+
- name: question_id
|
| 29 |
+
dtype: string
|
| 30 |
+
- name: exam_name
|
| 31 |
+
dtype: string
|
| 32 |
+
- name: exam_year
|
| 33 |
+
dtype: int32
|
| 34 |
+
- name: subject
|
| 35 |
+
dtype: string
|
| 36 |
+
- name: question_type
|
| 37 |
+
dtype: string
|
| 38 |
+
- name: correct_answer
|
| 39 |
+
dtype: string
|
| 40 |
+
- name: paper_id
|
| 41 |
+
dtype: int64
|
| 42 |
+
splits:
|
| 43 |
+
- name: test
|
| 44 |
+
num_examples: 578
|
| 45 |
+
---
|
| 46 |
+
# JEE/NEET LLM Benchmark Dataset
|
| 47 |
+
|
| 48 |
+
[](https://opensource.org/licenses/MIT)
|
| 49 |
+
|
| 50 |
+
## Dataset Description
|
| 51 |
+
|
| 52 |
+
This repository contains a benchmark dataset designed for evaluating the capabilities of Large Language Models (LLMs) on questions from major Indian competitive examinations:
|
| 53 |
+
* **JEE (Main & Advanced):** Joint Entrance Examination for engineering.
|
| 54 |
+
* **NEET:** National Eligibility cum Entrance Test for medical fields.
|
| 55 |
+
|
| 56 |
+
The questions are presented in image format (`.png`) as they appear in the original papers. The dataset includes metadata linking each image to its corresponding exam details (name, year, subject, question type), and correct answer(s). The benchmark framework supports various question types including Single Correct MCQs, Multiple Correct MCQs (with partial marking for JEE Advanced), and Integer type questions.
|
| 57 |
+
|
| 58 |
+
**Current Data:**
|
| 59 |
+
* **NEET 2024** (Code T3): 200 questions across Physics, Chemistry, Botany, and Zoology
|
| 60 |
+
* **NEET 2025** (Code 45): 180 questions across Physics, Chemistry, Botany, and Zoology
|
| 61 |
+
* **JEE Advanced 2024** (Paper 1 & 2): 102 questions across Physics, Chemistry, and Mathematics
|
| 62 |
+
* **JEE Advanced 2025** (Paper 1 & 2): 96 questions across Physics, Chemistry, and Mathematics
|
| 63 |
+
* **Total:** 578 questions with comprehensive metadata
|
| 64 |
+
|
| 65 |
+
## Key Features
|
| 66 |
+
|
| 67 |
+
* **🖼️ Multimodal Reasoning:** Uses images of questions directly, testing the multimodal reasoning capability of models
|
| 68 |
+
* **📊 Exam-Specific Scoring:** Implements authentic scoring rules for different exams and question types, including partial marking for JEE Advanced
|
| 69 |
+
* **🔄 Robust API Handling:** Built-in retry mechanism and re-prompting for failed API calls or parsing errors
|
| 70 |
+
* **🎯 Flexible Filtering:** Filter by exam name, year, or specific question IDs for targeted evaluation
|
| 71 |
+
* **📈 Comprehensive Results:** Generates detailed JSON and human-readable Markdown summaries with section-wise breakdowns
|
| 72 |
+
* **🔧 Easy Configuration:** Simple YAML-based configuration for models and parameters
|
| 73 |
+
|
| 74 |
+
## How to Use
|
| 75 |
+
|
| 76 |
+
### Using `datasets` Library
|
| 77 |
+
|
| 78 |
+
The dataset is hosted on the Hugging Face Hub and can be loaded directly:
|
| 79 |
+
|
| 80 |
+
```python
|
| 81 |
+
from datasets import load_dataset
|
| 82 |
+
import json
|
| 83 |
+
|
| 84 |
+
# Load the evaluation split
|
| 85 |
+
dataset = load_dataset("Reja1/jee-neet-benchmark", split='test')
|
| 86 |
+
|
| 87 |
+
# Example: Access the first question
|
| 88 |
+
example = dataset[0]
|
| 89 |
+
image = example["image"]
|
| 90 |
+
question_id = example["question_id"]
|
| 91 |
+
subject = example["subject"]
|
| 92 |
+
correct_answers = json.loads(example["correct_answer"]) # Parse JSON string
|
| 93 |
+
|
| 94 |
+
print(f"Question ID: {question_id}")
|
| 95 |
+
print(f"Subject: {subject}")
|
| 96 |
+
print(f"Correct Answer(s): {correct_answers}")
|
| 97 |
+
# Display the image (requires Pillow)
|
| 98 |
+
# image.show()
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
### Manual Usage (Benchmark Scripts)
|
| 102 |
+
|
| 103 |
+
This repository contains scripts to run the benchmark evaluation directly:
|
| 104 |
+
|
| 105 |
+
1. **Clone the repository:**
|
| 106 |
+
```bash
|
| 107 |
+
git clone https://huggingface.co/datasets/Reja1/jee-neet-benchmark
|
| 108 |
+
cd jee-neet-benchmark
|
| 109 |
+
# Ensure Git LFS is installed and pull large files
|
| 110 |
+
git lfs pull
|
| 111 |
+
```
|
| 112 |
+
|
| 113 |
+
2. **Install dependencies:**
|
| 114 |
+
```bash
|
| 115 |
+
uv sync
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
3. **Configure API Key:**
|
| 119 |
+
* Create a file named `.env` in the root directory of the project.
|
| 120 |
+
* Add your OpenRouter API key to this file:
|
| 121 |
+
```dotenv
|
| 122 |
+
OPENROUTER_API_KEY=your_actual_openrouter_api_key_here
|
| 123 |
+
```
|
| 124 |
+
* **Important:** The `.gitignore` file is already configured to prevent committing the `.env` file. Never commit your API keys directly.
|
| 125 |
+
|
| 126 |
+
4. **Configure Models:**
|
| 127 |
+
* Edit the `configs/benchmark_config.yaml` file.
|
| 128 |
+
* Modify the `openrouter_models` list to include the specific model identifiers you want to evaluate:
|
| 129 |
+
```yaml
|
| 130 |
+
openrouter_models:
|
| 131 |
+
- "google/gemini-2.5-pro-preview-03-25"
|
| 132 |
+
- "anthropic/claude-sonnet-4"
|
| 133 |
+
- "openai/o3"
|
| 134 |
+
```
|
| 135 |
+
* Ensure these models support vision input on OpenRouter.
|
| 136 |
+
* You can also adjust other parameters like `max_tokens` and `request_timeout` if needed.
|
| 137 |
+
|
| 138 |
+
5. **Run the benchmark:**
|
| 139 |
+
|
| 140 |
+
**Basic usage (run all available models on all questions):**
|
| 141 |
+
```bash
|
| 142 |
+
uv run python src/benchmark_runner.py --config configs/benchmark_config.yaml --model "google/gemini-2.5-pro-preview-03-25"
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
**Filter by exam and year:**
|
| 146 |
+
```bash
|
| 147 |
+
# Run only NEET 2024 questions
|
| 148 |
+
uv run python src/benchmark_runner.py --config configs/benchmark_config.yaml --model "openai/o3" --exam_name NEET --exam_year 2024
|
| 149 |
+
|
| 150 |
+
# Run only JEE Advanced 2025 questions
|
| 151 |
+
uv run python src/benchmark_runner.py --config configs/benchmark_config.yaml --model "anthropic/claude-sonnet-4" --exam_name JEE_ADVANCED --exam_year 2025
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
**Run specific questions:**
|
| 155 |
+
```bash
|
| 156 |
+
# Run specific question IDs
|
| 157 |
+
uv run python src/benchmark_runner.py --config configs/benchmark_config.yaml --model "google/gemini-2.5-pro-preview-03-25" --question_ids "N24T3001,N24T3002,JA24P1M01"
|
| 158 |
+
```
|
| 159 |
+
|
| 160 |
+
**Resume an interrupted run:**
|
| 161 |
+
```bash
|
| 162 |
+
# Resume from an existing results directory (skips already-completed questions)
|
| 163 |
+
uv run python src/benchmark_runner.py --model "google/gemini-2.5-pro-preview-03-25" --resume results/google_gemini-2.5-pro-preview-03-25_NEET_2024_20250524_141230
|
| 164 |
+
```
|
| 165 |
+
|
| 166 |
+
**Custom output directory:**
|
| 167 |
+
```bash
|
| 168 |
+
uv run python src/benchmark_runner.py --config configs/benchmark_config.yaml --model "openai/gpt-4o" --output_dir my_custom_results
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
**Available options:**
|
| 172 |
+
- `--exam_name`: Choose from `NEET`, `JEE_MAIN`, `JEE_ADVANCED`, or `all` (default)
|
| 173 |
+
- `--exam_year`: Choose from available years (`2024`, `2025`, etc.) or `all` (default)
|
| 174 |
+
- `--question_ids`: Comma-separated list of specific question IDs to evaluate (e.g., "N24T3001,JA24P1M01")
|
| 175 |
+
- `--resume`: Path to an existing results directory to resume an interrupted run
|
| 176 |
+
|
| 177 |
+
6. **Check Results:**
|
| 178 |
+
* Results for each model run will be saved in timestamped subdirectories within the `results/` folder.
|
| 179 |
+
* Each run's folder (e.g., `results/google_gemini-2.5-pro-preview-03-25_NEET_2024_20250524_141230/`) contains:
|
| 180 |
+
* **`predictions.jsonl`**: Raw API responses for each question including:
|
| 181 |
+
- Raw LLM responses
|
| 182 |
+
- API call success/failure information
|
| 183 |
+
- Parse success status and errors
|
| 184 |
+
* **`summary.jsonl`**: Per-question scored results including:
|
| 185 |
+
- Predicted answers and ground truth
|
| 186 |
+
- Evaluation status and marks awarded
|
| 187 |
+
* **`summary.md`**: Human-readable Markdown summary with:
|
| 188 |
+
- Overall exam scores
|
| 189 |
+
- Question type breakdown
|
| 190 |
+
- Section-wise breakdown (by subject)
|
| 191 |
+
- Detailed statistics on correct/incorrect/skipped questions
|
| 192 |
+
|
| 193 |
+
## Scoring System
|
| 194 |
+
|
| 195 |
+
The benchmark implements authentic scoring systems for each exam type:
|
| 196 |
+
|
| 197 |
+
### NEET Scoring
|
| 198 |
+
- **Single Correct MCQ**: +4 for correct, -1 for incorrect, 0 for skipped/API failure
|
| 199 |
+
|
| 200 |
+
### JEE Main Scoring
|
| 201 |
+
- **Single Correct MCQ**: +4 for correct, -1 for incorrect, 0 for skipped/API failure
|
| 202 |
+
- **Integer Type**: +4 for correct, 0 for incorrect, 0 for skipped/API failure
|
| 203 |
+
|
| 204 |
+
### JEE Advanced Scoring
|
| 205 |
+
- **Single Correct MCQ**: +3 for correct, -1 for incorrect, 0 for skipped/API failure
|
| 206 |
+
- **Multiple Correct MCQ**: Partial marking system:
|
| 207 |
+
- +4 for all correct options selected
|
| 208 |
+
- +3 for 3 out of 4 correct options (when 4 are correct)
|
| 209 |
+
- +2 for 2 out of 3+ correct options
|
| 210 |
+
- +1 for 1 out of 2+ correct options
|
| 211 |
+
- -2 for any incorrect option selected
|
| 212 |
+
- 0 for skipped/API failure
|
| 213 |
+
- **Integer Type**: +4 for correct, 0 for incorrect, 0 for skipped/API failure
|
| 214 |
+
|
| 215 |
+
> **Note:** API failures and parse failures are scored as 0 (no penalty) since they do not represent a deliberate wrong choice.
|
| 216 |
+
|
| 217 |
+
## Advanced Features
|
| 218 |
+
|
| 219 |
+
### Retry Mechanism
|
| 220 |
+
- Automatic retry for failed API calls (up to 3 attempts with exponential backoff)
|
| 221 |
+
- Retries on HTTP 429 (rate limit), 500, 502, 503, 504 status codes
|
| 222 |
+
- Separate retry pass for questions that failed initially
|
| 223 |
+
- Comprehensive error tracking and reporting
|
| 224 |
+
|
| 225 |
+
### Resume Capability
|
| 226 |
+
- Resume interrupted benchmark runs with `--resume <results_dir>`
|
| 227 |
+
- Reads existing `summary.jsonl` to identify completed questions and skips them
|
| 228 |
+
- Appends new results to the same output files
|
| 229 |
+
|
| 230 |
+
### Re-prompting System
|
| 231 |
+
- If initial response parsing fails, the system automatically re-prompts the model
|
| 232 |
+
- Uses the previous response to ask for properly formatted answers
|
| 233 |
+
- Shows only relevant format examples based on question type (MCQ single, MCQ multiple, or integer)
|
| 234 |
+
|
| 235 |
+
### Comprehensive Evaluation
|
| 236 |
+
- Tracks multiple metrics: correct answers, partial credit, skipped questions, API failures
|
| 237 |
+
- Section-wise breakdown by subject
|
| 238 |
+
- Color-coded progress indicators in terminal output
|
| 239 |
+
|
| 240 |
+
## Dataset Structure
|
| 241 |
+
|
| 242 |
+
* **`metadata.jsonl`**: Contains metadata for each question image with fields:
|
| 243 |
+
- `file_name`: Path to the question image (relative to repo root)
|
| 244 |
+
- `question_id`: Unique identifier (e.g., "N24T3001")
|
| 245 |
+
- `exam_name`: Exam type ("NEET", "JEE_MAIN", "JEE_ADVANCED")
|
| 246 |
+
- `exam_year`: Year of the exam (integer)
|
| 247 |
+
- `subject`: Subject name (e.g., "Physics", "Chemistry", "Mathematics")
|
| 248 |
+
- `question_type`: Question format ("MCQ_SINGLE_CORRECT", "MCQ_MULTIPLE_CORRECT", "INTEGER")
|
| 249 |
+
- `correct_answer`: JSON-serialized string of correct answers (e.g., `'["A"]'`, `'["B", "C"]'`, `'["42"]'`)
|
| 250 |
+
|
| 251 |
+
* **`images/`**: Contains subdirectories for each exam set:
|
| 252 |
+
- `images/NEET_2024_T3/`: NEET 2024 question images
|
| 253 |
+
- `images/NEET_2025_45/`: NEET 2025 question images
|
| 254 |
+
- `images/JEE_ADVANCED_2024/`: JEE Advanced 2024 question images
|
| 255 |
+
- `images/JEE_ADVANCED_2025/`: JEE Advanced 2025 question images
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
* **`src/`**: Python source code for the benchmark system:
|
| 259 |
+
- `benchmark_runner.py`: Main benchmark execution script
|
| 260 |
+
- `llm_interface.py`: OpenRouter API interface with retry logic
|
| 261 |
+
- `evaluation.py`: Scoring and evaluation functions
|
| 262 |
+
- `prompts.py`: LLM prompts for different question types
|
| 263 |
+
- `utils.py`: Utility functions for parsing and configuration
|
| 264 |
+
|
| 265 |
+
* **`configs/`**: Configuration files:
|
| 266 |
+
- `benchmark_config.yaml`: Model selection and API parameters
|
| 267 |
+
|
| 268 |
+
* **`results/`**: Directory where benchmark results are stored (timestamped subdirectories)
|
| 269 |
+
|
| 270 |
+
## Data Fields
|
| 271 |
+
|
| 272 |
+
The dataset contains the following fields (accessible via `datasets`):
|
| 273 |
+
|
| 274 |
+
* `image`: The question image (`datasets.Image`)
|
| 275 |
+
* `question_id`: Unique identifier for the question (string)
|
| 276 |
+
* `exam_name`: Name of the exam (e.g., "NEET", "JEE_ADVANCED") (string)
|
| 277 |
+
* `exam_year`: Year of the exam (int)
|
| 278 |
+
* `subject`: Subject (e.g., "Physics", "Chemistry", "Mathematics") (string)
|
| 279 |
+
* `question_type`: Type of question (e.g., "MCQ_SINGLE_CORRECT", "INTEGER") (string)
|
| 280 |
+
* `correct_answer`: JSON-serialized string containing the correct answer(s). Use `json.loads()` to parse.
|
| 281 |
+
- For MCQs, these are option identifiers (e.g., `'["1"]'`, `'["A"]'`, `'["B", "C"]'`). The LLM should output the identifier as it appears in the question.
|
| 282 |
+
- For INTEGER type, this is the numerical answer as a string (e.g., `'["42"]'`, `'["12.75"]'`). The LLM should output the number.
|
| 283 |
+
- For some `MCQ_SINGLE_CORRECT` questions, multiple answers in the list are considered correct if the LLM prediction matches any one of them.
|
| 284 |
+
|
| 285 |
+
## LLM Answer Format
|
| 286 |
+
|
| 287 |
+
The LLM is expected to return its answer enclosed in `<answer>` tags. For example:
|
| 288 |
+
- MCQ Single Correct (Option A): `<answer>A</answer>`
|
| 289 |
+
- MCQ Single Correct (Option 2): `<answer>2</answer>`
|
| 290 |
+
- MCQ Multiple Correct (Options B and D): `<answer>B,D</answer>`
|
| 291 |
+
- Integer Answer: `<answer>42</answer>`
|
| 292 |
+
- Decimal Answer: `<answer>12.75</answer>`
|
| 293 |
+
- Skipped Question: `<answer>SKIP</answer>`
|
| 294 |
+
|
| 295 |
+
The system parses these formats. Prompts are designed to guide the LLM accordingly.
|
| 296 |
+
|
| 297 |
+
## Troubleshooting
|
| 298 |
+
|
| 299 |
+
### Common Issues
|
| 300 |
+
|
| 301 |
+
**API Key Issues:**
|
| 302 |
+
- Ensure your `.env` file is in the root directory
|
| 303 |
+
- Verify your OpenRouter API key is valid and has sufficient credits
|
| 304 |
+
- Check that the key has access to vision-capable models
|
| 305 |
+
|
| 306 |
+
**Model Not Found:**
|
| 307 |
+
- Verify the model identifier exists on OpenRouter
|
| 308 |
+
- Ensure the model supports vision input
|
| 309 |
+
- Check your OpenRouter account has access to the specific model
|
| 310 |
+
|
| 311 |
+
**Memory Issues:**
|
| 312 |
+
- Reduce `max_tokens` in the config file
|
| 313 |
+
- Process smaller subsets using `--question_ids` filter
|
| 314 |
+
- Use models with smaller context windows
|
| 315 |
+
|
| 316 |
+
**Parsing Failures:**
|
| 317 |
+
- The system automatically attempts re-prompting for parsing failures
|
| 318 |
+
- Check the raw responses in `predictions.jsonl` to debug prompt issues
|
| 319 |
+
- Consider adjusting prompts in `src/prompts.py` for specific models
|
| 320 |
+
|
| 321 |
+
## Current Limitations
|
| 322 |
+
|
| 323 |
+
* **Dataset Size:** While comprehensive, the dataset could benefit from more JEE Main questions and additional years
|
| 324 |
+
* **Language Support:** Currently only supports English questions
|
| 325 |
+
* **Model Dependencies:** Requires models with vision capabilities available through OpenRouter
|
| 326 |
+
|
| 327 |
+
## Citation
|
| 328 |
+
|
| 329 |
+
If you use this dataset or benchmark code, please cite:
|
| 330 |
+
|
| 331 |
+
```bibtex
|
| 332 |
+
@misc{rejaullah_2025_jeeneetbenchmark,
|
| 333 |
+
title={JEE/NEET LLM Benchmark},
|
| 334 |
+
author={Md Rejaullah},
|
| 335 |
+
year={2025},
|
| 336 |
+
howpublished={\url{https://huggingface.co/datasets/Reja1/jee-neet-benchmark}},
|
| 337 |
+
}
|
| 338 |
+
```
|
| 339 |
+
|
| 340 |
+
## Contact
|
| 341 |
+
|
| 342 |
+
For questions, suggestions, or collaboration, feel free to reach out:
|
| 343 |
+
|
| 344 |
+
* **X (Twitter):** [https://x.com/RejaullahmdMd](https://x.com/RejaullahmdMd)
|
| 345 |
+
|
| 346 |
+
## License
|
| 347 |
+
|
| 348 |
+
This dataset and associated code are licensed under the [MIT License](https://opensource.org/licenses/MIT).
|
configs/benchmark_config.yaml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# List of OpenRouter model identifiers to evaluate.
|
| 2 |
+
# Find identifiers at: https://openrouter.ai/docs#models
|
| 3 |
+
# Ensure the models support vision input.
|
| 4 |
+
openrouter_models:
|
| 5 |
+
- "google/gemini-2.5-pro-preview-03-25"
|
| 6 |
+
- "anthropic/claude-sonnet-4"
|
| 7 |
+
- "google/gemini-2.5-flash-preview-05-20:thinking"
|
| 8 |
+
- "openai/o3"
|
| 9 |
+
- "openai/gpt-5"
|
| 10 |
+
- "x-ai/grok-4-fast:free"
|
| 11 |
+
- "google/gemini-3-pro-preview"
|
| 12 |
+
# - "google/gemini-pro-vision" # Example - uncomment or add others
|
| 13 |
+
# - "anthropic/claude-3-opus" # Example - check vision support and access
|
| 14 |
+
# - "anthropic/claude-3-sonnet"
|
| 15 |
+
# - "anthropic/claude-3-haiku"
|
| 16 |
+
|
| 17 |
+
# Path to the metadata JSONL file and base directory for image paths.
|
| 18 |
+
metadata_path: "images/metadata.jsonl"
|
| 19 |
+
images_base_dir: "images"
|
| 20 |
+
|
| 21 |
+
# Base directory where results for each model will be saved.
|
| 22 |
+
results_base_dir: "results"
|
| 23 |
+
|
| 24 |
+
# Optional: API request parameters
|
| 25 |
+
# Maximum tokens to generate in the response. Set high enough to allow
# reasoning/"thinking" output before the final <answer> tag.
|
| 26 |
+
max_tokens: 10000
|
| 27 |
+
# Timeout for each API request in seconds.
|
| 28 |
+
# Set high enough for thinking models (o3, Gemini Flash Thinking) which can take longer.
|
| 29 |
+
request_timeout: 200
|
images.tar.gz
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
version https://git-lfs.github.com/spec/v1
oid sha256:f969da1a44ca3e7fa3f1b165794196bea5e513a5ff4d7bac8fc5330e6e3415e8
size 27916417
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_01.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_02.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_03.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_04.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_05.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_06.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_07.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_08.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_09.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_10.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_11.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_12.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_13.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_14.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_15.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_16.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_CHEMISTRY_17.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_01.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_02.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_03.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_04.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_05.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_06.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_07.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_08.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_09.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_10.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_11.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_12.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_13.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_14.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_15.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_16.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_MATH_17.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_01.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_02.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_03.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_04.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_05.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_06.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_07.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_08.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_09.png
ADDED
|
Git LFS Details
|
images/JEE_ADVANCED_2024/JEE_ADVANCED_2024_P1_PHYSICS_10.png
ADDED
|
Git LFS Details
|