JSCPPProgrammer committed on
Commit
80b7188
·
verified ·
1 Parent(s): 1b97093

Initial: GenSearcher workflow + FireRed /generate adapter + Gradio

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .dockerignore +7 -0
  2. .gitattributes +2 -0
  3. .pytest_cache/.gitignore +2 -0
  4. .pytest_cache/CACHEDIR.TAG +4 -0
  5. .pytest_cache/README.md +8 -0
  6. .pytest_cache/v/cache/nodeids +4 -0
  7. Dockerfile +35 -0
  8. README.md +70 -10
  9. app.py +108 -0
  10. dotenv.example +31 -0
  11. requirements.txt +6 -0
  12. scripts/entrypoint.sh +77 -0
  13. scripts/verify_env.py +38 -0
  14. services/__init__.py +1 -0
  15. services/firered_generate.py +143 -0
  16. space_gen.py +214 -0
  17. tests/test_imports.py +23 -0
  18. vendor/rllm/.env.gen_image +68 -0
  19. vendor/rllm/.github/workflows/pre-commit.yml +35 -0
  20. vendor/rllm/.gitignore +214 -0
  21. vendor/rllm/.pre-commit-config.yaml +9 -0
  22. vendor/rllm/.readthedocs.yaml +27 -0
  23. vendor/rllm/Dockerfile +26 -0
  24. vendor/rllm/LICENSE +202 -0
  25. vendor/rllm/README.md +126 -0
  26. vendor/rllm/build_docs.sh +25 -0
  27. vendor/rllm/docs/README.md +130 -0
  28. vendor/rllm/docs/api/agents/agent.md +5 -0
  29. vendor/rllm/docs/api/agents/utils.md +5 -0
  30. vendor/rllm/docs/api/engine/agent_execution_engine.md +5 -0
  31. vendor/rllm/docs/api/engine/agent_workflow_engine.md +5 -0
  32. vendor/rllm/docs/api/environments/base.md +15 -0
  33. vendor/rllm/docs/api/environments/env_utils.md +5 -0
  34. vendor/rllm/docs/api/index.md +50 -0
  35. vendor/rllm/docs/api/parser/chat_parser.md +15 -0
  36. vendor/rllm/docs/api/parser/tool_parser.md +20 -0
  37. vendor/rllm/docs/api/tools/code_tools.md +3 -0
  38. vendor/rllm/docs/api/tools/registry.md +12 -0
  39. vendor/rllm/docs/api/tools/tool_base.md +21 -0
  40. vendor/rllm/docs/api/tools/web_tools.md +5 -0
  41. vendor/rllm/docs/api/trainer/agent_trainer.md +5 -0
  42. vendor/rllm/docs/api/trainer/ray_runtime_env.md +111 -0
  43. vendor/rllm/docs/api/workflows/workflow.md +5 -0
  44. vendor/rllm/docs/assets/agentica-logo-black.png +0 -0
  45. vendor/rllm/docs/assets/agentica-logo.png +0 -0
  46. vendor/rllm/docs/assets/rllm_architecture.txt +33 -0
  47. vendor/rllm/docs/assets/rllm_components.png +3 -0
  48. vendor/rllm/docs/assets/rllm_logo_black.png +0 -0
  49. vendor/rllm/docs/assets/rllm_logo_blue.png +0 -0
  50. vendor/rllm/docs/assets/rllm_logo_white.png +0 -0
.dockerignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ _upstream_gen_searcher
2
+ .git
3
+ **/__pycache__
4
+ **/*.pyc
5
+ **/.pytest_cache
6
+ .env
7
+ *.md.bak
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ vendor/rllm/docs/assets/rllm_components.png filter=lfs diff=lfs merge=lfs -text
37
+ vendor/rllm/docs/assets/sdk_arch.png filter=lfs diff=lfs merge=lfs -text
.pytest_cache/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Created by pytest automatically.
2
+ *
.pytest_cache/CACHEDIR.TAG ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Signature: 8a477f597d28d172789f06886806bc55
2
+ # This file is a cache directory tag created by pytest.
3
+ # For information about cache directory tags, see:
4
+ # https://bford.info/cachedir/spec.html
.pytest_cache/README.md ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # pytest cache directory #
2
+
3
+ This directory contains data from the pytest's cache plugin,
4
+ which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
5
+
6
+ **Do not** commit this to version control.
7
+
8
+ See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information.
.pytest_cache/v/cache/nodeids ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ [
2
+ "tests/test_imports.py::test_firered_service_parse",
3
+ "tests/test_imports.py::test_space_gen_importable"
4
+ ]
Dockerfile ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hugging Face Space (Docker) — GenSearcher + FireRed
2
+ # Requires GPU. For multi-GPU full-local mode, set START_VLLM_*=1 and CUDA device envs in README.
3
+
4
+ FROM pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime
5
+
6
+ ENV DEBIAN_FRONTEND=noninteractive
7
+ RUN apt-get update && apt-get install -y --no-install-recommends \
8
+ curl \
9
+ git \
10
+ && rm -rf /var/lib/apt/lists/*
11
+
12
+ WORKDIR /app
13
+
14
+ COPY vendor/rllm /app/vendor/rllm
15
+ COPY requirements.txt /app/requirements.txt
16
+ COPY app.py space_gen.py /app/
17
+ COPY services /app/services
18
+ COPY scripts /app/scripts
19
+
20
+ ENV PYTHONPATH=/app/vendor/rllm
21
+ ENV GRADIO_SERVER_PORT=7860
22
+
23
+ RUN pip install --no-cache-dir --upgrade pip setuptools wheel \
24
+ && pip install --no-cache-dir -e /app/vendor/rllm \
25
+ && pip install --no-cache-dir -r /app/requirements.txt
26
+
27
+ # Optional: local vLLM inside the image (large). Disable with build-arg if you only use external APIs.
28
+ ARG INSTALL_VLLM=1
29
+ RUN if [ "$INSTALL_VLLM" = "1" ]; then pip install --no-cache-dir "vllm>=0.6.3"; fi
30
+
31
+ RUN chmod +x /app/scripts/entrypoint.sh
32
+
33
+ EXPOSE 7860
34
+
35
+ CMD ["/app/scripts/entrypoint.sh"]
README.md CHANGED
@@ -1,10 +1,70 @@
1
- ---
2
- title: Gensearcher Firered
3
- emoji: 🐢
4
- colorFrom: yellow
5
- colorTo: red
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: GenSearcher + FireRed
3
+ emoji: 🔍
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: docker
7
+ pinned: false
8
+ suggested_hardware: a100-large
9
+ ---
10
+
11
+ # GenSearcher + FireRed-Image-Edit-1.1
12
+
13
+ This Space runs the **official** [Gen-Searcher](https://github.com/tulerfeng/Gen-Searcher) image workflow (`GenImageDeepResearchWorkflow` + `create_gen_image_tools`) against an OpenAI-compatible **GenSearcher-8B** server, then calls **FireRed-Image-Edit-1.1** through the same HTTP `/generate` contract as the upstream Qwen image API.
14
+
15
+ ## Architecture
16
+
17
+ 1. **Agent** — vendored `vision_deepresearch_async_workflow` from Gen-Searcher (unchanged `create_gen_image_tools`).
18
+ 2. **LLM** — `OPENAI_BASE_URL` + `GEN_EVAL_MODEL` (default `Gen-Searcher-8B`).
19
+ 3. **Browse summaries** — `BROWSE_SUMMARY_BASE_URL` + `BROWSE_SUMMARY_MODEL` with `BROWSE_GENERATE_ENGINE=vllm` (see [`.env.gen_image`](https://github.com/tulerfeng/Gen-Searcher/blob/main/Gen-DeepResearch-RL/rllm/.env.gen_image)).
20
+ 4. **Image generation** — local FastAPI adapter at `QWEN_EDIT_APP_URL` (default `http://127.0.0.1:8765`), compatible with `call_qwen_edit_to_generate_image` in upstream `gen_image_deepresearch_reward.py`.
21
+
22
+ ## Space secrets / environment
23
+
24
+ Configure in the Space **Settings → Variables and secrets** (or a mounted `.env.gen_image`):
25
+
26
+ | Variable | Purpose |
27
+ |----------|---------|
28
+ | `SERPER_KEY_ID` | Serper API key ([serper.dev](https://serper.dev)) |
29
+ | `JINA_API_KEYS` | Jina reader key for `r.jina.ai` |
30
+ | `OPENAI_BASE_URL` | OpenAI-compatible base URL for GenSearcher-8B (e.g. `https://.../v1`) |
31
+ | `OPENAI_API_KEY` | API key for that endpoint (use `EMPTY` if unused) |
32
+ | `GEN_EVAL_MODEL` | Served model name (default `Gen-Searcher-8B`) |
33
+ | `BROWSE_SUMMARY_BASE_URL` | OpenAI-compatible base for Qwen3-VL browse summarizer |
34
+ | `BROWSE_SUMMARY_MODEL` | Model id (e.g. `Qwen3-VL-30B-A3B-Instruct`) |
35
+ | `BROWSE_SUMMARY_API_KEY` | Key for browse server (`EMPTY` if none) |
36
+ | `BROWSE_GENERATE_ENGINE` | Set to `vllm` for OpenAI-compatible servers |
37
+
38
+ If the FireRed adapter runs **inside** this container (default), you usually do **not** need to set `QWEN_EDIT_APP_URL` (the entrypoint sets it to `http://127.0.0.1:8765`).
39
+
40
+ See [`dotenv.example`](./dotenv.example) for a full template.
41
+
42
+ ## Hardware
43
+
44
+ - **Minimum practical:** 1× GPU for FireRed + Gradio, with **external** vLLM endpoints for GenSearcher and browse (set `START_VLLM_GENSEARCHER=0`, `START_VLLM_BROWSE=0` — defaults).
45
+ - **Full local (as in upstream scripts):** multiple GPUs — enable `START_VLLM_GENSEARCHER=1`, `START_VLLM_BROWSE=1`, and set `GENSEARCHER_CUDA_VISIBLE_DEVICES`, `BROWSE_CUDA_VISIBLE_DEVICES`, `FIRERED_CUDA_VISIBLE_DEVICES` to disjoint GPU indices.
46
+
47
+ ## Local build
48
+
49
+ ```bash
50
+ cd hf-space
51
+ docker build -t gensearcher-firered .
52
+ docker run --gpus all -p 7860:7860 --env-file .env.gen_image gensearcher-firered
53
+ ```
54
+
55
+ ## Deploy to your Hugging Face account
56
+
57
+ ```bash
58
+ hf auth login
59
+ hf repos create JSCPPProgrammer/gensearcher-firered --type space --sdk docker --private
60
+ # from hf-space/
61
+ hf upload JSCPPProgrammer/gensearcher-firered . .
62
+ ```
63
+
64
+ Then set Space GPU and secrets in the Hub UI.
65
+
66
+ ## References
67
+
68
+ - [Gen-Searcher](https://github.com/tulerfeng/Gen-Searcher)
69
+ - [GenSearcher/Gen-Searcher-8B](https://huggingface.co/GenSearcher/Gen-Searcher-8B)
70
+ - [FireRed-Image-Edit-1.1](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.1)
app.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Hugging Face Space: official GenSearcher agent + FireRed-Image-Edit-1.1 generation.
3
+
4
+ Set PYTHONPATH to include vendor/rllm (see Dockerfile / README).
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import io
9
+ import json
10
+ import os
11
+ from pathlib import Path
12
+
13
+ import gradio as gr
14
+ from PIL import Image
15
+
16
+ from space_gen import run_sync
17
+
18
+
19
+ def _trajectory_to_markdown(trajectory: list) -> str:
20
+ if not trajectory:
21
+ return "_No messages_"
22
+ parts = []
23
+ for i, m in enumerate(trajectory):
24
+ role = m.get("role", "")
25
+ content = m.get("content", "")
26
+ if isinstance(content, list):
27
+ content = json.dumps(content, ensure_ascii=False)[:8000]
28
+ parts.append(f"### {i + 1}. {role}\n\n```\n{content}\n```\n")
29
+ return "\n".join(parts)
30
+
31
+
32
def run_pipeline(
    prompt: str,
    temperature: float,
    top_p: float,
    research_only: bool,
):
    """Gradio callback: run the GenSearcher agent, then render the results.

    Returns a 4-tuple matching the UI outputs:
    (PIL image or None, metadata markdown, trajectory markdown, gen_prompt text).
    """
    cleaned = (prompt or "").strip()
    if not cleaned:
        return None, "Enter a non-empty prompt.", "", ""

    try:
        result = run_sync(
            cleaned,
            temperature=float(temperature),
            top_p=float(top_p),
            skip_generation=bool(research_only),
        )
    except Exception as exc:
        import traceback

        err_md = f"**Error**\n\n```\n{exc}\n{traceback.format_exc()}\n```"
        return None, err_md, "", ""

    trajectory_md = _trajectory_to_markdown(result.get("trajectory_messages") or [])
    # Surface the run's key outcomes as a JSON code block.
    meta_json = json.dumps(
        {
            "termination": result.get("termination"),
            "gen_prompt": result.get("gen_prompt"),
            "used_prompt": result.get("used_prompt"),
            "reference_paths": result.get("reference_paths"),
            "image_error": result.get("image_error"),
        },
        ensure_ascii=False,
        indent=2,
    )
    meta_md = "```json\n" + meta_json + "\n```"
    gen_prompt_text = result.get("gen_prompt") or ""

    png_bytes = result.get("image_png")
    if not png_bytes:
        return None, meta_md, trajectory_md, gen_prompt_text
    image = Image.open(io.BytesIO(png_bytes)).convert("RGB")
    return image, meta_md, trajectory_md, gen_prompt_text
70
+
71
+
72
# --- Gradio UI wiring -------------------------------------------------------
with gr.Blocks(title="GenSearcher + FireRed") as demo:
    gr.Markdown(
        "## GenSearcher + FireRed-Image-Edit-1.1\n"
        "Runs the **official** GenSearcher search/browse/image-search agent (vLLM), "
        "then generates with **FireRed** via the same `/generate` API as the Qwen edit server.\n\n"
        "**Required secrets:** `SERPER_KEY_ID`, `JINA_API_KEYS`, and vLLM endpoints for "
        "`OPENAI_BASE_URL` + `BROWSE_SUMMARY_BASE_URL` (see README)."
    )
    with gr.Row():
        prompt = gr.Textbox(
            label="Image task / prompt",
            lines=4,
            placeholder="Describe the image you want, including any real-world facts to verify.",
        )
    with gr.Row():
        # Sampling knobs forwarded to run_pipeline -> run_sync.
        temperature = gr.Slider(0.0, 1.5, value=0.6, label="Temperature")
        top_p = gr.Slider(0.0, 1.0, value=0.9, label="Top-p")
        # When checked, run_pipeline skips FireRed generation (agent only).
        research_only = gr.Checkbox(
            label="Research only (no FireRed generation)",
            value=False,
        )
    run_btn = gr.Button("Run", variant="primary")
    with gr.Row():
        out_image = gr.Image(label="Generated image", type="pil")
        out_meta = gr.Markdown(label="Run metadata")
        out_traj = gr.Markdown(label="Trajectory (sanitized)")
        out_gen_prompt = gr.Textbox(label="gen_prompt (from agent)", lines=6)

    # Outputs must stay in the same order as run_pipeline's 4-tuple return.
    run_btn.click(
        fn=run_pipeline,
        inputs=[prompt, temperature, top_p, research_only],
        outputs=[out_image, out_meta, out_traj, out_gen_prompt],
    )


if __name__ == "__main__":
    # Single-slot queue: each run holds the agent + diffusion pipeline, which
    # is GPU-heavy, so concurrent runs are serialized.
    demo.queue(default_concurrency_limit=1)
    demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("GRADIO_SERVER_PORT", "7860")))
dotenv.example ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copy to .env.gen_image in Space secrets or mount. See README.
2
+
3
+ # GenSearcher agent (OpenAI-compatible vLLM)
4
+ export OPENAI_API_KEY="EMPTY"
5
+ export OPENAI_BASE_URL="http://127.0.0.1:8002/v1"
6
+ export GEN_EVAL_MODEL="Gen-Searcher-8B"
7
+
8
+ # FireRed adapter (this Space sets automatically if START_FIRERED_API=1)
9
+ export QWEN_EDIT_APP_URL="http://127.0.0.1:8765"
10
+ export QWEN_EDIT_APP_PATH="/generate"
11
+
12
+ # Serper + Jina (required for official tools)
13
+ export SERPER_KEY_ID=""
14
+ export JINA_API_KEYS=""
15
+ export TEXT_SEARCH_API_BASE_URL="https://google.serper.dev/search"
16
+ export IMAGE_SEARCH_API_BASE_URL="https://google.serper.dev/images"
17
+ export IMAGE_SEARCH_SAVE_DIR="/tmp/cached_images"
18
+
19
+ # Browse summarization (vLLM OpenAI-compatible)
20
+ export BROWSE_GENERATE_ENGINE="vllm"
21
+ export BROWSE_SUMMARY_BASE_URL="http://127.0.0.1:8003/v1"
22
+ export BROWSE_SUMMARY_API_KEY="EMPTY"
23
+ export BROWSE_SUMMARY_MODEL="Qwen3-VL-30B-A3B-Instruct"
24
+
25
+ export MAX_LLM_CALL_PER_RUN=9
26
+ export GEN_MAX_NEW_TOKENS_PER_TURN=4096
27
+ export GEN_IMAGE_TIMEOUT=1800
28
+
29
+ # Optional: launch local vLLM inside the container (needs extra GPUs)
30
+ # export START_VLLM_GENSEARCHER=1
31
+ # export START_VLLM_BROWSE=1
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Install rllm from vendored tree first (see Dockerfile).
2
+ diffusers>=0.31.0
3
+ accelerate>=0.26.0
4
+ gradio>=4.44.0
5
+ tiktoken>=0.7.0
6
+ uvicorn[standard]>=0.30.0
scripts/entrypoint.sh ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ set -euo pipefail
3
+ cd /app
4
+
5
+ export PYTHONPATH="/app/vendor/rllm:${PYTHONPATH:-}"
6
+
7
+ # Optional: load Space secrets copied to this path
8
+ if [[ -f /app/.env.gen_image ]]; then
9
+ set -a
10
+ # shellcheck source=/dev/null
11
+ source /app/.env.gen_image
12
+ set +a
13
+ fi
14
+
15
# Poll an HTTP endpoint until it answers, or kill the container on timeout.
#   $1 = URL to probe, $2 = human-readable name for log lines,
#   $3 = max attempts (optional, default 90; 2s sleep between attempts,
#        i.e. up to ~3 minutes by default).
wait_http() {
  local url=$1
  local name=$2
  local max_attempts=${3:-90}
  local i=0
  echo "[entrypoint] Waiting for ${name} (${url})..."
  # curl -s silent, -f treat HTTP errors as failure; any non-zero exit keeps polling
  until curl -sf "$url" >/dev/null 2>&1; do
    i=$((i + 1))
    if [[ $i -ge $max_attempts ]]; then
      echo "[entrypoint] Timeout waiting for ${name}"
      # Abort the whole entrypoint: dependent services cannot start without this one.
      exit 1
    fi
    sleep 2
  done
  echo "[entrypoint] ${name} is up."
}
31
+
32
+ # Defaults: only FireRed + Gradio in-container. Point OPENAI_BASE_URL / BROWSE_SUMMARY_BASE_URL
33
+ # to your vLLM (or other OpenAI-compatible) endpoints via Space secrets.
34
+
35
+ # --- Optional local vLLM: GenSearcher-8B (OpenAI-compatible) ---
36
+ if [[ "${START_VLLM_GENSEARCHER:-0}" == "1" ]]; then
37
+ CUDA_VISIBLE_DEVICES="${GENSEARCHER_CUDA_VISIBLE_DEVICES:-0}" \
38
+ vllm serve "${GENSEARCHER_MODEL_ID:-GenSearcher/Gen-Searcher-8B}" \
39
+ --host 0.0.0.0 \
40
+ --port 8002 \
41
+ --tensor-parallel-size "${GENSEARCHER_TP:-1}" \
42
+ --gpu-memory-utilization "${VLLM_GPU_MEMORY_UTIL:-0.85}" \
43
+ --served-model-name "${GEN_EVAL_MODEL:-Gen-Searcher-8B}" \
44
+ --max-model-len "${GENSEARCHER_MAX_MODEL_LEN:-65536}" \
45
+ --no-enable-prefix-caching &
46
+ wait_http "http://127.0.0.1:8002/v1/models" "GenSearcher vLLM"
47
+ export OPENAI_BASE_URL="${OPENAI_BASE_URL:-http://127.0.0.1:8002/v1}"
48
+ fi
49
+
50
+ # --- Optional local vLLM: browse summarization (Qwen3-VL) ---
51
+ if [[ "${START_VLLM_BROWSE:-0}" == "1" ]]; then
52
+ export BROWSE_GENERATE_ENGINE=vllm
53
+ CUDA_VISIBLE_DEVICES="${BROWSE_CUDA_VISIBLE_DEVICES:-1}" \
54
+ vllm serve "${BROWSE_MODEL_ID:-Qwen/Qwen3-VL-30B-A3B-Instruct}" \
55
+ --host 0.0.0.0 \
56
+ --port 8003 \
57
+ --tensor-parallel-size "${BROWSE_TP:-1}" \
58
+ --gpu-memory-utilization "${VLLM_GPU_MEMORY_UTIL:-0.85}" \
59
+ --served-model-name "${BROWSE_SUMMARY_MODEL:-Qwen3-VL-30B-A3B-Instruct}" \
60
+ --max-model-len "${BROWSE_MAX_MODEL_LEN:-65536}" \
61
+ --mm-processor-cache-gb 0 \
62
+ --no-enable-prefix-caching &
63
+ wait_http "http://127.0.0.1:8003/v1/models" "Browse-summary vLLM"
64
+ export BROWSE_SUMMARY_BASE_URL="${BROWSE_SUMMARY_BASE_URL:-http://127.0.0.1:8003/v1}"
65
+ fi
66
+
67
+ # --- FireRed adapter (GenSearcher /generate contract) ---
68
+ if [[ "${START_FIRERED_API:-1}" == "1" ]]; then
69
+ CUDA_VISIBLE_DEVICES="${FIRERED_CUDA_VISIBLE_DEVICES:-0}" \
70
+ python -m uvicorn services.firered_generate:app --host 0.0.0.0 --port 8765 &
71
+ wait_http "http://127.0.0.1:8765/health" "FireRed API" 120
72
+ export QWEN_EDIT_APP_URL="${QWEN_EDIT_APP_URL:-http://127.0.0.1:8765}"
73
+ else
74
+ echo "[entrypoint] START_FIRERED_API=0 — use external QWEN_EDIT_APP_URL for generation."
75
+ fi
76
+
77
+ exec python app.py
scripts/verify_env.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """Print which GenSearcher Space env vars are set (never print secret values)."""
3
+ from __future__ import annotations
4
+
5
+ import os
6
+
7
+ CHECKS = [
8
+ ("SERPER_KEY_ID", True),
9
+ ("JINA_API_KEYS", True),
10
+ ("OPENAI_BASE_URL", True),
11
+ ("GEN_EVAL_MODEL", False),
12
+ ("OPENAI_API_KEY", False),
13
+ ("BROWSE_SUMMARY_BASE_URL", True),
14
+ ("BROWSE_SUMMARY_MODEL", False),
15
+ ("BROWSE_SUMMARY_API_KEY", False),
16
+ ("BROWSE_GENERATE_ENGINE", False),
17
+ ("QWEN_EDIT_APP_URL", False),
18
+ ("QWEN_EDIT_APP_PATH", False),
19
+ ]
20
+
21
+
22
+ def main() -> None:
23
+ missing_required = []
24
+ for name, required in CHECKS:
25
+ val = os.environ.get(name, "").strip()
26
+ ok = bool(val)
27
+ status = "OK" if ok else ("MISSING" if required else "optional empty")
28
+ print(f"{name}: {status}")
29
+ if required and not ok:
30
+ missing_required.append(name)
31
+ if missing_required:
32
+ print("\nSet required variables (see README / dotenv.example):", ", ".join(missing_required))
33
+ raise SystemExit(1)
34
+ print("\nRequired variables present.")
35
+
36
+
37
+ if __name__ == "__main__":
38
+ main()
services/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # FireRed GenSearcher adapter service package.
services/firered_generate.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FireRed-Image-Edit HTTP service matching GenSearcher Qwen /generate contract.
3
+
4
+ Request/response aligned with qwen_image_api_server and gen_image_deepresearch_reward.call_qwen_edit_to_generate_image.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import argparse
9
+ import base64
10
+ import io
11
+ import os
12
+ import re
13
+ from typing import List, Optional
14
+
15
+ from fastapi import FastAPI, HTTPException
16
+ from pydantic import BaseModel
17
+ from PIL import Image
18
+
19
+ app = FastAPI(title="FireRed-Image-Edit GenSearcher adapter")
20
+
21
+ _pipe = None
22
+
23
+
24
+ def _load_image_from_url_or_data(url_or_data: str) -> Image.Image:
25
+ if url_or_data.startswith("data:image/"):
26
+ m = re.match(r"data:image/[^;]+;base64,(.*)", url_or_data, re.DOTALL)
27
+ if not m:
28
+ raise ValueError("Invalid data URL")
29
+ raw = base64.b64decode(m.group(1))
30
+ return Image.open(io.BytesIO(raw)).convert("RGB")
31
+ raise ValueError("Only data:image/...;base64,... URLs are supported in Space adapter")
32
+
33
+
34
class GenerateRequest(BaseModel):
    """Payload for ``POST /generate``.

    Field names and defaults mirror the upstream Qwen edit server so that
    ``gen_image_deepresearch_reward.call_qwen_edit_to_generate_image`` can
    call this adapter unchanged.
    """

    # Optional reference images as data:image/...;base64 URLs (only the
    # first 3 entries are consumed by the /generate handler).
    image_urls: Optional[List[str]] = None
    prompt: str
    seed: int = 0
    true_cfg_scale: float = 4.0
    # Default is a single space, not "" — presumably the pipeline expects a
    # non-empty negative prompt; confirm against the upstream server.
    negative_prompt: str = " "
    num_inference_steps: int = 40
    guidance_scale: float = 1.0
    num_images_per_prompt: int = 1
43
+
44
+
45
def get_pipeline():
    """Lazily build and cache the FireRed diffusers pipeline (module singleton).

    Heavy imports (torch/diffusers) and the model download/load happen on
    first call, so the FastAPI app can start and answer /health immediately.
    Not thread-safe; uvicorn runs this app single-process here.
    """
    global _pipe
    if _pipe is None:
        import torch
        from diffusers import QwenImageEditPlusPipeline

        # FireRed-Image-Edit is loaded through the Qwen edit pipeline class;
        # FIRERED_MODEL_ID overrides the default checkpoint.
        model_path = os.environ.get(
            "FIRERED_MODEL_ID", "FireRedTeam/FireRed-Image-Edit-1.1"
        )
        dtype = torch.bfloat16  # bf16 halves memory vs fp32 on modern GPUs
        _pipe = QwenImageEditPlusPipeline.from_pretrained(
            model_path,
            torch_dtype=dtype,
        )
        _pipe.to("cuda")  # CUDA assumed; this adapter has no CPU fallback
        _pipe.set_progress_bar_config(disable=True)  # keep server logs clean
    return _pipe
62
+
63
+
64
@app.get("/health")
def health():
    """Liveness probe; also reports whether the model has been loaded yet."""
    loaded = _pipe is not None
    return {"status": "ok", "model_loaded": loaded}
67
+
68
+
69
@app.post("/generate")
def generate(request: GenerateRequest):
    """Generate/edit one image; mirrors the upstream Qwen edit /generate contract.

    Returns ``{"success": True, "image": <base64 PNG>}`` on success, or
    ``{"success": False, "message": ...}`` with HTTP 200 on a pipeline
    failure — callers check the "success" flag rather than the status code.
    Raises 503 when the model cannot load and 400 for bad reference images.
    """
    try:
        pipe = get_pipeline()
    except Exception as e:
        # Model load failure (download error, OOM, no GPU) → service unavailable.
        raise HTTPException(status_code=503, detail=f"Model not ready: {e}")

    import torch  # safe here: get_pipeline() already imported torch successfully

    # Decode up to 3 inline reference images; a malformed entry is a client error.
    images: List[Image.Image] = []
    if request.image_urls:
        for u in request.image_urls[:3]:
            if u:
                try:
                    images.append(_load_image_from_url_or_data(u))
                except Exception as ex:
                    raise HTTPException(
                        status_code=400, detail=f"Bad image_urls entry: {ex}"
                    )

    # Seeded generator → deterministic output for a given request.
    gen = torch.Generator(device="cuda").manual_seed(int(request.seed))

    try:
        with torch.inference_mode():
            if not images:
                # Text-only: FireRed is edit-focused; synthesize a neutral canvas for conditioning-free edit
                blank = Image.new("RGB", (1024, 1024), (240, 240, 240))
                out = pipe(
                    image=[blank],
                    prompt=request.prompt,
                    generator=gen,
                    true_cfg_scale=float(request.true_cfg_scale),
                    negative_prompt=request.negative_prompt or " ",
                    num_inference_steps=int(request.num_inference_steps),
                    guidance_scale=float(request.guidance_scale),
                    num_images_per_prompt=int(request.num_images_per_prompt),
                )
            else:
                out = pipe(
                    image=images,
                    prompt=request.prompt,
                    generator=gen,
                    true_cfg_scale=float(request.true_cfg_scale),
                    negative_prompt=request.negative_prompt or " ",
                    num_inference_steps=int(request.num_inference_steps),
                    guidance_scale=float(request.guidance_scale),
                    num_images_per_prompt=int(request.num_images_per_prompt),
                )
        # Only the first image is returned even if num_images_per_prompt > 1.
        pil = out.images[0]
    except Exception as e:
        import traceback

        # Pipeline errors are reported in-band (success=False), matching the
        # upstream contract that the GenSearcher client expects.
        return {
            "success": False,
            "message": f"{e}\n{traceback.format_exc()}",
        }

    buf = io.BytesIO()
    pil.save(buf, format="PNG")
    b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
    return {"success": True, "image": b64}
130
+
131
+
132
def main():
    """CLI entry point: serve the adapter app with uvicorn."""
    import uvicorn

    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8765)
    opts = parser.parse_args()
    uvicorn.run(app, host=opts.host, port=opts.port)
140
+
141
+
142
+ if __name__ == "__main__":
143
+ main()
space_gen.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Run one official GenSearcher trajectory (OpenAI-compatible vLLM) then call FireRed /generate adapter.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import asyncio
7
+ import base64
8
+ import json
9
+ import os
10
+ import uuid
11
+ from typing import Any, Dict, List, Optional, Tuple
12
+
13
+ import requests
14
+
15
+ from rllm.engine.agent_workflow_engine import AgentWorkflowEngine
16
+ from rllm.engine.rollout import OpenAIEngine
17
+ from vision_deepresearch_async_workflow.gen_image_deepresearch_tools_executor import (
18
+ create_gen_image_tools,
19
+ )
20
+ from vision_deepresearch_async_workflow.gen_image_deepresearch_workflow import (
21
+ GenImageDeepResearchWorkflow,
22
+ )
23
+
24
+
25
+ def _sanitize_content(msg: dict) -> dict:
26
+ out = {"role": msg.get("role", ""), "content": ""}
27
+ content = msg.get("content", "")
28
+ if isinstance(content, str):
29
+ out["content"] = content[:50000] + ("..." if len(content) > 50000 else "")
30
+ else:
31
+ out["content"] = str(content)[:50000]
32
+ if "images" in msg:
33
+ out["images"] = [
34
+ (p if isinstance(p, str) else p.get("image", ""))[:200]
35
+ for p in (msg["images"] or [])[:10]
36
+ ]
37
+ return out
38
+
39
+
40
def get_effective_prompt_and_images(
    user_prompt: str, prediction: dict
) -> Tuple[str, List[str]]:
    """Match eval/gen_image_from_results.get_effective_prompt_and_images.

    Use the agent's gen_prompt together with its reference images only when
    both are usable; otherwise fall back to the raw user prompt with no refs.
    """
    gen_prompt = (prediction.get("gen_prompt") or "").strip()
    refs = prediction.get("reference_images") or []
    existing: List[str] = [
        path
        for path in (
            (ref.get("local_path") or "").strip()
            for ref in refs
            if isinstance(ref, dict)
        )
        if path and os.path.exists(path)
    ]
    if gen_prompt and existing:
        return gen_prompt, existing
    return user_prompt, []
55
+
56
+
57
+ def _parse_qwen_edit_base_url() -> str:
58
+ raw = os.environ.get("QWEN_EDIT_APP_URL", "http://127.0.0.1:8765").strip()
59
+ try:
60
+ urls = json.loads(raw)
61
+ if isinstance(urls, list) and urls:
62
+ return str(urls[0]).rstrip("/")
63
+ except json.JSONDecodeError:
64
+ pass
65
+ return raw.rstrip("/").strip('"').strip("'")
66
+
67
+
68
def call_generate_api(
    base_url: str,
    path: str,
    prompt: str,
    image_paths: List[str],
    timeout: int = 1800,
) -> bytes:
    """POST to the FireRed /generate adapter and return the decoded PNG bytes.

    Up to three reference images are inlined as base64 data URLs. Generation
    knobs come from GEN_* environment variables. Raises RuntimeError when the
    server reports success=false; HTTP errors surface via raise_for_status.
    """
    suffix = path if path.startswith("/") else "/" + path
    url = base_url.rstrip("/") + suffix

    encoded: List[str] = []
    for img_path in image_paths[:3]:
        with open(img_path, "rb") as fh:
            encoded.append(base64.b64encode(fh.read()).decode("utf-8"))
    image_urls = [f"data:image/jpeg;base64,{b}" for b in encoded] or None

    payload = {
        "image_urls": image_urls,
        "prompt": prompt,
        "seed": int(os.environ.get("GEN_SEED", "0")),
        "true_cfg_scale": float(os.environ.get("GEN_TRUE_CFG_SCALE", "4.0")),
        "negative_prompt": os.environ.get("GEN_NEGATIVE_PROMPT", " "),
        "num_inference_steps": int(os.environ.get("GEN_NUM_INFERENCE_STEPS", "40")),
        "guidance_scale": float(os.environ.get("GEN_GUIDANCE_SCALE", "1.0")),
        "num_images_per_prompt": 1,
    }

    resp = requests.post(url, json=payload, timeout=timeout)
    resp.raise_for_status()
    body = resp.json()
    if not body.get("success"):
        raise RuntimeError(body.get("message", str(body)))

    img_b64 = body.get("image") or ""
    if img_b64.startswith("data:image"):
        # Tolerate servers that return a full data URL instead of bare base64.
        img_b64 = img_b64.split(",", 1)[-1]
    return base64.b64decode(img_b64)
+
107
+
108
async def run_gensearcher_then_generate(
    user_prompt: str,
    *,
    temperature: float = 0.6,
    top_p: float = 0.9,
    skip_generation: bool = False,
) -> Dict[str, Any]:
    """Run one GenSearcher trajectory, then optionally generate via FireRed.

    Returns a dict with: termination, trajectory_messages (sanitized),
    gen_prompt, prediction, and — unless skipped/failed — image_png,
    used_prompt, reference_paths, image_error.
    """
    # Minimal task record; "question"/"prompt" duplication presumably matches
    # what the upstream workflow expects — confirm against Gen-Searcher.
    sample_id = str(uuid.uuid4())[:8]
    task = {
        "id": sample_id,
        "question": user_prompt,
        "prompt": user_prompt,
        "meta": {"source": "hf_space"},
    }

    model = os.environ.get("GEN_EVAL_MODEL", "Gen-Searcher-8B")
    base_url = os.environ.get("OPENAI_BASE_URL", "http://127.0.0.1:8002/v1").rstrip("/")
    # Normalize: the OpenAI-compatible client needs the /v1 suffix.
    if not base_url.endswith("/v1"):
        base_url = base_url + "/v1"
    api_key = os.environ.get("OPENAI_API_KEY", "EMPTY")

    rollout_engine = OpenAIEngine(
        model=model,
        base_url=base_url,
        api_key=api_key,
        max_prompt_length=int(os.environ.get("MAX_PROMPT_LENGTH", "64000")),
        max_response_length=int(os.environ.get("MAX_RESPONSE_LENGTH", "64000")),
        sampling_params={
            "temperature": temperature,
            "top_p": top_p,
        },
    )
    # Official tool set (search/browse/image-search) — unchanged from upstream.
    tools = create_gen_image_tools()
    workflow_engine = AgentWorkflowEngine(
        workflow_cls=GenImageDeepResearchWorkflow,
        workflow_args={"tools": tools, "reward_function": None},
        rollout_engine=rollout_engine,
        n_parallel_tasks=1,  # one interactive request at a time in this Space
        retry_limit=2,
    )
    await workflow_engine.initialize_pool()
    try:
        _, _, episode = await workflow_engine.process_task_with_retry(
            task, sample_id, 0
        )
    finally:
        # Always release the worker pool, even if the rollout raised.
        workflow_engine.shutdown()

    info = episode.info or {}
    messages = info.get("messages", [])
    # Guard against non-dict predictions from a malformed episode.
    prediction = info.get("prediction", {}) if isinstance(info.get("prediction"), dict) else {}
    termination = info.get("termination") or (
        episode.termination_reason.value
        if getattr(episode, "termination_reason", None)
        else "unknown"
    )
    trajectory = [_sanitize_content(m) for m in messages]

    out: Dict[str, Any] = {
        "termination": termination,
        "trajectory_messages": trajectory,
        "gen_prompt": prediction.get("gen_prompt", ""),
        "prediction": prediction,
    }

    # Research-only mode: return the trajectory without calling FireRed.
    if skip_generation:
        out["image_png"] = None
        out["image_error"] = None
        return out

    # Only generate when the agent terminated with a final answer.
    if termination != "answer":
        out["image_png"] = None
        out["image_error"] = f"Agent did not finish with answer (termination={termination})"
        return out

    eff_prompt, img_paths = get_effective_prompt_and_images(user_prompt, prediction)
    gen_base = _parse_qwen_edit_base_url()
    gen_path = os.environ.get("QWEN_EDIT_APP_PATH", "/generate")
    timeout = int(os.environ.get("GEN_IMAGE_TIMEOUT", "1800"))

    # Image-generation failures are reported in-band so the UI can still show
    # the trajectory and metadata.
    try:
        png_bytes = call_generate_api(gen_base, gen_path, eff_prompt, img_paths, timeout=timeout)
        out["image_png"] = png_bytes
        out["used_prompt"] = eff_prompt
        out["reference_paths"] = img_paths
        out["image_error"] = None
    except Exception as e:
        out["image_png"] = None
        out["image_error"] = str(e)
    return out
198
+
199
+
200
def run_sync(
    user_prompt: str,
    *,
    temperature: float = 0.6,
    top_p: float = 0.9,
    skip_generation: bool = False,
) -> Dict[str, Any]:
    """Blocking wrapper around run_gensearcher_then_generate (for Gradio)."""
    coro = run_gensearcher_then_generate(
        user_prompt,
        temperature=temperature,
        top_p=top_p,
        skip_generation=skip_generation,
    )
    return asyncio.run(coro)
tests/test_imports.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Smoke test: package layout and syntax (no GPU required)."""
2
+
3
+
4
def test_space_gen_importable():
    """space_gen.py exists, is loadable as a module spec, and parses cleanly."""
    import importlib.util
    from pathlib import Path

    repo_root = Path(__file__).resolve().parents[1]
    module_path = repo_root / "space_gen.py"
    assert module_path.exists()

    spec = importlib.util.spec_from_file_location("space_gen", module_path)
    assert spec and spec.loader

    # Do not execute full import (pulls torch/rllm); file must parse
    compile(module_path.read_text(encoding="utf-8"), str(module_path), "exec")
16
+
17
+
18
def test_firered_service_parse():
    """services/firered_generate.py must be valid Python (syntax-only check)."""
    from pathlib import Path

    repo_root = Path(__file__).resolve().parents[1]
    service_path = repo_root / "services" / "firered_generate.py"
    compile(service_path.read_text(encoding="utf-8"), str(service_path), "exec")
vendor/rllm/.env.gen_image ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Gen Image environment variables
2
+ # Usage: source .env.gen_image
3
+
4
+ # ===== Image generation service for train/eval: qwen_image | nano =====
5
+ # qwen_image: Qwen Edit HTTP service (see below); nano: Nano/Gemini image generation API (aligned with gen_image_from_results nano)
6
+ export GEN_IMAGE_SERVICE="qwen_image"
7
+
8
+ # ===== Qwen Edit image generation service endpoints (used when GEN_IMAGE_SERVICE=qwen_image) =====
9
+ # vLLM deployed service host/IP placeholder (IPs removed for open-source).
10
+ # Use multiple urls for acceleration
11
+ export QWEN_EDIT_APP_URL='["http://xxxx:8001", "http://xxxx:8001"]'
12
+ export QWEN_EDIT_APP_PATH="/generate"
13
+
14
+ # ===== Nano image generation API (used when GEN_IMAGE_SERVICE=nano; aligned with eval gen_image_from_results nano) =====
15
+ # export GEN_IMAGE_NANO_API_KEY=""
16
+ # export GEN_IMAGE_NANO_MODEL="gemini-3-pro-image-preview"
17
+ # # Total timeout (seconds): one Nano call (including retries) must not exceed this duration
18
+ # export GEN_IMAGE_NANO_TIMEOUT=1200
19
+ # # Max retries: retry on submit/poll failure up to this count
20
+ # export GEN_IMAGE_NANO_MAX_TRY=100
21
+ # # Max poll time per attempt (seconds): after task_id, poll imageGenerateQuery until status=1 or timeout
22
+ # export GEN_IMAGE_NANO_MAX_POLL=300
23
+
24
+ # ===== Reward scoring aligned with worldgen eval (GPT-4.1 + same prompt/overall formula) =====
25
+ # OpenAI API key for gpt-4.1 scoring
26
+ export GEN_REWARD_API_KEY=""
27
+ export GEN_REWARD_API_BASE_URL="https://api.openai.com/v1"
28
+ export GEN_REWARD_MODEL="gpt-4.1"
29
+ # Text reward coefficient in [0,1].
30
+ # Final reward = (1-GEN_REWARD_TEXT_COEF)*image_reward + GEN_REWARD_TEXT_COEF*text_reward
31
+ # 0 means image reward only (no extra text scoring call)
32
+ export GEN_REWARD_TEXT_COEF=0.5
33
+
34
+ # ===== Max LLM calls per agent run (shared by train/eval) =====
35
+ export MAX_LLM_CALL_PER_RUN=9
36
+
37
+ # ===== Image generation settings =====
38
+ export GEN_IMAGE_OUTPUT_DIR="./output_images"
39
+ export GEN_IMAGE_TIMEOUT=1800
40
+ export GEN_MIN_INPUT_IMAGES=1
41
+ export GEN_MAX_INPUT_IMAGES=4
42
+ export GEN_API_CONCURRENCY=32
43
+ # Per-turn generation token cap (rollout only; training still uses data.max_response_length)
44
+ export GEN_MAX_NEW_TOKENS_PER_TURN=4096
45
+ # export QWEN_VL_MAX_PIXELS=262144
46
+ export QWEN_VL_MAX_PIXELS=160000
47
+
48
+ # ===== Web tools (text + image search; default Serper endpoints) =====
49
+ export TEXT_SEARCH_API_BASE_URL="https://google.serper.dev/search"
50
+ export IMAGE_SEARCH_API_BASE_URL="https://google.serper.dev/images"
51
+ # API key sent as X-API-KEY (Serper: get key from serper.dev)
52
+ export SERPER_KEY_ID=""
53
+
54
+ # ===== Jina API (for web browsing) =====
55
+ export JINA_API_KEYS=""
56
+
57
+ # ===== Local cache directory for image search (IMAGE_SEARCH_SAVE_DIR) =====
58
+ export IMAGE_SEARCH_SAVE_DIR="./cached_images"
59
+
60
+ # ===== Browse summary model (Qwen3 via vLLM; shared by train/eval) =====
61
+ # vLLM deployed service host/IP (IPs removed for open-source)
62
+ export BROWSE_SUMMARY_BASE_URL="http://xxx:8001/v1"
63
+ export BROWSE_SUMMARY_MODEL="Qwen3-VL-30B-A3B-Instruct"
64
+ export BROWSE_GENERATE_ENGINE="vllm"
65
+
66
+ # ===== Other optional settings =====
67
+ export BROWSE_RANDOM_SLEEP="0"
68
+
vendor/rllm/.github/workflows/pre-commit.yml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Pre-commit
2
+
3
+ on:
4
+ pull_request:
5
+ push:
6
+ branches:
7
+ - main
8
+ - v0.*
9
+
10
+ jobs:
11
+ pre-commit:
12
+ runs-on: ubuntu-latest
13
+ steps:
14
+ - uses: actions/checkout@v4
15
+
16
+ - name: Set up Python
17
+ uses: actions/setup-python@v4
18
+ with:
19
+ python-version: '3.11'
20
+
21
+ - name: Cache pre-commit
22
+ uses: actions/cache@v3
23
+ with:
24
+ path: ~/.cache/pre-commit
25
+ key: pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
26
+ restore-keys: |
27
+ pre-commit-
28
+
29
+ - name: Install dependencies
30
+ run: |
31
+ python -m pip install --upgrade pip
32
+ pip install pre-commit
33
+
34
+ - name: Run pre-commit
35
+ run: pre-commit run --all-files
vendor/rllm/.gitignore ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ *.whl
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+ cover/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ .pybuilder/
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ # For a library or package, you might want to ignore these files since the code is
88
+ # intended to run in multiple environments; otherwise, check them in:
89
+ # .python-version
90
+
91
+ # pipenv
92
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
94
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
95
+ # install all needed dependencies.
96
+ #Pipfile.lock
97
+
98
+ # UV
99
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
100
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
101
+ # commonly ignored for libraries.
102
+ #uv.lock
103
+
104
+ # poetry
105
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
106
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
107
+ # commonly ignored for libraries.
108
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
109
+ #poetry.lock
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ #pdm.lock
114
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
115
+ # in version control.
116
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
117
+ .pdm.toml
118
+ .pdm-python
119
+ .pdm-build/
120
+
121
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
122
+ __pypackages__/
123
+
124
+ # Celery stuff
125
+ celerybeat-schedule
126
+ celerybeat.pid
127
+
128
+ # SageMath parsed files
129
+ *.sage.py
130
+
131
+ # Environments
132
+ .env
133
+ .venv
134
+ env/
135
+ venv/
136
+ ENV/
137
+ env.bak/
138
+ venv.bak/
139
+
140
+ # Spyder project settings
141
+ .spyderproject
142
+ .spyproject
143
+
144
+ # Rope project settings
145
+ .ropeproject
146
+
147
+ # mkdocs documentation
148
+ /site
149
+
150
+ # mypy
151
+ .mypy_cache/
152
+ .dmypy.json
153
+ dmypy.json
154
+
155
+ # Pyre type checker
156
+ .pyre/
157
+
158
+ # pytype static type analyzer
159
+ .pytype/
160
+
161
+ # Cython debug symbols
162
+ cython_debug/
163
+
164
+ # PyCharm
165
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
166
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
167
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
168
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
169
+ #.idea/
170
+
171
+ # PyPI configuration file
172
+ .pypirc
173
+
174
+ # DS_Store
175
+ .DS_Store
176
+
177
+ # Ignore parquet files.
178
+ data/*
179
+
180
+ # Ignore verl script outputs.
181
+ **/outputs/
182
+ **/wandb/
183
+ **/checkpoints/
184
+ tmp/
185
+
186
+ # Ignore debug logs and run result logs
187
+ logs/
188
+ rllm/*.json
189
+
190
+ # Ignore the big datasets
191
+ rllm/data/test/
192
+ rllm/data/train/
193
+
194
+ rllm/data/datasets/
195
+ rllm/registry/
196
+
197
+ # Coding assistant local rules ignore
198
+ .cursor/rules/*
199
+ CLAUDE.md
200
+
201
+ # Strands outputs ignore
202
+ examples/strands_outputs/*
203
+ strands_outputs/*
204
+ examples/strands/strands_outputs/*
205
+
206
+ # Deepresearch outputs ignore
207
+ examples/deepresearch/deepresearch_outputs/*
208
+ deepresearch_outputs/*
209
+ examples/deepresearch/hle_outputs/*
210
+ */hle_outputs/*
211
+ examples/deepresearch/HLE_OUTPUT_EVOLUTION.md
212
+
213
+ # Until we have a good way to handle cuda-version specific pkgs, we ignore uv.lock
214
+ uv.lock
vendor/rllm/.pre-commit-config.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/astral-sh/ruff-pre-commit
3
+ rev: "v0.11.4"
4
+ hooks:
5
+ - id: ruff
6
+ args: ["--fix", "--show-fixes", "--output-format=full"]
7
+ exclude: ^.*\.(ipynb)$|^verl/.*$
8
+ - id: ruff-format
9
+ exclude: ^verl/.*$
vendor/rllm/.readthedocs.yaml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Read the Docs configuration file for rLLM
2
+ # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3
+
4
+ version: 2
5
+
6
+ # Set the OS, Python version and other tools you might need
7
+ build:
8
+ os: ubuntu-22.04
9
+ tools:
10
+ python: "3.10"
11
+ jobs:
12
+ post_create_environment:
13
+ # Install poetry
14
+ - pip install --upgrade pip
15
+ post_install:
16
+ # Install any additional system dependencies if needed
17
+ - echo "Build environment ready"
18
+
19
+ # Build documentation in the "docs/" directory with MkDocs
20
+ mkdocs:
21
+ configuration: mkdocs.yml
22
+ fail_on_warning: false
23
+
24
+ # Python configuration
25
+ python:
26
+ install:
27
+ - requirements: docs/requirements.txt
vendor/rllm/Dockerfile ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM verlai/verl:vllm011.latest
2
+
3
+ WORKDIR /workspace
4
+
5
+ RUN git clone https://github.com/volcengine/verl.git
6
+ RUN cd verl && \
7
+ git checkout v0.6.1 && \
8
+ pip install -e .
9
+
10
+ # 2) Install rllm (editable)
11
+ RUN git clone https://github.com/rllm-org/rllm.git
12
+ RUN cd rllm && \
13
+ pip install -e .
14
+
15
+ # 3) Install playwright
16
+ RUN pip install playwright && \
17
+ playwright install chromium && \
18
+ playwright install-deps
19
+
20
+ CMD ["/bin/bash"]
21
+
22
+ # Docker Usage
23
+ # docker build -t rllm .
24
+ # docker create --runtime=nvidia --gpus all --net=host --shm-size="10g" --cap-add=SYS_ADMIN -v .:/workspace/rllm -v /tmp:/tmp --name rllm-container rllm sleep infinity
25
+ # docker start rllm-container
26
+ # docker exec -it rllm-container bash
vendor/rllm/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
vendor/rllm/README.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+
3
+ # rLLM
4
+
5
+ <div>
6
+ 🚀 Reinforcement Learning for Language Agents🌟
7
+ </div>
8
+ </div>
9
+ <div>
10
+ <br>
11
+
12
+ <div align="center">
13
+
14
+ [![Documentation](https://img.shields.io/badge/Documentation-blue?style=for-the-badge&logo=googledocs&logoColor=white)](https://rllm-project.readthedocs.io/en/latest)
15
+ [![Discord](https://img.shields.io/badge/Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/BDH46HT9en)
16
+ [![Website](https://img.shields.io/badge/Site-%233f72af.svg?style=for-the-badge&logo=semanticweb&logoColor=white)](https://rllm-project.com)
17
+ [![Blogs](https://img.shields.io/badge/Blogs-007AFF?style=for-the-badge)](https://rllm-project.com/blog)
18
+ [![X](https://img.shields.io/badge/-black?logo=X&style=for-the-badge)](https://x.com/rllm_project)
19
+
20
+ </div>
21
+
22
+ </div>
23
+
24
+ rLLM is an open-source framework for post-training language agents via reinforcement learning. With rLLM, you can easily build your custom agents and environments, train them with reinforcement learning, and deploy them for real-world workloads.
25
+
26
+ ## Releases 📰
27
+
28
+ <strong>[2025/10/16]</strong> rLLM [v0.2](https://github.com/rllm-org/rllm/tree/v0.2) is now officially released! We introduce `AgentWorkflowEngine` for training over arbitrary agentic programs. It also comes integrated with the official `verl-0.5.0`, featuring support for Megatron training. Check out this [blog post](https://rllm-project.com/post.html?post=rllm_v0.2.md) for more.
29
+
30
 + <strong>[2025/07/01]</strong> We release [`DeepSWE-Preview`](https://pretty-radio-b75.notion.site/DeepSWE-Training-a-Fully-Open-sourced-State-of-the-Art[…]-by-Scaling-RL-22281902c1468193aabbe9a8c59bbe33?pvs=73), a 32B software engineering agent (SWE) trained purely with RL that achieves 59% on SWEBench-Verified with test-time scaling (42.2% Pass@1), topping the SWEBench leaderboard for open-weight models.
31
+
32
 + <strong>[2025/04/08]</strong> We release [`DeepCoder-14B-Preview`](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51), a 14B coding model that achieves an impressive **60.6%** Pass@1 accuracy on LiveCodeBench (+8% improvement), matching the performance of `o3-mini-2025-01-31 (Low)` and `o1-2024-12-17`.
33
+
34
 + <strong>[2025/02/10]</strong> We release [`DeepScaleR-1.5B-Preview`](https://pretty-radio-b75.notion.site/DeepScaleR-Surpassing-O1-Preview-with-a-1-5B-Model-by-Scaling-RL-19681902c1468005bed8ca303013a4e2), a 1.5B model that surpasses O1-Preview and achieves <strong>43.1% Pass@1</strong> on AIME. We achieve this by iteratively scaling DeepSeek's GRPO algorithm from 8K→16K→24K context length for thinking.
35
+
36
+ ## Getting Started 🎯
37
+
38
+ ### Step 1: Building rLLM
39
+
40
+ rLLM requires Python >= 3.11. You can install it either directly via pip or build from source.
41
+
42
+ **Option A: Direct Installation**
43
+
44
+ ```bash
45
+ uv pip install "git+https://github.com/rllm-org/rllm.git"
46
+ ```
47
+
48
+ **Option B: Building from Source**
49
+
50
+ ```bash
51
+ # Clone the repository
52
+ git clone https://github.com/rllm-org/rllm.git
53
+ cd rllm
54
+
55
+ # Create a conda environment
56
+ conda create -n rllm python=3.11 -y
57
+ conda activate rllm
58
+
59
+ # Build rLLM from source
60
+ uv pip install -e .
61
+ ```
62
+
63
+ ### Step 2: Installing Training Backend
64
+
65
+ rLLM supports two training backends: `verl` and `Tinker`. Choose one based on your needs.
66
+
67
+ **Option A: Using `verl` as Training Backend**
68
+
69
+ ```bash
70
+ # Initialize and update the verl submodule
71
+ git submodule update --init --recursive
72
+
73
+ # Install verl
74
+ bash scripts/install_verl.sh
75
+ ```
76
+
77
+ **Option B: Using `Tinker` as Training Backend**
78
+
79
+ No additional installation needed. `tinker` and `tinker-cookbook` are pre-installed when you install rLLM.
80
+
81
+ > **Note:** The direct pip installation (Option A in Step 1) comes with Tinker as the default training backend. If you want to use `verl`, you'll need to build from source (Option B in Step 1) and then install `verl` (Option A in Step 2).
82
+
83
+ ### Installation with Docker 🐳
84
+
85
+ For a containerized setup, you can use Docker:
86
+
87
+ ```bash
88
+ # Build the Docker image
89
+ docker build -t rllm .
90
+
91
+ # Create and start the container
92
+ docker create --runtime=nvidia --gpus all --net=host --shm-size="10g" --cap-add=SYS_ADMIN -v .:/workspace/rllm -v /tmp:/tmp --name rllm-container rllm sleep infinity
93
+ docker start rllm-container
94
+
95
+ # Enter the container
96
+ docker exec -it rllm-container bash
97
+ ```
98
+
99
+ ## Awesome Projects using rLLM 🔥
100
+
101
+ * [DeepScaleR](https://pretty-radio-b75.notion.site/DeepScaleR-Surpassing-O1-Preview-with-a-1-5B-Model-by-Scaling-RL-19681902c1468005bed8ca303013a4e2): Surpassing O1-Preview with a 1.5B Model by Scaling RL
102
+ * [DeepCoder](https://pretty-radio-b75.notion.site/DeepCoder-A-Fully-Open-Source-14B-Coder-at-O3-mini-Level-1cf81902c14680b3bee5eb349a512a51): A Fully Open-Source 14B Coder at O3-mini Level
103
+ * [DeepSWE](https://pretty-radio-b75.notion.site/DeepSWE-Training-a-Fully-Open-sourced-State-of-the-Art[%E2%80%A6]-by-Scaling-RL-22281902c1468193aabbe9a8c59bbe33): Training a Fully Open-sourced, State-of-the-Art Coding Agent by Scaling RL
104
+ * [Tongyi DeepResearch](https://github.com/Alibaba-NLP/DeepResearch): A New Era of Open-Source AI Researchers [![GitHub Repo stars](https://img.shields.io/github/stars/Alibaba-NLP/DeepResearch)](https://github.com/Alibaba-NLP/DeepResearch)
105
+ * [Terminal-Bench-RL](https://github.com/Danau5tin/terminal-bench-rl): Training Long-Horizon Terminal Agents with Reinforcement Learning [![GitHub Repo stars](https://img.shields.io/github/stars/Danau5tin/terminal-bench-rl)](https://github.com/Danau5tin/terminal-bench-rl)
106
+ * [Cogito, Ergo Ludo](https://www.arxiv.org/abs/2509.25052): An Agent that Learns to Play by Reasoning and Planning
107
+ * [PettingLLMs](https://pettingllms-ai.github.io/): Using On-Policy Reinforcement Learning for Stronger Multi-Agent System [![GitHub Repo stars](https://img.shields.io/github/stars/pettingllms-ai/PettingLLMs)](https://github.com/pettingllms-ai/PettingLLMs)
108
+
109
+
110
+
111
+ ## Acknowledgements
112
+ Our work is done as part of [Berkeley Sky Computing Lab](https://sky.cs.berkeley.edu/). The rLLM team is generously supported by grants from [Laude Institute](https://www.laude.org/), [AWS](https://aws.amazon.com/), [Hyperbolic](https://www.hyperbolic.ai/), [Fireworks AI](https://fireworks.ai/), and [Modal](https://modal.com/). We pay special thanks to [Together AI](https://www.together.ai/) for the research partnership and compute support.
113
+
114
+ ## Citation
115
+ ```bibtex
116
+ @misc{rllm2025,
117
+ title={rLLM: A Framework for Post-Training Language Agents},
118
+ author={Sijun Tan and Michael Luo and Colin Cai and Tarun Venkat and Kyle Montgomery and Aaron Hao and Tianhao Wu and Arnav Balyan and Manan Roongta and Chenguang Wang and Li Erran Li and Raluca Ada Popa and Ion Stoica},
119
+ year={2025},
120
+ howpublished={\url{https://pretty-radio-b75.notion.site/rLLM-A-Framework-for-Post-Training-Language-Agents-21b81902c146819db63cd98a54ba5f31}},
121
 + note={Notion Blog}
122
123
+ }
124
+ ```
125
+
126
+ You may also cite our prior work [DeepScaleR](https://scholar.googleusercontent.com/scholar.bib?q=info:PrmBADk39GwJ:scholar.google.com/&output=citation&scisdr=CgIJFx-xEMCQ6zOgcuI:AAZF9b8AAAAAaPCmauIfzg8Rm9ImNYDad0uPUK8&scisig=AAZF9b8AAAAAaPCmahXsNqb1jTQBw2iPfw2vm9g&scisf=4&ct=citation&cd=-1&hl=en&scfhb=1), [DeepCoder](https://scholar.googleusercontent.com/scholar.bib?q=info:xpZNEPI6opAJ:scholar.google.com/&output=citation&scisdr=CgIJFx-xEMCQ6zOgjM8:AAZF9b8AAAAAaPCmlM_hb3S0tzBSVrRYBZYDLWg&scisig=AAZF9b8AAAAAaPCmlG109SG8d8230AiDP4jMxlw&scisf=4&ct=citation&cd=-1&hl=en&scfhb=1), and [DeepSWE](https://scholar.googleusercontent.com/scholar.bib?q=info:J9rT3SnY_aMJ:scholar.google.com/&output=citation&scisdr=CgIJFx-xEMCQ6zOg3D4:AAZF9b8AAAAAaPCmxD7Nl0xA_AcAeydpcE1BXCo&scisig=AAZF9b8AAAAAaPCmxE2Spzf5lf-2Toys5xEpnuA&scisf=4&ct=citation&cd=-1&hl=en&scfhb=1).
vendor/rllm/build_docs.sh ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Install mkdocs and required plugins if not already installed
4
+ echo "Installing documentation dependencies with uv..."
5
+ uv pip install -e .
6
+
7
+ # Ensure the rllm package is available for import
8
+ export PYTHONPATH="../:$PYTHONPATH"
9
+
10
+ # Change to docs directory
11
+ cd "$(dirname "$0")"
12
+
13
+ # Build the documentation
14
+ echo "Building documentation..."
15
+ mkdocs build
16
+
17
+ # Serve the documentation (if requested)
18
+ if [ "$1" == "serve" ]; then
19
+ echo "Starting documentation server..."
20
+ mkdocs serve
21
+ fi
22
+
23
+ echo "Documentation built successfully!"
24
+ echo "To serve the documentation, run: cd docs && mkdocs serve"
25
+ echo "To view the built documentation, open: docs/site/index.html"
vendor/rllm/docs/README.md ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # rLLM Documentation
2
+
3
+ This directory contains the documentation for the rLLM library, built using [MkDocs](https://www.mkdocs.org/) with [Material theme](https://squidfunk.github.io/mkdocs-material/) and [mkdocstrings](https://mkdocstrings.github.io/) for API documentation.
4
+
5
+ ## 🚀 Quick Start
6
+
7
+ ### Building the Documentation
8
+
9
+ To build the documentation:
10
+
11
+ ```bash
12
+ ./build_docs.sh
13
+ ```
14
+
15
+ ### Serving the Documentation Locally
16
+
17
+ To build and serve the documentation with live reload:
18
+
19
+ ```bash
20
+ ./build_docs.sh serve
21
+ ```
22
+
23
+ The documentation will be available at `http://localhost:8000`.
24
+
25
+ ## 📁 Structure
26
+
27
+ ```
28
+ docs/
29
+ ├── mkdocs.yml # MkDocs configuration
30
+ ├── build_docs.sh # Build script
31
+ ├── docs/ # Documentation content
32
+ │ ├── index.md # Homepage
33
+ │ ├── api/ # API documentation (generated)
34
+ │ ├── examples/ # Example guides
35
+ │ ├── getting-started/ # Getting started guides
36
+ │ └── core-concepts/ # Core concept explanations
37
+ └── site/ # Generated static site (after build)
38
+ ```
39
+
40
+ ## 🔧 Features
41
+
42
+ ### API Documentation
43
+ - **Automatic API docs**: Generated from docstrings using mkdocstrings
44
+ - **Google-style docstrings**: Supports Google-style docstring format
45
+ - **Source code links**: Direct links to source code
46
+ - **Type hints**: Shows function signatures and type annotations
47
+
48
+ ### Documentation Features
49
+ - **Material Design**: Modern, responsive theme
50
+ - **Code highlighting**: Syntax highlighting for multiple languages
51
+ - **Navigation**: Automatic navigation generation
52
+ - **Search**: Full-text search functionality
53
+ - **Mobile-friendly**: Responsive design for all devices
54
+
55
+ ## ✍️ Writing Documentation
56
+
57
+ ### Adding New Pages
58
+ 1. Create a new `.md` file in the appropriate `docs/` subdirectory
59
+ 2. Add the page to the `nav` section in `mkdocs.yml`
60
+ 3. Use Markdown syntax for content
61
+
62
+ ### API Documentation
63
+ API documentation is automatically generated from Python docstrings using mkdocstrings. To document a new module:
64
+
65
+ 1. Add a new file in `docs/api/`
66
+ 2. Use the mkdocstrings syntax: `::: module.name`
67
+ 3. Add the page to the navigation in `mkdocs.yml`
68
+
69
+ Example:
70
+ ```markdown
71
+ # My Module
72
+
73
+ Brief description of the module.
74
+
75
+ ::: rllm.my_module
76
+ ```
77
+
78
+ ### Code Examples
79
+ Use fenced code blocks with language specification:
80
+
81
+ ```python
82
+ from rllm.agents import Agent
83
+
84
+ agent = Agent()
85
+ ```
86
+
87
+ ## 🛠️ Customization
88
+
89
+ ### Theme Configuration
90
+ The Material theme is configured in `mkdocs.yml`. You can customize:
91
+ - Colors and palette
92
+ - Navigation features
93
+ - Extensions and plugins
94
+
95
+ ### Extensions
96
+ Currently enabled extensions:
97
+ - `pymdownx.highlight`: Code highlighting
98
+ - `pymdownx.superfences`: Enhanced code blocks
99
+ - `admonition`: Call-out boxes
100
+ - `pymdownx.details`: Collapsible sections
101
+
102
+ ## 📝 Dependencies
103
+
104
+ Documentation dependencies are automatically installed with the main package:
105
+ - `mkdocs`: Static site generator
106
+ - `mkdocs-material`: Material Design theme
107
+ - `mkdocstrings[python]`: API documentation from docstrings
108
+ - `mkdocs-autorefs`: Cross-references
109
+ - `pymdown-extensions`: Enhanced Markdown extensions
110
+
111
+ ## 🐛 Troubleshooting
112
+
113
+ ### Common Issues
114
+
115
+ **Import errors when building**:
116
+ - Ensure the rLLM package is properly installed: `uv pip install -e ..`
117
+ - Check that all dependencies are available
118
+
119
+ **Missing API documentation**:
120
+ - Verify the module path in the mkdocstrings directive
121
+ - Check that the module has proper docstrings
122
+
123
+ **Build fails**:
124
+ - Check that all dependencies are installed: `uv pip install -e .`
125
+ - Verify that the `mkdocs.yml` syntax is correct
126
+
127
+ ### Getting Help
128
+ - Check the [MkDocs documentation](https://www.mkdocs.org/)
129
+ - Review [mkdocstrings documentation](https://mkdocstrings.github.io/)
130
+ - Open an issue in the rLLM repository
vendor/rllm/docs/api/agents/agent.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Agent Base
2
+
3
+ The base Agent class provides the core interface and functionality that all rLLM agents inherit from.
4
+
5
+ ::: rllm.agents.agent
vendor/rllm/docs/api/agents/utils.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Agent Utils
2
+
3
+ Utility functions and helpers for agent implementations.
4
+
5
+ ::: rllm.agents.utils
vendor/rllm/docs/api/engine/agent_execution_engine.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Agent Execution Engine
2
+
3
+ The core execution infrastructure that handles trajectory rollout and agent execution workflows.
4
+
5
+ ::: rllm.engine.agent_execution_engine
vendor/rllm/docs/api/engine/agent_workflow_engine.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Agent Workflow Engine
2
+
3
+ The core execution infrastructure that handles workflow execution and episode rollout.
4
+
5
+ ::: rllm.engine.agent_workflow_engine
vendor/rllm/docs/api/environments/base.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Base Environment
2
+
3
+ Core environment interface and base functionality that all rLLM environments inherit from.
4
+
5
+ ## Base Environment
6
+
7
+ ::: rllm.environments.base.base_env
8
+
9
+ ## Single Turn Environment
10
+
11
+ ::: rllm.environments.base.single_turn_env
12
+
13
+ ## Multi Turn Environment
14
+
15
+ ::: rllm.environments.base.multi_turn_env
vendor/rllm/docs/api/environments/env_utils.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Environment Utils
2
+
3
+ Utility functions and helpers for environment implementations and management.
4
+
5
+ ::: rllm.environments.env_utils
vendor/rllm/docs/api/index.md ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # API Reference
2
+
3
+ Welcome to the rLLM API reference documentation. This section provides comprehensive documentation for all modules, classes, and functions in the rLLM library.
4
+
5
+ ## Overview
6
+
7
+ rLLM is a library for training LLM agents with reinforcement learning. The API is organized into several key modules:
8
+
9
+ ## Core Modules
10
+
11
+ ### 🤖 Agents
12
+ The agents module contains various agent implementations that can be trained with reinforcement learning:
13
+
14
+ - **Base Agent**: Core agent interface and base functionality
15
+
16
+ ### 🌍 Environments
17
+ The environments module provides various training and evaluation environments:
18
+
19
+ - **Base Environment**: Core environment interface
20
+
21
+ ### 🧩 Workflow
22
+ The workflow module supports complex multi-step agent interactions:
23
+
24
+ - **Base Workflow**: Core workflow interface and base functionality
25
+
26
+ ### ⚙️ Engine
27
+ The engine module contains the core execution infrastructure:
28
+
29
+ - **Agent Execution Engine**: Handles trajectory rollout and agent execution
30
+ - **Agent Workflow Engine**: Handles episode rollout for complex workflows
31
+
32
+ ### 🎯 Trainer
33
+ The trainer module provides RL training capabilities:
34
+
35
+ - **Agent Trainer**: Main training interface for RL algorithms
36
+ - **Ray Runtime Environment**: Configuration for Ray runtime environment
37
+
38
+ ### 🛠️ Tools
39
+ The tools module provides a comprehensive framework for creating and managing tools:
40
+
41
+ - **Tool Base Classes**: Core interfaces and data structures
42
+ - **Web Tools**: Search, scraping, and content extraction tools
43
+ - **Code Tools**: Code execution and AI-powered coding assistance
44
+ - **Tool Registry**: Central registry for managing tools
45
+
46
+ ### 📝 Parser
47
+ The parser module provides functionality for parsing tool calls and managing chat templates:
48
+
49
+ - **Tool Parsers**: Parse tool calls from different model formats
50
+ - **Chat Parsers**: Parse messages in chat completions format into strings
vendor/rllm/docs/api/parser/chat_parser.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Chat Template
2
+
3
+ Chat template utilities for parsing and managing conversation templates.
4
+
5
+ ## Chat Template Parser
6
+
7
+ Utilities for parsing and managing chat templates, including support for different model formats and conversation structures.
8
+
9
+ ### Features
10
+
11
+ - Parse various chat template formats
12
+ - Handle different conversation structures
13
+ - Support for custom template parsing
14
+
15
+ ::: rllm.parser.chat_template_parser
vendor/rllm/docs/api/parser/tool_parser.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tool Parsers
2
+
3
+ Tool parsers extract tool calls from model responses and generate tool prompts for different model formats.
4
+
5
+ ## Usage
6
+
7
+ ```python
8
+ from rllm.parser import get_tool_parser
9
+
10
+ # Get a specific parser
11
+ parser = get_tool_parser("r1")
12
+
13
+ # Parse tool calls from model response
14
+ tool_calls = parser.parse(model_response)
15
+
16
+ # Get tool prompt
17
+ prompt = parser.get_tool_prompt(tools_schema)
18
+ ```
19
+
20
+ ::: rllm.parser.tool_parser
vendor/rllm/docs/api/tools/code_tools.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Code Tools
2
+
3
+ ::: rllm.tools.code_tools
vendor/rllm/docs/api/tools/registry.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tool Registry
2
+
3
+ The tool registry provides a centralized way to register, store, and retrieve tools. It supports both individual tool registration and batch registration of multiple tools.
4
+
5
+ ### Features
6
+
7
+ - Register individual tools or batches of tools
8
+ - Retrieve tools by name
9
+ - List all registered tools
10
+ - Singleton pattern for global access
11
+
12
+ ::: rllm.tools.registry
vendor/rllm/docs/api/tools/tool_base.md ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tool Base Classes
2
+
3
+ Core base classes and data structures for the tools framework.
4
+
5
+ ## Tool
6
+
7
+ The base class for all tools that provides a common interface for both synchronous and asynchronous tool execution.
8
+
9
+ ::: rllm.tools.tool_base.Tool
10
+
11
+ ## ToolCall
12
+
13
+ Data structure representing a tool call with name and arguments.
14
+
15
+ ::: rllm.tools.tool_base.ToolCall
16
+
17
+ ## ToolOutput
18
+
19
+ Data structure representing the output of a tool execution, including results, errors, and metadata.
20
+
21
+ ::: rllm.tools.tool_base.ToolOutput
vendor/rllm/docs/api/tools/web_tools.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Web Tools
2
+
3
+ Web-based tools for search, scraping, and content extraction.
4
+
5
+ ::: rllm.tools.web_tools
vendor/rllm/docs/api/trainer/agent_trainer.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Agent Trainer
2
+
3
+ Main training interface for RL algorithms and agent optimization.
4
+
5
+ ::: rllm.trainer.agent_trainer
vendor/rllm/docs/api/trainer/ray_runtime_env.md ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ray Runtime Environment Configuration
2
+
3
+ ## Overview
4
+
5
+ The `ray_runtime_env` module automatically forwards relevant environment variables from your local environment to Ray worker processes during distributed training. This ensures that configuration for libraries like VLLM, NCCL, CUDA, and HuggingFace are properly propagated to all workers.
6
+
7
+ ## Environment Variable Forwarding
8
+
9
+ ### Automatic Forwarding
10
+
11
+ Environment variables with the following prefixes are automatically forwarded to Ray workers:
12
+
13
+ - **Inference Engines**: `VLLM_`, `SGL_`, `SGLANG_`
14
+ - **HuggingFace Libraries**: `HF_`, `TOKENIZERS_`, `DATASETS_`
15
+ - **Training Frameworks**: `TORCH_`, `PYTORCH_`, `DEEPSPEED_`, `MEGATRON_`
16
+ - **CUDA/NCCL**: `NCCL_`, `CUDA_`, `CUBLAS_`, `CUDNN_`, `NV_`, `NVIDIA_`
17
+
18
+ ### Default Environment Variables
19
+
20
+ The following variables are set by default for PPO training:
21
+
22
+ ```python
23
+ {
24
+ "TOKENIZERS_PARALLELISM": "true",
25
+ "NCCL_DEBUG": "WARN",
26
+ "VLLM_LOGGING_LEVEL": "WARN",
27
+ "VLLM_ALLOW_RUNTIME_LORA_UPDATING": "true",
28
+ "CUDA_DEVICE_MAX_CONNECTIONS": "1",
29
+ "VLLM_USE_V1": "1",
30
+ }
31
+ ```
32
+
33
+ Environment variables from your shell **can override** these defaults.
34
+
35
+ ## Controlling Forwarding with RLLM_EXCLUDE
36
+
37
+ Use the `RLLM_EXCLUDE` environment variable to prevent specific variables or entire prefixes from being forwarded to Ray workers.
38
+
39
+ ### Exclude Specific Variables
40
+
41
+ Exclude individual environment variables by name:
42
+
43
+ ```bash
44
+ export RLLM_EXCLUDE="CUDA_VISIBLE_DEVICES,HF_TOKEN"
45
+ # CUDA_VISIBLE_DEVICES and HF_TOKEN will NOT be forwarded
46
+ ```
47
+
48
+ ### Exclude Entire Prefixes
49
+
50
+ Use the wildcard pattern `PREFIX*` to exclude all variables with a given prefix:
51
+
52
+ ```bash
53
+ export RLLM_EXCLUDE="VLLM*"
54
+ # All VLLM_* variables will NOT be forwarded (except defaults)
55
+ ```
56
+
57
+ ### Combined Exclusions
58
+
59
+ Combine multiple exclusions with commas:
60
+
61
+ ```bash
62
+ export RLLM_EXCLUDE="VLLM*,CUDA*,NCCL_IB_DISABLE"
63
+ # Excludes all VLLM_*, all CUDA_*, and the specific NCCL_IB_DISABLE variable
64
+ ```
65
+
66
+ ## Usage Example
67
+
68
+ ```python
69
+ from rllm.trainer.verl.ray_runtime_env import get_ppo_ray_runtime_env
70
+
71
+ # Get the runtime environment configuration
72
+ runtime_env = get_ppo_ray_runtime_env()
73
+
74
+ # Pass to Ray actor initialization
75
+ actor = ActorClass.options(runtime_env=runtime_env).remote()
76
+ ```
77
+
78
+ ## Common Use Cases
79
+
80
+ ### Debugging with Verbose Logging
81
+
82
+ ```bash
83
+ export VLLM_LOGGING_LEVEL="DEBUG"
84
+ export NCCL_DEBUG="INFO"
85
+ # These will override defaults and propagate to all workers
86
+ ```
87
+
88
+ ### Preventing Token Forwarding
89
+
90
+ ```bash
91
+ export RLLM_EXCLUDE="HF_TOKEN"
92
+ # Useful if you want workers to use a different authentication method
93
+ ```
94
+
95
+ ## API Reference
96
+
97
+ ::: rllm.trainer.verl.ray_runtime_env._get_forwarded_env_vars
98
+ options:
99
+ show_root_heading: true
100
+ show_source: true
101
+
102
+ ::: rllm.trainer.verl.ray_runtime_env.get_ppo_ray_runtime_env
103
+ options:
104
+ show_root_heading: true
105
+ show_source: true
106
+
107
+ ::: rllm.trainer.verl.ray_runtime_env.FORWARD_PREFIXES
108
+ options:
109
+ show_root_heading: true
110
+ show_source: false
111
+
vendor/rllm/docs/api/workflows/workflow.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Workflow
2
+
3
+ The Workflow class provides the core interface and functionality that all workflows inherit from.
4
+
5
+ ::: rllm.workflows.workflow
vendor/rllm/docs/assets/agentica-logo-black.png ADDED
vendor/rllm/docs/assets/agentica-logo.png ADDED
vendor/rllm/docs/assets/rllm_architecture.txt ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ +-------------------+
2
+ | |
3
+ | Language Model |
4
+ | |
5
+ +--------+----------+
6
+ |
7
+ v
8
+ +--------+----------+
9
+ | |
10
+ | Agent System |
11
+ | |
12
+ +--------+----------+
13
+ |
14
+ v
15
+ +--------+----------+ +----------------+
16
+ | | | |
17
+ | Environment +---->+ Rewards |
18
+ | | | |
19
+ +--------+----------+ +----------------+
20
+ |
21
+ v
22
+ +--------+----------+
23
+ | |
24
+ | Execution Engine |
25
+ | |
26
+ +--------+----------+
27
+ |
28
+ v
29
+ +--------+----------+
30
+ | |
31
+ | Training System |
32
+ | |
33
+ +-------------------+
vendor/rllm/docs/assets/rllm_components.png ADDED

Git LFS Details

  • SHA256: 0f1a6099c7b82f5f40c53f7019703f6b5ec7ecdd5c56bb5dc001314d667ca6b3
  • Pointer size: 131 Bytes
  • Size of remote file: 230 kB
vendor/rllm/docs/assets/rllm_logo_black.png ADDED
vendor/rllm/docs/assets/rllm_logo_blue.png ADDED
vendor/rllm/docs/assets/rllm_logo_white.png ADDED