codelion committed
Commit 1332a7b · verified · 1 Parent(s): d2bcbf6

Update app.py

Files changed (1)
  1. app.py +5 -6
app.py CHANGED
@@ -14,9 +14,8 @@ import requests
 import glob
 
 # Model for OpenRouter
-# Using paid llama-3.2-3b-instruct since free tier models have unreliable rate limits
 MODELS = [
-    "meta-llama/llama-3.2-3b-instruct",  # 3B - Reliable, fast, and very cheap ($0.04/$0.04 per 1M tokens)
+    "google/gemini-2.5-flash-lite",
 ]
 
 
@@ -778,7 +777,7 @@ Your improved prompt here
 
     config = {
        "llm": {
-           "primary_model": "meta-llama/llama-3.1-8b-instruct",  # Use STRONGER model for prompt generation
+           "primary_model": "google/gemini-2.5-flash",  # Use STRONGER model for prompt generation
            "api_base": "https://openrouter.ai/api/v1",  # Use OpenRouter endpoint
            "temperature": 1.2,  # Even higher temperature for more creative variations
        },
@@ -1007,7 +1006,7 @@ def optimize_prompt(initial_prompt: str, dataset_name: str, dataset_split: str,
     ### Summary
     - **Dataset**: {dataset_name} ({dataset_split} split)
     - **Evaluation Model**: {model}
-    - **Evolution Model**: meta-llama/llama-3.1-8b-instruct (larger model for better prompt generation)
+    - **Evolution Model**: google/gemini-2.5-flash (larger model for better prompt generation)
     - **Initial Eval**: 50 samples
     - **Final Eval**: 50 samples (same samples for fair comparison)
     - **Evolution**: 50 samples per variant (SAME samples as initial/final!)
@@ -1045,7 +1044,7 @@ with gr.Blocks(title="OpenEvolve Prompt Optimizer", theme=gr.themes.Soft()) as d
 
     **Usage**: Enter initial prompt with `{input}` placeholder → Click optimize → Compare results
 
-    **Model**: `meta-llama/llama-3.2-3b-instruct` (~$0.04 per 1M tokens)
+    **Model**: `google/gemini-2.5-flash-lite`
     """)
 
     with gr.Row():
@@ -1115,7 +1114,7 @@ with gr.Blocks(title="OpenEvolve Prompt Optimizer", theme=gr.themes.Soft()) as d
         """Wrapper to use fixed model instead of dropdown"""
         return optimize_prompt(
             initial_prompt, dataset_name, dataset_split,
-            MODELS[0],  # Use fixed llama-3.2-3b model
+            MODELS[0],
             input_field, target_field, progress
        )
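
Taken together, the commit keeps the existing split between a cheap evaluation model and a stronger evolution model, swapping the Llama pair for `google/gemini-2.5-flash-lite` (evaluation, the fixed `MODELS[0]` entry) and `google/gemini-2.5-flash` (prompt generation). For reference, a minimal sketch of exercising the new default `MODELS` entry against the OpenRouter endpoint named in the config hunk; the `openai` client, the `OPENROUTER_API_KEY` variable name, and the request shape are illustrative assumptions, not code taken from app.py.

```python
# Minimal sketch (not app.py's actual request path): call the new default
# evaluation model through OpenRouter's OpenAI-compatible endpoint.
# Assumes the `openai` package (v1+) and an OPENROUTER_API_KEY env var;
# app.py may wire its client and credentials differently.
import os
from openai import OpenAI

MODELS = ["google/gemini-2.5-flash-lite"]  # evaluation model set in this commit

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",    # same api_base as in the config hunk
    api_key=os.environ["OPENROUTER_API_KEY"],   # assumed env var name
)

response = client.chat.completions.create(
    model=MODELS[0],
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)
```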