Commit 303cef0

refactor(prompt): Replace hardcoded strings with template fragments
- Refactor hardcoded strings in the PromptSampler class to use the template manager for localized fragments
- Modify format_feature_coordinates to return an empty string instead of fixed text when no feature coordinates are available
- Update fragments.json to add the new template fragments
1 parent a65ca2b commit 303cef0
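
The call sites in this commit assume only a key-to-string lookup with optional str.format substitution. A minimal sketch of that contract, assuming fragments are loaded from fragments.json (the constructor and path handling here are illustrative, not the repository's actual implementation):

```python
import json
from pathlib import Path


class TemplateManager:
    """Illustrative sketch of the fragment lookup the refactored call sites rely on."""

    def __init__(self, fragments_path: Path) -> None:
        # e.g. Path("openevolve/prompts/defaults/fragments.json")
        self._fragments: dict[str, str] = json.loads(fragments_path.read_text(encoding="utf-8"))

    def get_fragment(self, key: str, **kwargs) -> str:
        # Plain fragments come back verbatim; fragments containing
        # {placeholders} are filled via str.format, as in
        # get_fragment("exploring_region", features=feature_coords) below.
        template = self._fragments[key]
        return template.format(**kwargs) if kwargs else template
```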

File tree

3 files changed: 58 additions, 32 deletions

- openevolve/prompt/sampler.py
- openevolve/prompts/defaults/fragments.json
- openevolve/utils/metrics_utils.py


openevolve/prompt/sampler.py

Lines changed: 30 additions & 29 deletions
@@ -205,11 +205,13 @@ def _identify_improvement_areas(
         # Note feature exploration (not good/bad, just informational)
         if feature_dimensions:
             feature_coords = format_feature_coordinates(metrics, feature_dimensions)
-            if feature_coords != "No feature coordinates":
+            if feature_coords == "":
+                msg = self.template_manager.get_fragment("no_feature_coordinates")
+            else:
                 msg = self.template_manager.get_fragment(
                     "exploring_region", features=feature_coords
                 )
-                improvement_areas.append(msg)
+            improvement_areas.append(msg)
 
         # Code length check (configurable threshold)
         threshold = (
@@ -245,7 +247,7 @@ def _format_evolution_history(
 
         for i, program in enumerate(reversed(selected_previous)):
             attempt_number = len(previous_programs) - i
-            changes = program.get("metadata", {}).get("changes", "Unknown changes")
+            changes = program.get("metadata", {}).get("changes", self.template_manager.get_fragment("attempt_unknown_changes"))
 
             # Format performance metrics using safe formatting
             performance_parts = []
@@ -261,7 +263,7 @@ def _format_evolution_history(
 
             # Determine outcome based on comparison with parent (only numeric metrics)
             parent_metrics = program.get("metadata", {}).get("parent_metrics", {})
-            outcome = "Mixed results"
+            outcome = self.template_manager.get_fragment("attempt_mixed_metrics")
 
             # Safely compare only numeric metrics
             program_metrics = program.get("metrics", {})
@@ -288,9 +290,9 @@ def _format_evolution_history(
 
             # Determine outcome based on numeric comparisons
             if numeric_comparisons_improved and all(numeric_comparisons_improved):
-                outcome = "Improvement in all metrics"
+                outcome = self.template_manager.get_fragment("attempt_all_metrics_improved")
             elif numeric_comparisons_regressed and all(numeric_comparisons_regressed):
-                outcome = "Regression in all metrics"
+                outcome = self.template_manager.get_fragment("attempt_all_metrics_regressed")
 
             previous_attempts_str += (
                 previous_attempt_template.format(
@@ -320,11 +322,11 @@ def _format_evolution_history(
             for name, value in program.get("metrics", {}).items():
                 if isinstance(value, (int, float)):
                     try:
-                        key_features.append(f"Performs well on {name} ({value:.4f})")
+                        key_features.append(self.template_manager.get_fragment("top_program_metrics_prefix") + f" {name} ({value:.4f})")
                     except (ValueError, TypeError):
-                        key_features.append(f"Performs well on {name} ({value})")
+                        key_features.append(self.template_manager.get_fragment("top_program_metrics_prefix") + f" {name} ({value})")
                 else:
-                    key_features.append(f"Performs well on {name} ({value})")
+                    key_features.append(self.template_manager.get_fragment("top_program_metrics_prefix") + f" {name} ({value})")
 
             key_features_str = ", ".join(key_features)
 
@@ -354,7 +356,7 @@ def _format_evolution_history(
             # Use random sampling to get diverse programs
             diverse_programs = random.sample(remaining_programs, num_diverse)
 
-            diverse_programs_str += "\n\n## Diverse Programs\n\n"
+            diverse_programs_str += "\n\n## " + self.template_manager.get_fragment("diverse_programs_title") + "\n\n"
 
             for i, program in enumerate(diverse_programs):
                 # Use the full program code
@@ -367,7 +369,7 @@ def _format_evolution_history(
                 key_features = program.get("key_features", [])
                 if not key_features:
                     key_features = [
-                        f"Alternative approach to {name}"
+                        self.template_manager.get_fragment("diverse_program_metrics_prefix") + f" {name}"
                         for name in list(program.get("metrics", {}).keys())[
                             :2
                         ]  # Just first 2 metrics
@@ -472,21 +474,20 @@ def _determine_program_type(
 
         # Check metadata for explicit type markers
         if metadata.get("diverse", False):
-            return "Diverse"
+            return self.template_manager.get_fragment("inspiration_type_diverse")
         if metadata.get("migrant", False):
-            return "Migrant"
+            return self.template_manager.get_fragment("inspiration_type_migrant")
         if metadata.get("random", False):
-            return "Random"
-
+            return self.template_manager.get_fragment("inspiration_type_random")
         # Classify based on score ranges
         if score >= 0.8:
-            return "High-Performer"
+            return self.template_manager.get_fragment("inspiration_type_score_high_performer")
        elif score >= 0.6:
-            return "Alternative"
+            return self.template_manager.get_fragment("inspiration_type_score_alternative")
        elif score >= 0.4:
-            return "Experimental"
+            return self.template_manager.get_fragment("inspiration_type_score_experimental")
        else:
-            return "Exploratory"
+            return self.template_manager.get_fragment("inspiration_type_score_exploratory")
 
     def _extract_unique_features(self, program: Dict[str, Any]) -> str:
         """
@@ -509,42 +510,42 @@ def _extract_unique_features(self, program: Dict[str, Any]) -> str:
             and self.config.include_changes_under_chars
             and len(changes) < self.config.include_changes_under_chars
         ):
-            features.append(f"Modification: {changes}")
+            features.append(self.template_manager.get_fragment("inspiration_changes_prefix").format(changes=changes))
 
         # Analyze metrics for standout characteristics
         metrics = program.get("metrics", {})
         for metric_name, value in metrics.items():
             if isinstance(value, (int, float)):
                 if value >= 0.9:
-                    features.append(f"Excellent {metric_name} ({value:.3f})")
+                    features.append(f"{self.template_manager.get_fragment('inspiration_metrics_excellent').format(metric_name=metric_name, value=value)}")
                 elif value <= 0.3:
-                    features.append(f"Alternative {metric_name} approach")
+                    features.append(f"{self.template_manager.get_fragment('inspiration_metrics_alternative').format(metric_name=metric_name)}")
 
         # Code-based features (simple heuristics)
         code = program.get("code", "")
         if code:
             code_lower = code.lower()
             if "class" in code_lower and "def __init__" in code_lower:
-                features.append("Object-oriented approach")
+                features.append(self.template_manager.get_fragment("inspiration_code_with_class"))
             if "numpy" in code_lower or "np." in code_lower:
-                features.append("NumPy-based implementation")
+                features.append(self.template_manager.get_fragment("inspiration_code_with_numpy"))
             if "for" in code_lower and "while" in code_lower:
-                features.append("Mixed iteration strategies")
+                features.append(self.template_manager.get_fragment("inspiration_code_with_mixed_iteration"))
             if (
                 self.config.concise_implementation_max_lines
                 and len(code.split("\n")) <= self.config.concise_implementation_max_lines
             ):
-                features.append("Concise implementation")
+                features.append(self.template_manager.get_fragment("inspiration_code_with_concise_line"))
             elif (
                 self.config.comprehensive_implementation_min_lines
                 and len(code.split("\n")) >= self.config.comprehensive_implementation_min_lines
             ):
-                features.append("Comprehensive implementation")
+                features.append(self.template_manager.get_fragment("inspiration_code_with_comprehensive_line"))
 
         # Default if no specific features found
         if not features:
             program_type = self._determine_program_type(program)
-            features.append(f"{program_type} approach to the problem")
+            features.append(self.template_manager.get_fragment("inspiration_no_features_postfix").format(program_type=program_type))
 
         # Use num_top_programs as limit for features (similar to how we limit programs)
         feature_limit = self.config.num_top_programs
@@ -587,7 +588,7 @@ def _render_artifacts(self, artifacts: Dict[str, Union[str, bytes]]) -> str:
             sections.append(f"### {key}\n```\n{content}\n```")
 
         if sections:
-            return "## Last Execution Output\n\n" + "\n\n".join(sections)
+            return "## " + self.template_manager.get_fragment("artifact_title") + "\n\n" + "\n\n".join(sections)
         else:
             return ""

openevolve/prompts/defaults/fragments.json

Lines changed: 26 additions & 1 deletion
@@ -14,5 +14,30 @@
   "metrics_improved": "Metrics showing improvement: {metrics}. Consider continuing with similar approaches.",
   "metrics_regressed": "Metrics showing changes: {metrics}. Consider different approaches in these areas.",
   "code_simplification": "Consider simplifying the code to improve readability and maintainability",
-  "default_improvement": "Focus on improving the fitness score while exploring diverse solutions"
+  "default_improvement": "Focus on improving the fitness score while exploring diverse solutions",
+  "no_feature_coordinates": "No feature coordinates",
+  "artifact_title": "Last Execution Output",
+  "diverse_programs_title": "Diverse Programs",
+  "attempt_unknown_changes": "Unknown changes",
+  "attempt_all_metrics_improved": "Improvement in all metrics",
+  "attempt_all_metrics_regressed": "Regression in all metrics",
+  "attempt_mixed_metrics": "Mixed results",
+  "top_program_metrics_prefix": "Performs well on",
+  "diverse_program_metrics_prefix": "Alternative approach to",
+  "inspiration_type_diverse": "Diverse",
+  "inspiration_type_migrant": "Migrant",
+  "inspiration_type_random": "Random",
+  "inspiration_type_score_high_performer": "High-Performer",
+  "inspiration_type_score_alternative": "Alternative",
+  "inspiration_type_score_experimental": "Experimental",
+  "inspiration_type_score_exploratory": "Exploratory",
+  "inspiration_changes_prefix": "Modification: {changes}",
+  "inspiration_metrics_excellent": "Excellent {metric_name} ({value:.3f})",
+  "inspiration_metrics_alternative": "Alternative {metric_name} approach",
+  "inspiration_code_with_class": "Object-oriented approach",
+  "inspiration_code_with_numpy": "NumPy-based implementation",
+  "inspiration_code_with_mixed_iteration": "Mixed iteration strategies",
+  "inspiration_code_with_concise_line": "Concise implementation",
+  "inspiration_code_with_comprehensive_line": "Comprehensive implementation",
+  "inspiration_no_features_postfix": "{program_type} approach to the problem"
 }
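
The entries with {placeholders} are filled at the call site via str.format; the rest are used verbatim. A quick sanity check of the new entries, assuming the file is read from the path shown above:

```python
import json

with open("openevolve/prompts/defaults/fragments.json", encoding="utf-8") as f:
    fragments = json.load(f)

# Plain fragment, consumed verbatim by sampler.py.
assert fragments["attempt_mixed_metrics"] == "Mixed results"

# Placeholder fragment, filled at the call site.
line = fragments["inspiration_metrics_excellent"].format(metric_name="accuracy", value=0.9514)
assert line == "Excellent accuracy (0.951)"
```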

openevolve/utils/metrics_utils.py

Lines changed: 2 additions & 2 deletions
@@ -139,7 +139,7 @@ def format_feature_coordinates(metrics: Dict[str, Any], feature_dimensions: List
         else:
             feature_values.append(f"{dim}={value}")
 
-    if not feature_values:
-        return "No feature coordinates"
+    if not feature_values:  # No valid feature coordinates found; return an empty string
+        return ""
 
     return ", ".join(feature_values)
