diff --git a/.jules/bolt.md b/.jules/bolt.md
new file mode 100644
index 0000000..1a1263f
--- /dev/null
+++ b/.jules/bolt.md
@@ -0,0 +1,3 @@
+## 2024-05-23 - Thread Pool Overhead in Iterative Tasks
+**Learning:** Recreating a `ThreadPoolExecutor` inside a frequently called loop (such as an optimization loop) introduces significant overhead, especially when the individual tasks are short-lived.
+**Action:** Initialize the `ThreadPoolExecutor` once in the class `__init__` and reuse it across method calls to amortize the setup cost.
diff --git a/plugins/llm-application-dev/skills/prompt-engineering-patterns/scripts/optimize-prompt.py b/plugins/llm-application-dev/skills/prompt-engineering-patterns/scripts/optimize-prompt.py
index ce52721..97159f8 100644
--- a/plugins/llm-application-dev/skills/prompt-engineering-patterns/scripts/optimize-prompt.py
+++ b/plugins/llm-application-dev/skills/prompt-engineering-patterns/scripts/optimize-prompt.py
@@ -25,6 +25,11 @@ class PromptOptimizer:
         self.client = llm_client
         self.test_suite = test_suite
         self.results_history = []
+        self.executor = ThreadPoolExecutor()
+
+    def shutdown(self):
+        """Shut down the thread pool executor."""
+        self.executor.shutdown(wait=True)
 
     def evaluate_prompt(self, prompt_template: str, test_cases: List[TestCase] = None) -> Dict[str, float]:
         """Evaluate a prompt template against test cases in parallel."""
@@ -63,8 +68,7 @@ class PromptOptimizer:
         }
 
         # Run test cases in parallel
-        with ThreadPoolExecutor() as executor:
-            results = list(executor.map(process_test_case, test_cases))
+        results = list(self.executor.map(process_test_case, test_cases))
 
         # Aggregate metrics
         for result in results:
@@ -247,16 +251,19 @@ def main():
 
     optimizer = PromptOptimizer(MockLLMClient(), test_suite)
 
-    base_prompt = "Classify the sentiment of: {text}\nSentiment:"
+    try:
+        base_prompt = "Classify the sentiment of: {text}\nSentiment:"
 
-    results = optimizer.optimize(base_prompt)
+        results = optimizer.optimize(base_prompt)
 
-    print("\n" + "="*50)
-    print("Optimization Complete!")
-    print(f"Best Accuracy: {results['best_score']:.2f}")
-    print(f"Best Prompt:\n{results['best_prompt']}")
+        print("\n" + "="*50)
+        print("Optimization Complete!")
+        print(f"Best Accuracy: {results['best_score']:.2f}")
+        print(f"Best Prompt:\n{results['best_prompt']}")
 
-    optimizer.export_results('optimization_results.json')
+        optimizer.export_results('optimization_results.json')
+    finally:
+        optimizer.shutdown()
 
 
 if __name__ == '__main__':
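
For reference, a minimal standalone sketch of the reuse pattern the bolt.md note describes, assuming only the standard library; the `Evaluator` class, `evaluate`, and `_score` names are illustrative, not from the patched script:

```python
from concurrent.futures import ThreadPoolExecutor


class Evaluator:
    def __init__(self):
        # Created once: the worker threads persist across evaluate() calls,
        # so the pool's setup cost is paid a single time.
        self.executor = ThreadPoolExecutor()

    def evaluate(self, items):
        # Reuses the long-lived pool instead of `with ThreadPoolExecutor()`,
        # which would create and tear down the pool on every call.
        return list(self.executor.map(self._score, items))

    def _score(self, item):
        return len(item)  # stand-in for a short-lived task

    def shutdown(self):
        # The caller owns the lifecycle now, so an explicit shutdown is needed.
        self.executor.shutdown(wait=True)


if __name__ == "__main__":
    ev = Evaluator()
    try:
        for _ in range(1000):  # hot loop: no per-iteration pool churn
            ev.evaluate(["a", "bb", "ccc"])
    finally:
        ev.shutdown()
```

The try/finally mirrors the change in `main()`: once the executor is no longer scoped by a `with` block, shutdown must be guaranteed even if the loop raises.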