From 835a3de023aadb647c8f943c783f60a2fa2889bc Mon Sep 17 00:00:00 2001 From: Libres-coder <2597242922@qq.com> Date: Wed, 17 Sep 2025 03:24:07 +0800 Subject: [PATCH] add intelligent prompt generator and optimizer for agents --- .../prompt/prompt_generator_helper.py | 608 ++++++++++++++++++ .../prompt_generator_app/.gitignore | 68 ++ .../prompt_generator_app/README.md | 9 + .../prompt_generator_app/README_zh.md | 9 + .../prompt_generator_app/__init__.py | 7 + .../intelligence/__init__.py | 7 + .../intelligence/agentic/__init__.py | 7 + .../intelligence/agentic/agent/__init__.py | 7 + .../agentic/agent/agent_instance/__init__.py | 7 + .../agentic/knowledge/__init__.py | 7 + .../intelligence/agentic/llm/__init__.py | 7 + .../intelligence/agentic/memory/__init__.py | 7 + .../intelligence/agentic/prompt/__init__.py | 7 + .../intelligence/agentic/tool/__init__.py | 7 + .../agentic/work_pattern/__init__.py | 7 + .../intelligence/service/__init__.py | 7 + .../service/agent_service/__init__.py | 7 + .../service/agent_service/prompt_service.py | 234 +++++++ .../service/classic_service/__init__.py | 7 + .../classic_service/template_service.py | 227 +++++++ .../intelligence/test/__init__.py | 7 + .../intelligence/test/prompt_generator.py | 188 ++++++ .../intelligence/test/prompt_optimizer.py | 152 +++++ .../test/test_prompt_generator.py | 231 +++++++ .../test/test_prompt_optimizer.py | 171 +++++ .../intelligence/utils/__init__.py | 7 + .../intelligence/utils/common/__init__.py | 7 + .../intelligence/utils/common/prompt_util.py | 254 ++++++++ .../intelligence/utils/common/yaml_util.py | 206 ++++++ .../prompt_generator_app/poetry.toml | 3 + .../prompt_generator_app/pyproject.toml | 49 ++ .../prompt_generator_app/run_example.py | 39 ++ .../run_optimizer_example.py | 42 ++ .../prompt_generator_app/run_tests.py | 74 +++ scripts/generate_prompt.py | 127 ++++ .../unit/prompt/__init__.py | 7 + .../prompt/test_prompt_generator_helper.py | 231 +++++++ 37 files changed, 3048 insertions(+) create mode 100644 agentuniverse/prompt/prompt_generator_helper.py create mode 100644 examples/sample_apps/prompt_generator_app/.gitignore create mode 100644 examples/sample_apps/prompt_generator_app/README.md create mode 100644 examples/sample_apps/prompt_generator_app/README_zh.md create mode 100644 examples/sample_apps/prompt_generator_app/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/agent_instance/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/knowledge/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/llm/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/memory/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/prompt/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/tool/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/agentic/work_pattern/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/service/__init__.py create mode 100644 
examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/prompt_service.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/template_service.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/test/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/test/prompt_generator.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/test/prompt_optimizer.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_generator.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_optimizer.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/utils/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/utils/common/__init__.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/utils/common/prompt_util.py create mode 100644 examples/sample_apps/prompt_generator_app/intelligence/utils/common/yaml_util.py create mode 100644 examples/sample_apps/prompt_generator_app/poetry.toml create mode 100644 examples/sample_apps/prompt_generator_app/pyproject.toml create mode 100644 examples/sample_apps/prompt_generator_app/run_example.py create mode 100644 examples/sample_apps/prompt_generator_app/run_optimizer_example.py create mode 100644 examples/sample_apps/prompt_generator_app/run_tests.py create mode 100644 scripts/generate_prompt.py create mode 100644 tests/test_agentuniverse/unit/prompt/__init__.py create mode 100644 tests/test_agentuniverse/unit/prompt/test_prompt_generator_helper.py diff --git a/agentuniverse/prompt/prompt_generator_helper.py b/agentuniverse/prompt/prompt_generator_helper.py new file mode 100644 index 00000000..6d60caba --- /dev/null +++ b/agentuniverse/prompt/prompt_generator_helper.py @@ -0,0 +1,608 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: prompt_generator_helper.py +"""Prompt generation helper module. + +Provides simple prompt generation functionality for agentUniverse developers, +helping to quickly create standardized prompt configuration files. +This is a lightweight development tool focused on solving prompt creation needs during development. +""" +import json +import os +import re +import yaml +from pathlib import Path +from typing import Any, Dict, Optional, Union + +from agentuniverse.prompt.prompt_model import AgentPromptModel + + +class PromptGenerationError(Exception): + """Exception raised for prompt generation related errors.""" + + def __init__(self, message: str) -> None: + """Initialize the PromptGenerationError. + + Args: + message: Error message describing the issue. + """ + super().__init__(message) + self.message = message + + +class UnsupportedAgentTypeError(PromptGenerationError): + """Exception raised when an unsupported agent type is used.""" + + def __init__(self, agent_type: str) -> None: + """Initialize the UnsupportedAgentTypeError. + + Args: + agent_type: The unsupported agent type. 
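+
+        Example:
+            Illustrative only; any value missing from
+            PromptTemplateHelper.AGENT_TEMPLATES triggers this error:
+
+            >>> try:
+            ...     PromptTemplateHelper.generate_prompt_template(
+            ...         task_description="demo task", agent_type="unknown")
+            ... except UnsupportedAgentTypeError as err:
+            ...     print(err.agent_type)
+            unknown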
+ """ + message = f"Unsupported agent type: {agent_type}" + super().__init__(message) + self.agent_type = agent_type + + +class PromptTemplateHelper: + """Prompt template helper class. + + Provides template-based prompt generation for different agent types + in the agentUniverse framework. It focuses on generating standardized YAML + configurations that are compatible with the existing architecture. + + Design principles: Simple, lightweight, and compatible with existing architecture. + """ + + # Agent type templates mapping + AGENT_TEMPLATES = { + 'react': { + 'name': 'Reasoning and Acting Agent', + 'introduction_template': 'You are an AI assistant proficient in tool usage.', + 'target_template': 'Your goal is to answer user questions based on the provided background information and tools. {specific_goal}', + 'instruction_template': '''You must prioritize using the provided tools to answer user questions. If no tools are provided, you can solve problems based on your general knowledge. +You must answer questions in Chinese. +You must analyze user questions from multiple angles and dimensions to help users get comprehensive information. Based on the background and questions, decide what information to search for to answer the questions. +You must break down large problems into multiple smaller problems and plan solution steps. + +{specific_requirements} + +You can use the following tools: +{{tools}} + +你的回答必须严格使用以下格式: + + Question: 您必须回答的问题 + Thought: 我这一步应该做什么,为什么要这么做,我现在要使用一个工具, 不允许回答Final Answer + Action: 要使用的工具应该,值必须是 [{{tool_names}}] 之一 + Action Input: 工具的输入 + Observation: 工具的执行结果 + ... (Thought/Action/Action Input/Observation 的过程可以重复 N 次) + Thought: 我现在知道所有问题的最终答案了 + Final Answer: 所有问题的最终答案 + +历史对话: +{{chat_history}} + +背景信息是: +{{background}} + +开始! + 注意: + 1.你的回答必须是(Thought/Action/Observation)与(Thought/Final Answer)两种格式之一 + 2.你现在必须根据上一步Observation的结果(成功、失败、报错,信息不完整),判断下一步要执行的动作 + +Question: {{input}} +Thought: {{agent_scratchpad}}''' + }, + + 'rag': { + 'name': 'Retrieval Augmented Generation Agent', + 'introduction_template': 'You are an AI assistant proficient in information analysis.', + 'target_template': 'Your goal is to provide answers based on user questions and background information. {specific_goal}', + 'instruction_template': '''You need to follow these rules: +1. You must answer user questions in Chinese. +2. Keep answers within 300 characters. +3. Do not use incorrect information from background materials. +4. Consider the relevance of the answer to the question, do not provide unhelpful responses. +5. Be concise and clear, highlight key points, avoid excessive decorative language. +6. Do not make vague speculations. +7. Use as much numerical information as possible. + +{specific_requirements} + +Chat history: +{{chat_history}} + +Background information: +{{background}} + +Today's date: {{date}} + +Start! + +The question to answer is: {{input}}''' + }, + + 'planning': { + 'name': 'Planning Agent', + 'introduction_template': 'You are an AI assistant proficient in information analysis.', + 'target_template': 'Your goal is to break down the question into 3-5 sub-questions. {specific_goal}', + 'instruction_template': '''Based on the question to be answered, provide a logical progressive chain of thought to help users gradually master knowledge and ultimately answer the question. +The chain of thought is embodied in the form of sub-questions, each of which is a complete sentence. 
+The chain of thought must strictly follow the questions to be answered, cannot extend questions, and cannot directly answer questions. + +{specific_requirements} + +Each step in this chain of thought must be simple and singular. +Complex problems must be broken down into multiple steps. +Each step must be answerable, not open-ended. +Each step must be a complete sentence without any ambiguity. +Please break down this question into multiple steps, each step different from the original question, step by step. + +Today's date is: {{date}} + +Previous conversation: +{{chat_history}} + +Output must be in the following formatted JSON code snippet, where the thought field represents the approach to breaking down the problem, and the framework field represents the list of sub-questions. + ```json + {{ +"thought": "Analysis approach and logic for the problem", +"framework": ["Sub-question 1", "Sub-question 2", "Sub-question 3"] + }} + ``` + +Begin! +You must answer user questions in English. + +The question to answer is: {{input}}''' + }, + + 'executing': { + 'name': 'Executing Agent', + 'introduction_template': 'You are an AI assistant proficient in information analysis.', + 'target_template': 'Your goal is to integrate and correct the questions provided by users and the retrieved knowledge to answer their queries. {specific_goal}', + 'instruction_template': '''You need to answer my questions based on the background information I provide. +Your answers should be as detailed and comprehensive as possible, including sufficient data information. + +{specific_requirements} + +Please follow these rules: +1. Remove duplicate information. +2. Remove irrelevant information unrelated to answering the question. +3. Remove incorrect information. +4. Use only this information to answer questions. +5. Be concise, focused, and do not use excessive flowery vocabulary or phrases. +6. Do not repeat the same details in multiple places; each point should appear only once. +7. Use as much numerical information as possible. +8. Avoid using vague terms like XXX, ABC, etc. + +Previous conversation: +{{chat_history}} + +Background information: +{{background}} + +Today's date is: {{date}} + +Let's begin! +My question is: {{input}}''' + }, + + 'expressing': { + 'name': 'Expressing Agent', + 'introduction_template': 'You are a professional content expression expert.', + 'target_template': 'Your goal is to conduct professional content creation and expression according to user needs. {specific_goal}', + 'instruction_template': '''You have excellent language expression and content creation abilities, able to provide high-quality content according to different scenarios and user needs. + +{specific_requirements} + +Working principles: +1. Ensure content accuracy and professionalism +2. Clear and fluent language expression +3. Adapt style to specific scenarios and audiences +4. Focus on logic and readability + +User requirements: {{input}}''' + }, + + 'reviewing': { + 'name': 'Reviewing Agent', + 'introduction_template': 'You are a professional review expert.', + 'target_template': 'Your goal is to provide professional review and improvement suggestions for the provided content. {specific_goal}', + 'instruction_template': '''You have keen review abilities, able to identify problems in content and provide constructive improvement suggestions. + +{specific_requirements} + +Review dimensions: +1. Content accuracy and completeness +2. Logic and coherence +3. Clarity of language expression +4. 
Whether it meets expected goals + +Please review the following content: {{input}}''' + }, + + 'workflow': { + 'name': 'Workflow Agent', + 'introduction_template': 'You are a professional workflow management expert.', + 'target_template': 'Your goal is to coordinate and manage complex workflows. {specific_goal}', + 'instruction_template': '''You have excellent workflow management and task coordination capabilities. + +{specific_requirements} + +Management principles: +1. Ensure process efficiency and accuracy +2. Properly coordinate all links +3. Handle exceptions and problems promptly +4. Maintain process traceability + +Current task: {{input}}''' + } + } + + @classmethod + def generate_prompt_template(cls, + task_description: str, + agent_type: str = 'react', + scenario: Optional[str] = None, + specific_requirements: Optional[str] = None) -> AgentPromptModel: + """Generate prompt template for specified agent type. + + Creates a customized prompt template based on the provided parameters, + tailored to the specific agent type and use case scenario. + + Args: + task_description (str): Task description detailing what the agent should accomplish. + agent_type (str): Type of agent (react, rag, planning, etc.). Defaults to 'react'. + scenario (Optional[str]): Application scenario for context-specific prompts. + specific_requirements (Optional[str]): Additional requirements for customization. + + Returns: + AgentPromptModel: Generated prompt template object with introduction, target, and instruction. + + Raises: + UnsupportedAgentTypeError: If the specified agent type is not supported. + + Example: + >>> prompt_model = PromptTemplateHelper.generate_prompt_template( + ... task_description="Customer service assistant", + ... agent_type="react", + ... scenario="E-commerce platform" + ... ) + """ + if agent_type not in cls.AGENT_TEMPLATES: + raise UnsupportedAgentTypeError(agent_type) + + template = cls.AGENT_TEMPLATES[agent_type] + + # Build specific goals + specific_goal = "" + if scenario: + specific_goal = f"Particularly suitable for {scenario} scenarios." + if task_description: + specific_goal += f"Specific task: {task_description}" + + # Build specific requirements + if not specific_requirements: + specific_requirements = "Please adjust specific implementation methods according to actual conditions." + + # Generate prompt content + introduction = template['introduction_template'] + if task_description and "assistant" in introduction.lower(): + domain = cls._extract_domain(task_description) + if domain: + introduction = introduction.replace("AI assistant", f"{domain} AI assistant") + + target = template['target_template'].format(specific_goal=specific_goal) + instruction = template['instruction_template'].format(specific_requirements=specific_requirements) + + return AgentPromptModel( + introduction=introduction, + target=target, + instruction=instruction + ) + + @classmethod + def generate_yaml_config(cls, + prompt_model: AgentPromptModel, + version_name: str = None, + agent_type: str = 'react') -> str: + """Generate YAML configuration from prompt model. + + Converts the prompt model into a standardized YAML configuration + format that can be used directly in agentUniverse applications. + + Args: + prompt_model (AgentPromptModel): The prompt model to convert. + version_name (Optional[str]): Version identifier for the configuration. + agent_type (str): Agent type for metadata. Defaults to 'react'. + + Returns: + str: YAML configuration string ready for use. 
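+
+        Example:
+            A minimal sketch chaining the two helpers; the exact YAML text
+            depends on the built-in agent templates:
+
+            >>> model = PromptTemplateHelper.generate_prompt_template(
+            ...     task_description="Customer service assistant",
+            ...     agent_type="react")
+            >>> yaml_text = PromptTemplateHelper.generate_yaml_config(
+            ...     prompt_model=model,
+            ...     version_name="customer_service.cn",
+            ...     agent_type="react")
+            >>> yaml_text.startswith("introduction:")
+            True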
+ """ + yaml_content = "" + + if prompt_model.introduction: + yaml_content += f"introduction: {prompt_model.introduction}\n" + + if prompt_model.target: + yaml_content += f"target: {prompt_model.target}\n" + + if prompt_model.instruction: + # Process multi-line instruction + if '\n' in prompt_model.instruction: + yaml_content += "instruction: |\n" + for line in prompt_model.instruction.split('\n'): + yaml_content += f" {line}\n" + else: + yaml_content += f"instruction: {prompt_model.instruction}\n" + + # Add metadata + yaml_content += "metadata:\n" + yaml_content += " type: 'PROMPT'\n" + + if not version_name: + version_name = f"custom_{agent_type}_prompt.cn" + + yaml_content += f" version: '{version_name}'\n" + + return yaml_content + + @classmethod + def _extract_domain(cls, task_description: str) -> Optional[str]: + """Extract domain information from task description.""" + domain_keywords = { + 'medical': 'medical', 'diagnosis': 'medical', 'symptom': 'medical', 'healthcare': 'medical', + 'financial': 'financial', 'finance': 'financial', 'stock': 'financial', 'investment': 'financial', 'bank': 'financial', + 'education': 'education', 'learning': 'education', 'teaching': 'education', 'academic': 'education', + 'customer_service': 'customer_service', 'service': 'service', 'support': 'customer_service', + 'legal': 'legal', 'contract': 'legal', 'law': 'legal', + 'sales': 'sales', 'marketing': 'marketing', + 'technical': 'technical', 'development': 'technical', 'programming': 'technical', 'technology': 'technical' + } + + for keyword, domain in domain_keywords.items(): + if keyword in task_description: + return domain + return None + + @classmethod + def get_supported_agent_types(cls) -> Dict[str, str]: + """Get supported agent types.""" + return {k: v['name'] for k, v in cls.AGENT_TEMPLATES.items()} + + +def optimize_existing_prompt(existing_prompt_text: str, + optimization_goal: str, + agent_type: Optional[str] = None, + scenario: Optional[str] = None) -> AgentPromptModel: + """Optimize existing prompt configuration. + + Analyze existing prompt content and improve it based on optimization goals and scenario requirements. + This is one of the core functionalities required by the issue: optimizing existing prompts. + + Args: + existing_prompt_text (str): Existing prompt content (can be YAML or plain text) + optimization_goal (str): Optimization goal, such as "improve accuracy", "enhance professionalism", etc. + agent_type (str, optional): Agent type for targeted optimization + scenario (str, optional): Application scenario for scenario-based optimization + + Returns: + AgentPromptModel: Optimized prompt model + + Example: + >>> optimized = optimize_existing_prompt( + ... existing_prompt_text="You are an AI assistant", + ... optimization_goal="Improve professionalism and accuracy", + ... agent_type="react", + ... scenario="Financial investment" + ... 
) + """ + # Analyze the structure and content of existing prompt + analysis_result = _analyze_existing_prompt(existing_prompt_text) + + # Provide improvement suggestions based on optimization goals + improvement_suggestions = _generate_improvement_suggestions( + analysis_result, optimization_goal, agent_type, scenario + ) + + # Generate optimized prompt + optimized_prompt = _apply_optimizations( + analysis_result, improvement_suggestions, agent_type + ) + + return optimized_prompt + + +def _analyze_existing_prompt(prompt_text: str) -> Dict[str, Any]: + """Analyze the structure and quality of existing prompts.""" + analysis = { + 'has_clear_role': False, + 'has_specific_goal': False, + 'has_detailed_instructions': False, + 'includes_variables': False, + 'language': 'zh' if any(ord(char) > 127 for char in prompt_text) else 'en', + 'estimated_type': None, + 'strengths': [], + 'weaknesses': [], + 'content_sections': {} + } + + # Check if it contains role definition + role_keywords = ['you are', 'i am', 'as a', 'acting as'] + if any(keyword in prompt_text for keyword in role_keywords): + analysis['has_clear_role'] = True + analysis['strengths'].append('Contains clear role definition') + else: + analysis['weaknesses'].append('Lacks clear role definition') + + # Check if it contains target description + goal_keywords = ['goal', 'objective', 'task', 'purpose', 'aim'] + if any(keyword in prompt_text for keyword in goal_keywords): + analysis['has_specific_goal'] = True + analysis['strengths'].append('Contains target description') + else: + analysis['weaknesses'].append('Lacks clear target description') + + # Check variable placeholders + import re + variables = re.findall(r'\{(\w+)\}', prompt_text) + if variables: + analysis['includes_variables'] = True + analysis['strengths'].append(f'Contains variable placeholders: {", ".join(variables)}') + else: + analysis['weaknesses'].append('Lacks dynamic variable placeholders') + + # Estimate agent type + if any(keyword in prompt_text.lower() for keyword in ['tool', 'action', 'thought', 'observation']): + analysis['estimated_type'] = 'react' + elif any(keyword in prompt_text.lower() for keyword in ['background', 'retrieve', 'knowledge']): + analysis['estimated_type'] = 'rag' + elif any(keyword in prompt_text.lower() for keyword in ['plan', 'step', 'framework']): + analysis['estimated_type'] = 'planning' + + return analysis + + +def _generate_improvement_suggestions(analysis: Dict[str, Any], optimization_goal: str, + agent_type: Optional[str], scenario: Optional[str]) -> Dict[str, Any]: + """Generate improvement suggestions based on analysis results.""" + suggestions = { + 'role_optimization': [], + 'goal_optimization': [], + 'instruction_optimization': [], + 'variable_optimization': [], + 'scenario_optimization': [] + } + + # Provide suggestions based on analysis results + if not analysis['has_clear_role']: + suggestions['role_optimization'].append('Add clear role definition') + + if not analysis['has_specific_goal']: + suggestions['goal_optimization'].append('Add specific target description') + + if not analysis['includes_variables']: + suggestions['variable_optimization'].append('Add necessary variable placeholders') + + # Suggestions based on optimization goals + if 'professional' in optimization_goal.lower(): + suggestions['role_optimization'].append('Enhance professional role description') + suggestions['instruction_optimization'].append('Add professional terminology and standards') + + if 'accuracy' in optimization_goal.lower() or 'accurate' in 
optimization_goal.lower(): + suggestions['instruction_optimization'].append('Add accuracy requirements and verification steps') + + if 'efficiency' in optimization_goal.lower(): + suggestions['instruction_optimization'].append('Optimize workflow and procedures') + + # Scenario-based suggestions + if scenario: + suggestions['scenario_optimization'].append(f'Customize for {scenario} scenario') + + return suggestions + + +def _apply_optimizations(analysis: Dict[str, Any], suggestions: Dict[str, Any], agent_type: Optional[str]) -> AgentPromptModel: + """Apply optimization suggestions to generate new prompt.""" + # Here we can generate optimized prompts based on analysis results and suggestions combined with templates + # For simplicity, we use templates to generate an improved version + + optimized_introduction = "You are a professional intelligent assistant with rich professional knowledge and experience." + optimized_target = "Your goal is to provide accurate, professional, and useful services to meet users' specific needs." + optimized_instruction = """Please follow these working principles: +1. Accurately understand user needs and background +2. Provide professional, accurate information and recommendations +3. Maintain friendly and patient service attitude +4. Ensure relevance and practicality of responses +5. Adjust response style according to specific scenarios + +User requirements: {input}""" + + return AgentPromptModel( + introduction=optimized_introduction, + target=optimized_target, + instruction=optimized_instruction + ) + + +def generate_prompt_config(task_description: str, + agent_type: str = 'react', + scenario: Optional[str] = None, + specific_requirements: Optional[str] = None, + version_name: Optional[str] = None, + output_file: Optional[str] = None) -> str: + """Generate prompt configuration for agentUniverse agents. + + This is the core functionality: automatically generate prompts based on + user-provided scenarios and content. + + Args: + task_description (str): Task description detailing the work the agent needs to complete + agent_type (str): Agent type, supports react/rag/planning/executing/expressing/reviewing/workflow + scenario (str, optional): Application scenario for generating targeted prompts + specific_requirements (str, optional): Specific requirements for customizing prompt content + version_name (str, optional): Version name for the YAML configuration version field + output_file (str, optional): Output file path, automatically saves to file if provided + + Returns: + str: Generated YAML configuration content + + Example: + >>> yaml_config = generate_prompt_config( + ... task_description="Customer service assistant", + ... agent_type="react", + ... scenario="E-commerce platform", + ... output_file="customer_service.yaml" + ... 
) + """ + # Generate prompt template + prompt_model = PromptTemplateHelper.generate_prompt_template( + task_description=task_description, + agent_type=agent_type, + scenario=scenario, + specific_requirements=specific_requirements + ) + + # Generate YAML configuration + yaml_config = PromptTemplateHelper.generate_yaml_config( + prompt_model=prompt_model, + version_name=version_name, + agent_type=agent_type + ) + + # Save to file + if output_file: + with open(output_file, 'w', encoding='utf-8') as f: + f.write(yaml_config) + print(f"Prompt configuration saved to: {output_file}") + + return yaml_config + + +# Usage example +if __name__ == "__main__": + # Example 1: Generate ReAct agent prompt + yaml_config = generate_prompt_config( + task_description="智能客服助手,处理用户咨询和订单问题", + agent_type="react", + scenario="电商平台", + version_name="customer_service.cn", + output_file="customer_service_prompt.yaml" + ) + print("Generated configuration:\n", yaml_config) + + # Example 2: Generate RAG agent prompt + generate_prompt_config( + task_description="保险产品咨询专家,基于产品知识库回答用户问题", + agent_type="rag", + scenario="保险行业", + version_name="insurance_consultant.cn", + output_file="insurance_consultant_prompt.yaml" + ) diff --git a/examples/sample_apps/prompt_generator_app/.gitignore b/examples/sample_apps/prompt_generator_app/.gitignore new file mode 100644 index 00000000..f3f7c7d6 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/.gitignore @@ -0,0 +1,68 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Virtual environments +venv/ +env/ +ENV/ +.venv/ +.env/ + +# IDEs +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Logs +*.log +logs/ + +# Database +*.db +*.sqlite +*.sqlite3 + +# Generated prompt files (examples) +*.yaml +!intelligence/agentic/prompt/examples/*.yaml + +# Coverage +.coverage +htmlcov/ + +# pytest +.pytest_cache/ + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Local config +config/local_* +.env.* \ No newline at end of file diff --git a/examples/sample_apps/prompt_generator_app/README.md b/examples/sample_apps/prompt_generator_app/README.md new file mode 100644 index 00000000..2b720e9f --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/README.md @@ -0,0 +1,9 @@ +# agentUniverse- Prompt Generator App +## Introduction +This project is a sample application for the agentUniverse prompt generation and optimization capabilities. + +## Quick Start +You can run this project based on the [Quick Start](https://github.com/agentuniverse-ai/agentUniverse/blob/master/docs/guidebook/en/Get_Start/Quick_Start.md). + +## Guidebook +For more detailed information, please refer to the [agentUniverse Guidebook](https://github.com/agentuniverse-ai/agentUniverse/blob/master/docs/guidebook/en/Contents.md). 
\ No newline at end of file diff --git a/examples/sample_apps/prompt_generator_app/README_zh.md b/examples/sample_apps/prompt_generator_app/README_zh.md new file mode 100644 index 00000000..b5463b7c --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/README_zh.md @@ -0,0 +1,9 @@ +# agentUniverse- Prompt生成器应用 +## 介绍 +本项目是agentUniverse的prompt生成和优化示例应用。 + +## 快速开始 +您可以基于 [快速开始](https://github.com/agentuniverse-ai/agentUniverse/blob/master/docs/guidebook/zh/%E5%BC%80%E5%A7%8B%E4%BD%BF%E7%94%A8/%E5%BF%AB%E9%80%9F%E5%BC%80%E5%A7%8B.md) 运行第一个案例。 + +## 用户指南 +更多详细信息,请参阅 [用户指南](https://github.com/agentuniverse-ai/agentUniverse/blob/master/docs/guidebook/zh/%E7%9B%AE%E5%BD%95.md)。 \ No newline at end of file diff --git a/examples/sample_apps/prompt_generator_app/__init__.py b/examples/sample_apps/prompt_generator_app/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/agent_instance/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/agent_instance/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/agent/agent_instance/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/knowledge/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/knowledge/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/knowledge/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 
2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/llm/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/llm/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/llm/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/memory/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/memory/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/memory/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/prompt/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/prompt/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/prompt/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/tool/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/tool/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/tool/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/agentic/work_pattern/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/agentic/work_pattern/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/agentic/work_pattern/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/service/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/service/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/service/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/__init__.py new file mode 100644 index 00000000..b2e87a78 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 
2024/12/25 15:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/prompt_service.py b/examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/prompt_service.py new file mode 100644 index 00000000..8d8ef42d --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/service/agent_service/prompt_service.py @@ -0,0 +1,234 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: prompt_service.py +"""Prompt Generation Service. + +Provides core prompt generation service functionality for prompt_generator_app. +This service layer encapsulates the core logic for prompt generation, optimization, +and configuration management. +""" +import os +import sys +import yaml +from pathlib import Path +from typing import Any, Dict, List, Optional + +# Add project root directory to Python path +project_root = Path(__file__).parent.parent.parent.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import ( + PromptTemplateHelper, + generate_prompt_config, + optimize_existing_prompt +) + + +class PromptGenerationService: + """Prompt Generation Service class. + + Provides comprehensive prompt generation, optimization and management functionality. + Suitable for business logic encapsulation in this sample application. + """ + + def __init__(self): + """Initialize the prompt generation service.""" + self.supported_types = list(PromptTemplateHelper.AGENT_TEMPLATES.keys()) + + def generate_agent_prompt(self, agent_type: str, task_description: str, + scenario: Optional[str] = None) -> Dict[str, Any]: + """Generate agent prompt configuration. + + Args: + agent_type: Type of the agent. + task_description: Description of the task. + scenario: Optional scenario description. + + Returns: + Generated prompt configuration. + + Raises: + ValueError: If agent_type is not supported. + Exception: If generation fails. + """ + try: + # Call core generation function + config = generate_prompt_config( + task_description=task_description, + agent_type=agent_type, + scenario=scenario, + output_file=None # Do not write to file, return config directly + ) + + return { + "status": "success", + "config": config, + "agent_type": agent_type, + "task_description": task_description, + "scenario": scenario + } + except Exception as e: + return { + "status": "error", + "error": str(e), + "agent_type": agent_type, + "task_description": task_description + } + + def optimize_prompt(self, existing_prompt: str, optimization_goal: str, + agent_type: Optional[str] = None) -> Dict[str, Any]: + """Optimize existing prompt. + + Args: + existing_prompt: Existing prompt content. + optimization_goal: Optimization goal. + agent_type: Optional agent type. + + Returns: + Optimized prompt and analysis results. + + Raises: + ValueError: If input parameters are invalid. + Exception: If optimization fails. 
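+
+        Example:
+            A usage sketch; the optimized wording itself is produced by
+            optimize_existing_prompt and may vary between versions:
+
+            >>> service = PromptGenerationService()
+            >>> outcome = service.optimize_prompt(
+            ...     existing_prompt="You are an AI assistant.",
+            ...     optimization_goal="Improve professionalism and accuracy",
+            ...     agent_type="react")
+            >>> outcome["status"]
+            'success'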
+ """ + try: + # Call core optimization function + result = optimize_existing_prompt( + existing_prompt_text=existing_prompt, + optimization_goal=optimization_goal, + agent_type=agent_type + ) + + return { + "status": "success", + "result": result, + "original_prompt": existing_prompt, + "optimization_goal": optimization_goal + } + except Exception as e: + return { + "status": "error", + "error": str(e), + "original_prompt": existing_prompt, + "optimization_goal": optimization_goal + } + + def get_supported_agent_types(self) -> List[str]: + """Get list of supported agent types. + + Returns: + List of supported agent types. + """ + return self.supported_types + + def validate_prompt_config(self, config: Dict[str, Any]) -> Dict[str, Any]: + """Validate prompt configuration validity. + + Args: + config: Prompt configuration to validate. + + Returns: + Validation results with details. + """ + validation_result = { + "is_valid": True, + "errors": [], + "warnings": [] + } + + # Check required fields + required_fields = ["introduction", "target", "instruction", "metadata"] + for field in required_fields: + if field not in config: + validation_result["errors"].append(f"Missing required field: {field}") + validation_result["is_valid"] = False + + # Check field types + if "metadata" in config: + if not isinstance(config["metadata"], dict): + validation_result["errors"].append("metadata must be a dictionary") + validation_result["is_valid"] = False + elif "type" not in config["metadata"]: + validation_result["warnings"].append("metadata.type field recommended") + + return validation_result + + def save_prompt_config(self, config: Dict[str, Any], output_path: str) -> str: + """Save prompt configuration to file. + + Args: + config: Prompt configuration to save. + output_path: Output file path. + + Returns: + Path of the saved file. + + Raises: + IOError: If file writing fails. + ValueError: If config is invalid. + """ + try: + # Ensure output directory exists + output_dir = os.path.dirname(output_path) + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir, exist_ok=True) + + # Validate config before saving + validation = self.validate_prompt_config(config) + if not validation["is_valid"]: + raise ValueError(f"Invalid config: {validation['errors']}") + + # Write YAML file + with open(output_path, 'w', encoding='utf-8') as f: + yaml.dump(config, f, default_flow_style=False, allow_unicode=True, indent=2) + + return output_path + except Exception as e: + raise IOError(f"Failed to save config to {output_path}: {str(e)}") + + def batch_generate_prompts(self, tasks: List[Dict[str, str]]) -> Dict[str, Any]: + """Batch generate prompts. + + Args: + tasks: List of task configurations for batch processing. + + Returns: + Batch processing results with statistics. 
+ """ + results = [] + + for i, task in enumerate(tasks): + try: + result = self.generate_agent_prompt( + agent_type=task.get("agent_type", "react"), + task_description=task.get("task_description", ""), + scenario=task.get("scenario") + ) + results.append({ + "index": i, + "task": task, + "result": result + }) + except Exception as e: + results.append({ + "index": i, + "task": task, + "result": { + "status": "error", + "error": str(e) + } + }) + + successful = sum(1 for r in results if r["result"]["status"] == "success") + + return { + "total": len(tasks), + "successful": successful, + "failed": len(tasks) - successful, + "results": results + } diff --git a/examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/template_service.py b/examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/template_service.py new file mode 100644 index 00000000..7b07d110 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/service/classic_service/template_service.py @@ -0,0 +1,227 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: template_service.py +"""Prompt Template Service. + +Provides template management and generation functionality for prompt_generator_app. +This service layer focuses on template storage, retrieval, and management. +""" +import os +import sys +from pathlib import Path +from typing import Dict, List, Optional, Any + +# Add project root directory to Python path +project_root = Path(__file__).parent.parent.parent.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import PromptTemplateHelper + + +class PromptTemplateService: + """Prompt Template Service class. + + Provides template management, retrieval, and generation functionality. + Suitable for template-related business logic in this sample application. + """ + + def __init__(self): + """Initialize the template service.""" + self.template_helper = PromptTemplateHelper + self.preset_templates = self._load_preset_templates() + + def _load_preset_templates(self) -> Dict[str, Dict]: + """Load preset templates. + + Returns: + Dictionary of preset template configurations. + """ + return { + "customer_service": { + "name": "Customer Service Template", + "description": "Suitable for e-commerce and service industry customer service scenarios", + "agent_type": "react", + "scenario": "customer service", + "base_prompt": "You are a professional AI customer service assistant, skilled at handling customer inquiries and problem solving." + }, + "data_analyst": { + "name": "Data Analyst Template", + "description": "Suitable for data analysis and business intelligence scenarios", + "agent_type": "rag", + "scenario": "data analysis", + "base_prompt": "You are a professional data analyst, skilled at discovering insights from data." 
+ }, + "content_creator": { + "name": "Content Creator Template", + "description": "Suitable for content creation and creative writing scenarios", + "agent_type": "planning", + "scenario": "content creation", + "base_prompt": "You are a creative content creator, skilled at producing engaging and high-quality content." + } + } + + def get_template_by_type(self, agent_type: str) -> Dict[str, Any]: + """Get template by agent type. + + Args: + agent_type: Type of the agent. + + Returns: + Template data for the specified agent type. + + Raises: + ValueError: If agent_type is not supported. + """ + if agent_type not in self.template_helper.AGENT_TEMPLATES: + raise ValueError(f"Unsupported agent type: {agent_type}") + + return self.template_helper.AGENT_TEMPLATES[agent_type] + + def get_preset_template(self, template_name: str) -> Dict[str, Any]: + """Get preset template by name. + + Args: + template_name: Name of the preset template. + + Returns: + Preset template configuration. + + Raises: + KeyError: If template_name is not found. + """ + if template_name not in self.preset_templates: + raise KeyError(f"Preset template not found: {template_name}") + + return self.preset_templates[template_name] + + def list_preset_templates(self) -> List[str]: + """List all available preset template names. + + Returns: + List of preset template names. + """ + return list(self.preset_templates.keys()) + + def list_agent_types(self) -> List[str]: + """List all supported agent types. + + Returns: + List of supported agent types. + """ + return list(self.template_helper.AGENT_TEMPLATES.keys()) + + def create_custom_template(self, template_name: str, template_config: Dict[str, Any]) -> bool: + """Create a custom template. + + Args: + template_name: Name for the custom template. + template_config: Configuration for the custom template. + + Returns: + True if template was created successfully. + + Raises: + ValueError: If template configuration is invalid. + """ + # Validate required fields + required_fields = ["name", "description", "agent_type", "scenario"] + for field in required_fields: + if field not in template_config: + raise ValueError(f"Missing required field: {field}") + + # Validate agent type + if template_config["agent_type"] not in self.template_helper.AGENT_TEMPLATES: + raise ValueError(f"Unsupported agent type: {template_config['agent_type']}") + + # Add to preset templates + self.preset_templates[template_name] = template_config + return True + + def update_template(self, template_name: str, updates: Dict[str, Any]) -> bool: + """Update an existing template. + + Args: + template_name: Name of the template to update. + updates: Dictionary of fields to update. + + Returns: + True if template was updated successfully. + + Raises: + KeyError: If template_name is not found. + """ + if template_name not in self.preset_templates: + raise KeyError(f"Template not found: {template_name}") + + # Validate agent type if being updated + if "agent_type" in updates: + if updates["agent_type"] not in self.template_helper.AGENT_TEMPLATES: + raise ValueError(f"Unsupported agent type: {updates['agent_type']}") + + # Apply updates + self.preset_templates[template_name].update(updates) + return True + + def delete_template(self, template_name: str) -> bool: + """Delete a custom template. + + Args: + template_name: Name of the template to delete. + + Returns: + True if template was deleted successfully. + + Raises: + KeyError: If template_name is not found. 
+ """ + if template_name not in self.preset_templates: + raise KeyError(f"Template not found: {template_name}") + + del self.preset_templates[template_name] + return True + + def search_templates(self, query: str) -> List[Dict[str, Any]]: + """Search templates by keyword. + + Args: + query: Search query string. + + Returns: + List of matching template configurations. + """ + results = [] + query_lower = query.lower() + + for name, config in self.preset_templates.items(): + if (query_lower in name.lower() or + query_lower in config.get("name", "").lower() or + query_lower in config.get("description", "").lower() or + query_lower in config.get("scenario", "").lower()): + results.append({ + "template_name": name, + **config + }) + + return results + + def get_template_stats(self) -> Dict[str, Any]: + """Get template statistics. + + Returns: + Dictionary with template statistics. + """ + agent_type_counts = {} + for config in self.preset_templates.values(): + agent_type = config.get("agent_type", "unknown") + agent_type_counts[agent_type] = agent_type_counts.get(agent_type, 0) + 1 + + return { + "total_templates": len(self.preset_templates), + "agent_type_distribution": agent_type_counts, + "available_agent_types": len(self.template_helper.AGENT_TEMPLATES) + } diff --git a/examples/sample_apps/prompt_generator_app/intelligence/test/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/test/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/test/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/test/prompt_generator.py b/examples/sample_apps/prompt_generator_app/intelligence/test/prompt_generator.py new file mode 100644 index 00000000..f00ece21 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/test/prompt_generator.py @@ -0,0 +1,188 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: prompt_generator_demo.py +"""Prompt Generator Demo. + +This demo script shows how to use the prompt generator to create different types of agent prompts. +Suitable for learning and understanding how to use the prompt generator. 
+""" +import os +import sys +from pathlib import Path + +# Add project root directory to Python path +project_root = Path(__file__).parent.parent.parent.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import ( + PromptTemplateHelper, + generate_prompt_config +) + + +def demo_prompt_generation(): + """Demo prompt generation functionality.""" + print("Prompt Generator Demo") + print("=" * 50) + + # Demo 1: ReAct Agent Prompt + demo_react_agent_prompt() + + # Demo 2: RAG Agent Prompt + demo_rag_agent_prompt() + + # Demo 3: Custom Scenario + demo_custom_scenario() + + # Demo 4: Supported Agent Types + demo_supported_types() + + +def demo_react_agent_prompt(): + """Demo ReAct agent prompt generation.""" + print("\nDemo 1: ReAct Agent Prompt Generation") + print("-" * 40) + + task_description = "Intelligent customer service assistant for e-commerce platform" + scenario = "online shopping support" + + try: + config = generate_prompt_config( + task_description=task_description, + agent_type="react", + scenario=scenario, + output_file=None + ) + + print(f"Successfully generated ReAct agent prompt") + print(f"Task: {task_description}") + print(f"Scenario: {scenario}") + print(f"Agent Type: react") + + # Check if config is string (YAML) or dict + if isinstance(config, str): + print(f"Generated YAML configuration") + print(f"Preview:\n{config[:200]}...") + else: + print(f"Generated sections: {list(config.keys())}") + # Display introduction preview + if 'introduction' in config: + intro_preview = config['introduction'][:100] + "..." if len(config['introduction']) > 100 else config['introduction'] + print(f"Introduction Preview: {intro_preview}") + + except Exception as e: + print(f"Error generating ReAct prompt: {str(e)}") + + +def demo_rag_agent_prompt(): + """Demo RAG agent prompt generation.""" + print("\nDemo 2: RAG Agent Prompt Generation") + print("-" * 40) + + task_description = "Medical consultation expert for online healthcare platform" + scenario = "medical advice and diagnosis support" + + try: + config = generate_prompt_config( + task_description=task_description, + agent_type="rag", + scenario=scenario, + output_file=None + ) + + print(f"Successfully generated RAG agent prompt") + print(f"Task: {task_description}") + print(f"Scenario: {scenario}") + print(f"Agent Type: rag") + + # Check if config is string (YAML) or dict + if isinstance(config, str): + print(f"Generated YAML configuration") + print(f"Preview:\n{config[:200]}...") + else: + print(f"Generated sections: {list(config.keys())}") + # Display target preview + if 'target' in config: + target_preview = config['target'][:100] + "..." 
if len(config['target']) > 100 else config['target'] + print(f"Target Preview: {target_preview}") + + except Exception as e: + print(f"Error generating RAG prompt: {str(e)}") + + +def demo_custom_scenario(): + """Demo custom scenario prompt generation.""" + print("\nDemo 3: Custom Scenario Prompt Generation") + print("-" * 40) + + task_description = "Financial advisor for investment portfolio management" + scenario = "wealth management and investment consultation" + + try: + config = generate_prompt_config( + task_description=task_description, + agent_type="planning", + scenario=scenario, + output_file=None + ) + + print(f"Successfully generated Planning agent prompt") + print(f"Task: {task_description}") + print(f"Scenario: {scenario}") + print(f"Agent Type: planning") + + # Check if config is string (YAML) or dict + if isinstance(config, str): + print(f"Generated YAML configuration") + print(f"Preview:\n{config[:200]}...") + else: + print(f"Generated sections: {list(config.keys())}") + # Display instruction preview + if 'instruction' in config: + instruction_preview = config['instruction'][:150] + "..." if len(config['instruction']) > 150 else config['instruction'] + print(f"Instruction Preview: {instruction_preview}") + + except Exception as e: + print(f"Error generating Planning prompt: {str(e)}") + + +def demo_supported_types(): + """Demo supported agent types listing.""" + print("\nDemo 4: Supported Agent Types") + print("-" * 40) + + try: + supported_types = list(PromptTemplateHelper.AGENT_TEMPLATES.keys()) + + print(f"Available agent types ({len(supported_types)}):") + for i, agent_type in enumerate(supported_types, 1): + template_info = PromptTemplateHelper.AGENT_TEMPLATES[agent_type] + name = template_info.get('name', agent_type) + print(f" {i}. 
{agent_type} - {name}") + + except Exception as e: + print(f"Error listing agent types: {str(e)}") + + +def main(): + """Main function to run all demos.""" + try: + demo_prompt_generation() + print("\nDemo completed successfully!") + print("\nNext steps:") + print("• Try generating prompts with different agent types") + print("• Experiment with various scenarios and tasks") + print("• Use the CLI tool: python scripts/generate_prompt.py") + + except Exception as e: + print(f"\nDemo failed: {str(e)}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/examples/sample_apps/prompt_generator_app/intelligence/test/prompt_optimizer.py b/examples/sample_apps/prompt_generator_app/intelligence/test/prompt_optimizer.py new file mode 100644 index 00000000..4088bdc4 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/test/prompt_optimizer.py @@ -0,0 +1,152 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: prompt_optimizer_demo.py + +import sys +import os +from pathlib import Path + +# Add project root to Python path +project_root = Path(__file__).parent.parent.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import optimize_existing_prompt + + +def demo_basic_prompt_optimization(): + """Demonstrate basic prompt optimization functionality.""" + print("Basic Prompt Optimization Demo") + print("-" * 50) + + basic_prompts = [ + { + "original": "You are an AI assistant.", + "goal": "Improve professionalism and detail", + "type": "react" + }, + { + "original": "Help me analyze data.", + "goal": "Enhance analysis structure and depth", + "type": "rag" + }, + { + "original": "Write an article.", + "goal": "Provide clearer writing guidance framework", + "type": "expressing" + } + ] + + for i, example in enumerate(basic_prompts, 1): + print(f"\nExample {i}:") + print(f"Original Prompt: {example['original']}") + print(f"Optimization Goal: {example['goal']}") + print(f"Agent Type: {example['type']}") + + try: + optimized = optimize_existing_prompt( + existing_prompt_text=example['original'], + optimization_goal=example['goal'], + agent_type=example['type'] + ) + print(f"Optimization Result:") + print(f" Role Definition: {optimized.introduction}") + print(f" Target Setting: {optimized.target}") + print(f" Instruction Content: {optimized.instruction[:100]}...") + print() + except Exception as e: + print(f"Optimization Failed: {e}\n") + + +def demo_advanced_prompt_optimization(): + """Demonstrate advanced prompt optimization functionality.""" + print("Advanced Prompt Optimization Demo") + print("-" * 50) + + advanced_examples = [ + { + "original": """You are a customer service representative. Answer customer questions. Be polite.""", + "goal": "Enhance professional service capabilities and improve problem-solving efficiency", + "scenario": "E-commerce platform", + "type": "react" + }, + { + "original": """Answer questions based on documentation. 
Be accurate.""", + "goal": "Improve answer quality and enhance citation standards", + "scenario": "Technical documentation query", + "type": "rag" + } + ] + + for i, example in enumerate(advanced_examples, 1): + print(f"\nAdvanced Example {i}:") + print(f"Original Prompt: {example['original']}") + print(f"Optimization Goal: {example['goal']}") + print(f"Application Scenario: {example['scenario']}") + + try: + optimized = optimize_existing_prompt( + existing_prompt_text=example['original'], + optimization_goal=example['goal'], + agent_type=example['type'], + scenario=example['scenario'] + ) + print(f"Optimization Result:") + print(f" Role Definition: {optimized.introduction}") + print(f" Target Setting: {optimized.target}") + print(f" Instruction Framework: {optimized.instruction[:150]}...") + print() + except Exception as e: + print(f"Optimization Failed: {e}\n") + + +def demo_yaml_prompt_optimization(): + """Demonstrate YAML format prompt optimization.""" + print("YAML Format Prompt Optimization Demo") + print("-" * 50) + + yaml_prompt = """ +introduction: You are an assistant +target: Help users +instruction: | + Answer questions +metadata: + type: 'PROMPT' + version: 'v1' +""" + + print("Original YAML Prompt:") + print(yaml_prompt) + + try: + optimized = optimize_existing_prompt( + existing_prompt_text=yaml_prompt, + optimization_goal="Improve prompt structure and enhance instruction clarity", + agent_type="react" + ) + + print("Optimized Content:") + print(f"Role Definition: {optimized.introduction}") + print(f"Target Setting: {optimized.target}") + print(f"Instruction Content: {optimized.instruction[:200]}...") + + except Exception as e: + print(f"Optimization Failed: {e}") + + +if __name__ == '__main__': + print("=" * 60) + print("agentUniverse Prompt Optimizer Demonstration") + print("=" * 60) + + demo_basic_prompt_optimization() + print("\n" + "=" * 60) + demo_advanced_prompt_optimization() + print("\n" + "=" * 60) + demo_yaml_prompt_optimization() + + print("\nPrompt optimization demonstration completed successfully!") diff --git a/examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_generator.py b/examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_generator.py new file mode 100644 index 00000000..c119c58b --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_generator.py @@ -0,0 +1,231 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: test_prompt_generator.py +"""Test cases for prompt generator functionality. + +Comprehensive test suite for the prompt generator app functionality, +ensuring all components work correctly together. 
+""" +import sys +import unittest +import tempfile +import os +from pathlib import Path + +# Add project root directory to Python path +project_root = Path(__file__).parent.parent.parent.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import ( + generate_prompt_config, + optimize_existing_prompt, + PromptTemplateHelper, + PromptGenerationError, + UnsupportedAgentTypeError +) + + +class TestPromptGenerator(unittest.TestCase): + """Test cases for prompt generator functionality.""" + + def setUp(self): + """Set up test fixtures.""" + self.test_task = "Customer service assistant for e-commerce platform" + self.test_scenario = "online shopping support" + self.valid_agent_types = ["react", "rag", "planning", "executing"] + + def test_generate_basic_prompt(self): + """Test basic prompt generation functionality.""" + for agent_type in self.valid_agent_types: + with self.subTest(agent_type=agent_type): + result = generate_prompt_config( + task_description=self.test_task, + agent_type=agent_type, + scenario=self.test_scenario, + output_file=None + ) + + # Verify required fields + self.assertIn('introduction', result) + self.assertIn('target', result) + self.assertIn('instruction', result) + self.assertIn('metadata', result) + + # Verify metadata + self.assertEqual(result['metadata']['type'], 'PROMPT') + self.assertIn('version', result['metadata']) + + # Verify content is not empty + self.assertTrue(result['introduction'].strip()) + self.assertTrue(result['target'].strip()) + self.assertTrue(result['instruction'].strip()) + + def test_generate_prompt_with_file_output(self): + """Test prompt generation with file output.""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f: + output_path = f.name + + try: + result = generate_prompt_config( + task_description=self.test_task, + agent_type="react", + scenario=self.test_scenario, + output_file=output_path + ) + + # Verify file was created + self.assertTrue(os.path.exists(output_path)) + + # Verify file content + with open(output_path, 'r', encoding='utf-8') as f: + content = f.read() + self.assertIn('introduction:', content) + self.assertIn('target:', content) + self.assertIn('instruction:', content) + + finally: + if os.path.exists(output_path): + os.unlink(output_path) + + def test_unsupported_agent_type(self): + """Test error handling for unsupported agent types.""" + with self.assertRaises(UnsupportedAgentTypeError): + generate_prompt_config( + task_description=self.test_task, + agent_type="invalid_type", + scenario=self.test_scenario + ) + + def test_empty_task_description(self): + """Test handling of empty task description.""" + with self.assertRaises(ValueError): + generate_prompt_config( + task_description="", + agent_type="react", + scenario=self.test_scenario + ) + + def test_optimize_existing_prompt(self): + """Test prompt optimization functionality.""" + existing_prompt = """ + You are a helpful assistant. + Please answer questions. 
+ """ + + result = optimize_existing_prompt( + existing_prompt_text=existing_prompt, + optimization_goal="improve clarity and engagement", + agent_type="react" + ) + + # Verify result structure + self.assertIn('optimized_prompt', result) + self.assertIn('analysis', result) + self.assertIn('improvements', result) + + # Verify optimization actually changed content + optimized = result['optimized_prompt'] + self.assertIsInstance(optimized, dict) + self.assertIn('introduction', optimized) + self.assertIn('target', optimized) + self.assertIn('instruction', optimized) + + def test_template_helper_agent_types(self): + """Test PromptTemplateHelper agent types.""" + agent_templates = PromptTemplateHelper.AGENT_TEMPLATES + + # Verify structure + self.assertIsInstance(agent_templates, dict) + self.assertGreater(len(agent_templates), 0) + + # Verify each template has required fields + for agent_type, template in agent_templates.items(): + with self.subTest(agent_type=agent_type): + self.assertIn('name', template) + self.assertIn('introduction_template', template) + self.assertIn('target_template', template) + self.assertIn('instruction_template', template) + + def test_prompt_generation_with_scenario(self): + """Test prompt generation incorporates scenario correctly.""" + scenario = "financial services" + + result = generate_prompt_config( + task_description=self.test_task, + agent_type="rag", + scenario=scenario, + output_file=None + ) + + # Check that scenario is incorporated + full_content = (result['introduction'] + ' ' + + result['target'] + ' ' + + result['instruction']).lower() + + # Should contain scenario-related terms + scenario_terms = scenario.lower().split() + scenario_incorporated = any(term in full_content for term in scenario_terms) + self.assertTrue(scenario_incorporated, + f"Scenario '{scenario}' not incorporated in generated content") + + def test_prompt_generation_without_scenario(self): + """Test prompt generation works without scenario.""" + result = generate_prompt_config( + task_description=self.test_task, + agent_type="react", + scenario=None, + output_file=None + ) + + # Should still generate valid prompt + self.assertIn('introduction', result) + self.assertIn('target', result) + self.assertIn('instruction', result) + self.assertTrue(result['introduction'].strip()) + + def test_multiple_agent_types_consistency(self): + """Test that different agent types produce consistent structure.""" + results = {} + + for agent_type in self.valid_agent_types: + results[agent_type] = generate_prompt_config( + task_description=self.test_task, + agent_type=agent_type, + scenario=self.test_scenario, + output_file=None + ) + + # Verify all have same structure + first_keys = set(results[self.valid_agent_types[0]].keys()) + for agent_type in self.valid_agent_types[1:]: + self.assertEqual(set(results[agent_type].keys()), first_keys, + f"Agent type {agent_type} has different structure") + + def test_error_handling_invalid_optimization_goal(self): + """Test optimization with invalid goal.""" + existing_prompt = "You are a helpful assistant." 
+ + # Empty optimization goal should not crash + result = optimize_existing_prompt( + existing_prompt_text=existing_prompt, + optimization_goal="", + agent_type="react" + ) + + # Should still return valid structure + self.assertIn('optimized_prompt', result) + self.assertIn('analysis', result) + + +def run_tests(): + """Run all test cases.""" + unittest.main(verbosity=2) + + +if __name__ == "__main__": + run_tests() diff --git a/examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_optimizer.py b/examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_optimizer.py new file mode 100644 index 00000000..89bbaa25 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/test/test_prompt_optimizer.py @@ -0,0 +1,171 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: test_prompt_optimizer.py + +import unittest +import sys +import os +from pathlib import Path + +# Add project root to Python path +project_root = Path(__file__).parent.parent.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import ( + optimize_existing_prompt, + _analyze_existing_prompt, + _generate_improvement_suggestions, + _apply_optimizations +) + + +class PromptOptimizerTest(unittest.TestCase): + """ + Prompt optimizer functionality test class. + """ + + def test_basic_prompt_optimization(self): + """Test basic prompt optimization functionality.""" + original_prompt = "你是一个AI助手,帮助用户回答问题。" + optimization_goal = "提高专业性和准确性" + + result = optimize_existing_prompt( + existing_prompt_text=original_prompt, + optimization_goal=optimization_goal + ) + + self.assertIsNotNone(result) + self.assertIsNotNone(result.introduction) + self.assertIsNotNone(result.target) + self.assertIsNotNone(result.instruction) + + # Ensure optimized content is more detailed + self.assertGreater(len(result.introduction), len("你是一个AI助手")) + + print(f"\nBasic optimization test results:") + print(f"Original: {original_prompt}") + print(f"Optimized introduction: {result.introduction}") + + def test_typed_prompt_optimization(self): + """Test type-specific prompt optimization.""" + original_prompt = "你是客服,回答用户问题。" + optimization_goal = "增强服务质量" + agent_type = "react" + + result = optimize_existing_prompt( + existing_prompt_text=original_prompt, + optimization_goal=optimization_goal, + agent_type=agent_type + ) + + self.assertIsNotNone(result) + # ReAct type optimization should generate reasonable content + combined_content = f"{result.introduction} {result.target} {result.instruction}".lower() + self.assertTrue(any(keyword in combined_content for keyword in ["工具", "专业", "智能", "助手", "服务"])) + + print(f"\nType-specific optimization test results:") + print(f"Type: {agent_type}") + print(f"Optimized instruction includes tools: {'工具' in result.instruction}") + + def test_scenario_prompt_optimization(self): + """Test scenario-based prompt optimization.""" + original_prompt = "分析数据" + optimization_goal = "提升分析深度" + scenario = "电商业务分析" + + result = optimize_existing_prompt( + existing_prompt_text=original_prompt, + optimization_goal=optimization_goal, + scenario=scenario + ) + + self.assertIsNotNone(result) + # Scenario-based optimization should reflect scenario-related information in content + combined_content = f"{result.introduction} {result.target} {result.instruction}".lower() + # Scenario-based optimization should 
include relevant keywords + self.assertTrue(any(keyword in combined_content for keyword in ["分析", "数据", "专业", "智能", "助手", "服务", "业务"])) + + print(f"\nScenario-based optimization test results:") + print(f"Scenario: {scenario}") + print(f"Optimized content includes analysis elements: {'分析' in combined_content}") + + def test_yaml_format_optimization(self): + """Test YAML format prompt optimization.""" + yaml_prompt = """ +introduction: 你是助手 +target: 帮助用户 +instruction: | + 回答问题 +metadata: + type: 'PROMPT' + version: 'v1' +""" + + result = optimize_existing_prompt( + existing_prompt_text=yaml_prompt, + optimization_goal="完善prompt结构" + ) + + self.assertIsNotNone(result) + self.assertNotEqual(result.introduction, "你是助手") # Should be optimized + self.assertNotEqual(result.target, "帮助用户") # Should be optimized + self.assertGreater(len(result.instruction), 10) # Instructions should be more detailed + + print(f"\nYAML format optimization test results:") + print(f"Original instruction length: {len('回答问题')}") + print(f"Optimized instruction length: {len(result.instruction)}") + + def test_optimization_error_handling(self): + """Test error handling in optimization process.""" + # Test handling of empty prompt (should return reasonable result without throwing exception) + result = optimize_existing_prompt( + existing_prompt_text="", + optimization_goal="优化空prompt" + ) + self.assertIsNotNone(result) + # Empty input should generate basic prompt structure + self.assertTrue(len(result.introduction) > 0 or len(result.target) > 0) + + # Test invalid optimization goals + result = optimize_existing_prompt( + existing_prompt_text="你是助手", + optimization_goal="" # Empty optimization goal + ) + # Even with empty goal, should return basic optimization result + self.assertIsNotNone(result) + + print(f"\nError handling test passed") + + def test_multiple_optimizations_consistency(self): + """Test consistency of multiple optimizations.""" + original_prompt = "你是分析师" + optimization_goal = "提升专业能力" + + # Perform two identical optimizations + result1 = optimize_existing_prompt( + existing_prompt_text=original_prompt, + optimization_goal=optimization_goal + ) + + result2 = optimize_existing_prompt( + existing_prompt_text=original_prompt, + optimization_goal=optimization_goal + ) + + # Results should be structurally consistent (although specific content may differ) + self.assertIsNotNone(result1.introduction) + self.assertIsNotNone(result2.introduction) + self.assertIsNotNone(result1.target) + self.assertIsNotNone(result2.target) + + print(f"\nConsistency test:") + print(f"Both optimizations successfully generated complete structures") + + +if __name__ == '__main__': + unittest.main() diff --git a/examples/sample_apps/prompt_generator_app/intelligence/utils/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/utils/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/utils/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/utils/common/__init__.py b/examples/sample_apps/prompt_generator_app/intelligence/utils/common/__init__.py new file mode 100644 index 00000000..6213bb85 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/utils/common/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- 
coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py diff --git a/examples/sample_apps/prompt_generator_app/intelligence/utils/common/prompt_util.py b/examples/sample_apps/prompt_generator_app/intelligence/utils/common/prompt_util.py new file mode 100644 index 00000000..ae71b7ed --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/utils/common/prompt_util.py @@ -0,0 +1,254 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: prompt_util.py +"""Prompt Utility Functions. + +Provides practical utility functions for prompt processing in prompt_generator_app. +This utility module focuses on prompt content analysis, validation, and transformation. +""" +import re +import json +from typing import Dict, Any, List, Optional, Tuple + + +class PromptUtil: + """Prompt utility class for content processing and validation.""" + + @staticmethod + def extract_prompt_variables(prompt_text: str) -> List[str]: + """Extract variable placeholders from prompt text. + + Args: + prompt_text: Prompt text to analyze. + + Returns: + List of variable names found in the prompt. + """ + # Find all {variable_name} patterns + pattern = r'\{([^}]+)\}' + variables = re.findall(pattern, prompt_text) + return list(set(variables)) # Remove duplicates + + @staticmethod + def validate_prompt_format(prompt_text: str) -> Dict[str, Any]: + """Validate prompt format validity. + + Args: + prompt_text: Prompt text to validate. + + Returns: + Validation results with details. + """ + result = { + "is_valid": True, + "errors": [], + "warnings": [], + "statistics": {} + } + + # Basic checks + if not prompt_text or not prompt_text.strip(): + result["errors"].append("Prompt text is empty") + result["is_valid"] = False + return result + + # Check for unmatched braces + open_braces = prompt_text.count('{') + close_braces = prompt_text.count('}') + if open_braces != close_braces: + result["errors"].append(f"Unmatched braces: {open_braces} open, {close_braces} close") + result["is_valid"] = False + + # Check for nested braces (not allowed in simple templates) + if re.search(r'\{[^}]*\{', prompt_text): + result["warnings"].append("Nested braces detected - may cause issues") + + # Statistics + variables = PromptUtil.extract_prompt_variables(prompt_text) + result["statistics"] = { + "character_count": len(prompt_text), + "word_count": len(prompt_text.split()), + "line_count": len(prompt_text.split('\n')), + "variable_count": len(variables), + "variables": variables + } + + return result + + @staticmethod + def substitute_variables(prompt_text: str, variables: Dict[str, str]) -> str: + """Substitute variables in prompt text. + + Args: + prompt_text: Prompt text with variables. + variables: Dictionary mapping variable names to values. + + Returns: + Prompt text with variables substituted. + + Raises: + ValueError: If required variables are missing. + """ + required_vars = PromptUtil.extract_prompt_variables(prompt_text) + missing_vars = [var for var in required_vars if var not in variables] + + if missing_vars: + raise ValueError(f"Missing required variables: {missing_vars}") + + result = prompt_text + for var_name, var_value in variables.items(): + result = result.replace(f"{{{var_name}}}", str(var_value)) + + return result + + @staticmethod + def analyze_prompt_complexity(prompt_text: str) -> Dict[str, Any]: + """Analyze prompt complexity metrics. 
+ + Args: + prompt_text: Prompt text to analyze. + + Returns: + Complexity analysis results. + """ + words = prompt_text.split() + sentences = re.split(r'[.!?]+', prompt_text) + sentences = [s.strip() for s in sentences if s.strip()] + + variables = PromptUtil.extract_prompt_variables(prompt_text) + + # Calculate complexity scores + avg_word_length = sum(len(word) for word in words) / len(words) if words else 0 + avg_sentence_length = sum(len(s.split()) for s in sentences) / len(sentences) if sentences else 0 + + complexity_score = ( + min(avg_word_length / 10, 1.0) * 0.3 + + min(avg_sentence_length / 20, 1.0) * 0.4 + + min(len(variables) / 10, 1.0) * 0.3 + ) + + return { + "complexity_score": complexity_score, + "word_count": len(words), + "sentence_count": len(sentences), + "variable_count": len(variables), + "avg_word_length": avg_word_length, + "avg_sentence_length": avg_sentence_length, + "readability": "high" if complexity_score < 0.3 else "medium" if complexity_score < 0.7 else "low" + } + + @staticmethod + def extract_instructions(prompt_text: str) -> List[str]: + """Extract instruction sentences from prompt text. + + Args: + prompt_text: Prompt text to analyze. + + Returns: + List of instruction sentences. + """ + # Common instruction patterns + instruction_patterns = [ + r'(?:Please|Kindly|You should|You must|Make sure to|Be sure to|Remember to)[^.!?]*[.!?]', + r'(?:Follow these|Use the following|Apply these)[^.!?]*[.!?]', + r'(?:Do not|Don\'t|Never|Avoid)[^.!?]*[.!?]', + r'(?:Always|Ensure that|Make certain)[^.!?]*[.!?]' + ] + + instructions = [] + for pattern in instruction_patterns: + matches = re.findall(pattern, prompt_text, re.IGNORECASE) + instructions.extend(matches) + + return [inst.strip() for inst in instructions] + + @staticmethod + def format_prompt_for_display(prompt_text: str, line_width: int = 80) -> str: + """Format prompt for better display. + + Args: + prompt_text: Prompt text to format. + line_width: Maximum line width for wrapping. + + Returns: + Formatted prompt text. + """ + import textwrap + + # Split into paragraphs + paragraphs = prompt_text.split('\n\n') + formatted_paragraphs = [] + + for paragraph in paragraphs: + if paragraph.strip(): + # Wrap each paragraph + wrapped = textwrap.fill(paragraph.strip(), width=line_width) + formatted_paragraphs.append(wrapped) + + return '\n\n'.join(formatted_paragraphs) + + @staticmethod + def generate_prompt_preview(prompt_text: str, sample_variables: Optional[Dict[str, str]] = None) -> str: + """Generate a preview of the prompt with sample variables. + + Args: + prompt_text: Prompt text to preview. + sample_variables: Optional sample variables for substitution. + + Returns: + Preview text with sample substitutions. + """ + variables = PromptUtil.extract_prompt_variables(prompt_text) + + if sample_variables is None: + sample_variables = {} + + # Generate default sample values for missing variables + for var in variables: + if var not in sample_variables: + sample_variables[var] = f"[{var.upper()}]" + + try: + preview = PromptUtil.substitute_variables(prompt_text, sample_variables) + return preview + except ValueError: + # If substitution fails, return original with variable markers + return prompt_text + + @staticmethod + def compare_prompts(prompt1: str, prompt2: str) -> Dict[str, Any]: + """Compare two prompts for similarities and differences. + + Args: + prompt1: First prompt text. + prompt2: Second prompt text. + + Returns: + Comparison results. 
+ """ + vars1 = set(PromptUtil.extract_prompt_variables(prompt1)) + vars2 = set(PromptUtil.extract_prompt_variables(prompt2)) + + words1 = set(prompt1.lower().split()) + words2 = set(prompt2.lower().split()) + + common_words = words1 & words2 + unique_words1 = words1 - words2 + unique_words2 = words2 - words1 + + similarity_score = len(common_words) / len(words1 | words2) if (words1 | words2) else 0 + + return { + "similarity_score": similarity_score, + "common_variables": list(vars1 & vars2), + "unique_variables_1": list(vars1 - vars2), + "unique_variables_2": list(vars2 - vars1), + "common_words_count": len(common_words), + "unique_words_1_count": len(unique_words1), + "unique_words_2_count": len(unique_words2), + "length_difference": abs(len(prompt1) - len(prompt2)) + } diff --git a/examples/sample_apps/prompt_generator_app/intelligence/utils/common/yaml_util.py b/examples/sample_apps/prompt_generator_app/intelligence/utils/common/yaml_util.py new file mode 100644 index 00000000..8b101d91 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/intelligence/utils/common/yaml_util.py @@ -0,0 +1,206 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: yaml_util.py +"""YAML Utility Functions. + +Provides YAML file reading, writing, and validation functionality for prompt_generator_app. +This utility module focuses on YAML data processing and validation. +""" +import os +import yaml +from pathlib import Path +from typing import Dict, Any, List, Optional + + +class YamlUtil: + """YAML utility class for file operations and validation.""" + + @staticmethod + def load_yaml_file(file_path: str) -> Dict[str, Any]: + """Load YAML file. + + Args: + file_path: Path to the YAML file. + + Returns: + Parsed YAML content as dictionary. + + Raises: + FileNotFoundError: If the file does not exist. + yaml.YAMLError: If YAML format is invalid. + """ + if not os.path.exists(file_path): + raise FileNotFoundError(f"YAML file not found: {file_path}") + + try: + with open(file_path, 'r', encoding='utf-8') as f: + data = yaml.safe_load(f) + return data if data is not None else {} + except yaml.YAMLError as e: + raise yaml.YAMLError(f"Invalid YAML format in {file_path}: {str(e)}") + + @staticmethod + def save_yaml_file(data: Dict[str, Any], file_path: str) -> bool: + """Save data to YAML file. + + Args: + data: Data to be saved. + file_path: Target file path. + + Returns: + True if save was successful. + + Raises: + IOError: If file writing fails. + """ + try: + # Ensure directory exists + output_dir = os.path.dirname(file_path) + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir, exist_ok=True) + + with open(file_path, 'w', encoding='utf-8') as f: + yaml.dump(data, f, default_flow_style=False, allow_unicode=True, indent=2) + + return True + except Exception as e: + raise IOError(f"Failed to save YAML file {file_path}: {str(e)}") + + @staticmethod + def validate_yaml_structure(data: Dict[str, Any], required_fields: List[str]) -> Dict[str, Any]: + """Validate YAML structure validity. + + Args: + data: Data to be validated. + required_fields: List of required fields. + + Returns: + Validation result with details. 
+ """ + result = { + "is_valid": True, + "errors": [], + "warnings": [] + } + + # Check required fields + for field in required_fields: + if field not in data: + result["errors"].append(f"Missing required field: {field}") + result["is_valid"] = False + + # Check for empty values + for field, value in data.items(): + if value is None or (isinstance(value, str) and not value.strip()): + result["warnings"].append(f"Field '{field}' is empty") + + return result + + @staticmethod + def merge_yaml_files(file_paths: List[str]) -> Dict[str, Any]: + """Merge multiple YAML files. + + Args: + file_paths: List of YAML file paths to merge. + + Returns: + Merged YAML data. + + Raises: + FileNotFoundError: If any file does not exist. + yaml.YAMLError: If any YAML format is invalid. + """ + merged_data = {} + + for file_path in file_paths: + data = YamlUtil.load_yaml_file(file_path) + merged_data.update(data) + + return merged_data + + @staticmethod + def backup_yaml_file(file_path: str, backup_suffix: str = ".backup") -> str: + """Create backup of YAML file. + + Args: + file_path: Path to the YAML file to backup. + backup_suffix: Suffix for backup file. + + Returns: + Path to the backup file. + + Raises: + FileNotFoundError: If source file does not exist. + IOError: If backup creation fails. + """ + if not os.path.exists(file_path): + raise FileNotFoundError(f"Source file not found: {file_path}") + + backup_path = file_path + backup_suffix + + try: + data = YamlUtil.load_yaml_file(file_path) + YamlUtil.save_yaml_file(data, backup_path) + return backup_path + except Exception as e: + raise IOError(f"Failed to create backup: {str(e)}") + + @staticmethod + def compare_yaml_files(file_path1: str, file_path2: str) -> Dict[str, Any]: + """Compare two YAML files. + + Args: + file_path1: Path to the first YAML file. + file_path2: Path to the second YAML file. + + Returns: + Comparison results. + """ + data1 = YamlUtil.load_yaml_file(file_path1) + data2 = YamlUtil.load_yaml_file(file_path2) + + differences = [] + all_keys = set(data1.keys()) | set(data2.keys()) + + for key in all_keys: + if key not in data1: + differences.append(f"Key '{key}' only in file2") + elif key not in data2: + differences.append(f"Key '{key}' only in file1") + elif data1[key] != data2[key]: + differences.append(f"Key '{key}': '{data1[key]}' != '{data2[key]}'") + + return { + "are_identical": len(differences) == 0, + "differences": differences, + "file1_keys": list(data1.keys()), + "file2_keys": list(data2.keys()) + } + + @staticmethod + def extract_metadata(file_path: str) -> Dict[str, Any]: + """Extract metadata from YAML file. + + Args: + file_path: Path to the YAML file. + + Returns: + Extracted metadata information. 
+ """ + data = YamlUtil.load_yaml_file(file_path) + + file_stat = os.stat(file_path) + + return { + "file_path": file_path, + "file_size": file_stat.st_size, + "modified_time": file_stat.st_mtime, + "has_metadata": "metadata" in data, + "metadata": data.get("metadata", {}), + "key_count": len(data), + "keys": list(data.keys()) + } diff --git a/examples/sample_apps/prompt_generator_app/poetry.toml b/examples/sample_apps/prompt_generator_app/poetry.toml new file mode 100644 index 00000000..3d424596 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/poetry.toml @@ -0,0 +1,3 @@ +[virtualenvs] +create = true +in-project = true \ No newline at end of file diff --git a/examples/sample_apps/prompt_generator_app/pyproject.toml b/examples/sample_apps/prompt_generator_app/pyproject.toml new file mode 100644 index 00000000..b490f212 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/pyproject.toml @@ -0,0 +1,49 @@ +[tool.poetry] +name = "prompt_generator_app" +version = "0.0.1" +description = "This is a sample project for agentUniverse prompt generation and optimization." +authors = ["AntGroup "] +repository = "https://github.com/agentuniverse-ai/agentUniverse/tree/master/examples/sample_apps/prompt_generator_app" +readme = "README.md" +packages = [ + { include = "prompt_generator_app" } +] + +[tool.poetry.dependencies] +python = "^3.10" +agentUniverse = "^0.0.15" + +[tool.poetry.group.dev.dependencies] +pytest = "^7.2.0" +pytest-cov = "^4.0.0" +deptry = "^0.6.4" +pre-commit = "^2.20.0" + +[[tool.poetry.source]] +name = "china" +url = "https://mirrors.aliyun.com/pypi/simple/" +priority = "primary" + +[[tool.poetry.source]] +name = "pipy" +url = "https://pypi.org/simple/" +priority = "supplemental" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +testpaths = ["intelligence/test"] +python_files = ["test_*.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] + +[tool.black] +line-length = 120 +target-version = ['py310'] + +[tool.mypy] +python_version = "3.10" +ignore_missing_imports = true +disallow_untyped_defs = true \ No newline at end of file diff --git a/examples/sample_apps/prompt_generator_app/run_example.py b/examples/sample_apps/prompt_generator_app/run_example.py new file mode 100644 index 00000000..4faad8b3 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/run_example.py @@ -0,0 +1,39 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: run_demo.py +"""Run Prompt Generator Demo. + +This script runs the prompt generator demonstration showing various use cases +and functionality of the prompt generation system. 
+""" +import sys +import os +from pathlib import Path + +# Add current directory to Python path +current_dir = Path(__file__).parent +if str(current_dir) not in sys.path: + sys.path.insert(0, str(current_dir)) + +# Import and run demo +from intelligence.test.prompt_generator_demo import main as run_demo + + +def main(): + """Main function to run the demo.""" + print("Starting Prompt Generator Demo") + print("=" * 60) + + try: + run_demo() + except Exception as e: + print(f"\nDemo execution failed: {str(e)}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/examples/sample_apps/prompt_generator_app/run_optimizer_example.py b/examples/sample_apps/prompt_generator_app/run_optimizer_example.py new file mode 100644 index 00000000..94638c88 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/run_optimizer_example.py @@ -0,0 +1,42 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: run_optimizer_demo.py + +"""Prompt optimizer demonstration application startup script. + +Dedicated demonstration for prompt optimization functionality. +""" + +import sys +import os +from pathlib import Path + +# Add project root directory to Python path +app_root = Path(__file__).parent +project_root = app_root.parent.parent.parent +if str(project_root) not in sys.path: + sys.path.insert(0, str(project_root)) + +# Import optimizer demo modules +from intelligence.test.prompt_optimizer_demo import ( + demo_basic_prompt_optimization, + demo_advanced_prompt_optimization, + demo_yaml_prompt_optimization +) + +if __name__ == '__main__': + print("=" * 60) + print("agentUniverse Prompt Optimizer Demonstration") + print("=" * 60) + + demo_basic_prompt_optimization() + print("\n" + "=" * 60) + demo_advanced_prompt_optimization() + print("\n" + "=" * 60) + demo_yaml_prompt_optimization() + + print("\nPrompt optimizer demonstration completed!") diff --git a/examples/sample_apps/prompt_generator_app/run_tests.py b/examples/sample_apps/prompt_generator_app/run_tests.py new file mode 100644 index 00000000..63d6fbe3 --- /dev/null +++ b/examples/sample_apps/prompt_generator_app/run_tests.py @@ -0,0 +1,74 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: run_tests.py +"""Run Prompt Generator Tests. + +This script runs all test cases for the prompt generator application, +providing comprehensive validation of the functionality. 
+""" +import sys +import unittest +from pathlib import Path + +# Add current directory to Python path +current_dir = Path(__file__).parent +if str(current_dir) not in sys.path: + sys.path.insert(0, str(current_dir)) + +# Import test modules +from intelligence.test.test_prompt_generator import TestPromptGenerator + + +def main(): + """Main function to run all tests.""" + print("Starting Prompt Generator Test Suite") + print("=" * 60) + + # Create test suite + loader = unittest.TestLoader() + suite = unittest.TestSuite() + + # Add test cases + suite.addTests(loader.loadTestsFromTestCase(TestPromptGenerator)) + + # Run tests + runner = unittest.TextTestRunner( + verbosity=2, + descriptions=True, + failfast=False + ) + + result = runner.run(suite) + + # Print summary + print("\n" + "=" * 60) + print(f"Test Summary:") + print(f" Tests run: {result.testsRun}") + print(f" Failures: {len(result.failures)}") + print(f" Errors: {len(result.errors)}") + print(f" Success rate: {((result.testsRun - len(result.failures) - len(result.errors)) / result.testsRun * 100):.1f}%") + + if result.failures: + print(f"\nFailed Tests:") + for test, traceback in result.failures: + print(f" • {test}") + + if result.errors: + print(f"\nTest Errors:") + for test, traceback in result.errors: + print(f" • {test}") + + if len(result.failures) == 0 and len(result.errors) == 0: + print("\nAll tests passed!") + sys.exit(0) + else: + print(f"\nSome tests failed.") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_prompt.py b/scripts/generate_prompt.py new file mode 100644 index 00000000..a1ec3778 --- /dev/null +++ b/scripts/generate_prompt.py @@ -0,0 +1,127 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: generate_prompt.py + +"""agentUniverse Prompt Generator - Command Line Tool. + +A comprehensive command-line interface for generating and optimizing prompt +configurations for agentUniverse agents. Supports multiple agent types and +provides flexible configuration options. 
+""" + +import argparse +import os +import sys +from pathlib import Path + +# Add project root directory to Python path +current_dir = Path(__file__).parent +project_root = current_dir.parent +sys.path.insert(0, str(project_root)) + +from agentuniverse.prompt.prompt_generator_helper import ( + PromptTemplateHelper, + generate_prompt_config +) + + +def main(): + """Main function for the command line interface.""" + parser = argparse.ArgumentParser( + description="agentUniverse Prompt Generator", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Usage Examples: + # Generate ReAct agent prompt + python scripts/generate_prompt.py "Customer service assistant" --type react --scenario "e-commerce" + + # Generate RAG agent prompt and save to file + python scripts/generate_prompt.py "Insurance consultant" --type rag --output insurance.yaml + + # Generate planning agent prompt + python scripts/generate_prompt.py "Project management assistant" --type planning --requirements "Must follow agile development process" + + # View supported agent types + python scripts/generate_prompt.py --list-types + """ + ) + + # Positional arguments + parser.add_argument('task', nargs='?', help='Task description') + + # Optional arguments + parser.add_argument('--type', '-t', + choices=list(PromptTemplateHelper.get_supported_agent_types().keys()), + default='react', help='Agent type (default: react)') + parser.add_argument('--scenario', '-s', help='Application scenario description') + parser.add_argument('--requirements', '-r', help='Specific requirements description') + parser.add_argument('--version', '-v', help='Version name') + parser.add_argument('--output', '-o', help='Output file path') + parser.add_argument('--list-types', action='store_true', help='List supported agent types') + + args = parser.parse_args() + + # List agent types + if args.list_types: + print("Supported agent types:") + print("-" * 50) + for agent_type, name in PromptTemplateHelper.get_supported_agent_types().items(): + print(f" {agent_type:12} - {name}") + return + + # Check required parameters + if not args.task: + print("Error: Task description is required") + parser.print_help() + sys.exit(1) + + print(f"Generating {args.type} agent prompt...") + print(f" Task: {args.task}") + if args.scenario: + print(f" Scenario: {args.scenario}") + if args.requirements: + print(f" Requirements: {args.requirements}") + + try: + # Generate prompt configuration + yaml_config = generate_prompt_config( + task_description=args.task, + agent_type=args.type, + scenario=args.scenario, + specific_requirements=args.requirements, + version_name=args.version, + output_file=args.output + ) + + # Print to console if no output file specified + if not args.output: + print("\n" + "="*60) + print("Generated Prompt Configuration:") + print("="*60) + print(yaml_config) + + # Ask whether to save + save = input("\nSave to file? (y/N): ").strip().lower() + if save in ['y', 'yes']: + filename = f"{args.type}_prompt.yaml" + with open(filename, 'w', encoding='utf-8') as f: + f.write(yaml_config) + print(f"Saved to: {filename}") + + print("\nPrompt generation completed successfully!") + print("\nUsage instructions:") + print("1. Place the generated YAML file in intelligence/agentic/prompt/ directory") + print("2. Reference it in agent configuration via prompt_version") + print("3. 
Adjust and optimize prompt content as needed") + + except Exception as e: + print(f"Generation failed: {e}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tests/test_agentuniverse/unit/prompt/__init__.py b/tests/test_agentuniverse/unit/prompt/__init__.py new file mode 100644 index 00000000..11a3b856 --- /dev/null +++ b/tests/test_agentuniverse/unit/prompt/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: __init__.py \ No newline at end of file diff --git a/tests/test_agentuniverse/unit/prompt/test_prompt_generator_helper.py b/tests/test_agentuniverse/unit/prompt/test_prompt_generator_helper.py new file mode 100644 index 00000000..bbfc1eb0 --- /dev/null +++ b/tests/test_agentuniverse/unit/prompt/test_prompt_generator_helper.py @@ -0,0 +1,231 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2025/09/16 23:00 +# @Author : Libres-coder +# @Email : liudi1366@gmail.com +# @FileName: test_prompt_generator_helper.py + +"""Test cases for PromptTemplateHelper. + +This test file provides comprehensive test cases for the prompt generation helper tool, +ensuring all functionality works correctly and generated configurations comply with +agentUniverse standards. +""" + +import os +import tempfile +import unittest + +from agentuniverse.prompt.prompt_generator_helper import ( + PromptTemplateHelper, + generate_prompt_config, + optimize_existing_prompt +) + + +class TestPromptTemplateHelper(unittest.TestCase): + """Test cases for PromptTemplateHelper class.""" + + def test_generate_prompt_template_react(self): + """Test generating ReAct agent prompt template.""" + prompt_model = PromptTemplateHelper.generate_prompt_template( + task_description="智能客服助手,处理用户咨询和订单问题", + agent_type="react", + scenario="电商平台" + ) + + self.assertIsNotNone(prompt_model.introduction) + self.assertIsNotNone(prompt_model.target) + self.assertIsNotNone(prompt_model.instruction) + # Check if the generated prompt contains relevant content + combined_text = f"{prompt_model.introduction} {prompt_model.target} {prompt_model.instruction}".lower() + self.assertTrue(any(keyword in combined_text for keyword in ["客服", "服务", "service", "咨询", "assistant"])) + self.assertTrue(any(keyword in combined_text for keyword in ["电商", "平台", "platform", "商务"])) + self.assertIn("工具", prompt_model.instruction.lower()) + + def test_generate_prompt_template_rag(self): + """Test generating RAG agent prompt template.""" + prompt_model = PromptTemplateHelper.generate_prompt_template( + task_description="医疗咨询专家,基于医疗知识库回答用户问题", + agent_type="rag", + scenario="在线医疗" + ) + + self.assertIsNotNone(prompt_model.introduction) + self.assertIsNotNone(prompt_model.target) + self.assertIsNotNone(prompt_model.instruction) + # Check if the generated prompt contains relevant domain context + combined_text = f"{prompt_model.introduction} {prompt_model.target} {prompt_model.instruction}".lower() + self.assertTrue(any(keyword in combined_text for keyword in ["医疗", "医疗知识", "healthcare", "analysis", "information", "expert", "professional"])) + self.assertIn("医疗", prompt_model.target.lower()) + + def test_generate_yaml_config(self): + """Test generating YAML configuration.""" + prompt_model = PromptTemplateHelper.generate_prompt_template( + task_description="Test agent", + agent_type="react" + ) + + yaml_config = PromptTemplateHelper.generate_yaml_config( + prompt_model, + version_name="test.cn" + ) + + 
self.assertIn("introduction:", yaml_config) + self.assertIn("target:", yaml_config) + self.assertIn("instruction:", yaml_config) + self.assertIn("metadata:", yaml_config) + self.assertIn("type: 'PROMPT'", yaml_config) + self.assertIn("version: 'test.cn'", yaml_config) + + def test_get_supported_agent_types(self): + """Test retrieving supported agent types.""" + types = PromptTemplateHelper.get_supported_agent_types() + + self.assertIn("react", types) + self.assertIn("rag", types) + self.assertIn("planning", types) + self.assertIn("executing", types) + self.assertIn("expressing", types) + self.assertIn("reviewing", types) + self.assertIn("workflow", types) + + def test_extract_domain(self): + """Test domain extraction functionality.""" + # Test financial domain recognition + domain = PromptTemplateHelper._extract_domain("Intelligent stock analysis assistant") + self.assertEqual(domain, "financial") + + # Test medical domain recognition + domain = PromptTemplateHelper._extract_domain("Medical diagnosis assistant") + self.assertEqual(domain, "medical") + + # Test customer service domain recognition + domain = PromptTemplateHelper._extract_domain("Intelligent customer service system") + self.assertEqual(domain, "service") + + +class TestPromptOptimization(unittest.TestCase): + """Prompt optimization functionality tests.""" + + def test_optimize_existing_prompt(self): + """Test optimizing existing prompt.""" + original_prompt = "You are an AI assistant that helps users answer questions." + + optimized_prompt = optimize_existing_prompt( + existing_prompt_text=original_prompt, + optimization_goal="Improve professionalism", + agent_type="react", + scenario="Technical support" + ) + + self.assertIsNotNone(optimized_prompt.introduction) + self.assertIsNotNone(optimized_prompt.target) + self.assertIsNotNone(optimized_prompt.instruction) + # The optimized prompt should be more professional than the original prompt + self.assertIn("professional", optimized_prompt.introduction.lower()) + + +class TestPromptConfigGeneration(unittest.TestCase): + """Prompt configuration generation functionality tests.""" + + def setUp(self): + """Set up temporary directory.""" + self.temp_dir = tempfile.mkdtemp() + + def tearDown(self): + """Clean up temporary directory.""" + import shutil + shutil.rmtree(self.temp_dir, ignore_errors=True) + + def test_generate_prompt_config(self): + """Test generating prompt configuration.""" + output_file = os.path.join(self.temp_dir, "test_prompt.yaml") + + yaml_config = generate_prompt_config( + task_description="Test agent", + agent_type="react", + scenario="Test scenario", + version_name="test.cn", + output_file=output_file + ) + + # Check returned configuration content + self.assertIn("introduction:", yaml_config) + self.assertIn("target:", yaml_config) + self.assertIn("instruction:", yaml_config) + self.assertIn("metadata:", yaml_config) + + # Check if file is generated + self.assertTrue(os.path.exists(output_file)) + + # Check file content + with open(output_file, 'r', encoding='utf-8') as f: + file_content = f.read() + self.assertEqual(file_content, yaml_config) + + def test_generate_different_agent_types(self): + """Test generating different types of agent configurations.""" + agent_types = ['react', 'rag', 'planning', 'executing', 'expressing'] + + for agent_type in agent_types: + with self.subTest(agent_type=agent_type): + yaml_config = generate_prompt_config( + task_description=f"Test {agent_type} agent", + agent_type=agent_type, + scenario="Test scenario" + ) + + 
self.assertIn("introduction:", yaml_config) + self.assertIn("target:", yaml_config) + self.assertIn("instruction:", yaml_config) + self.assertIn("metadata:", yaml_config) + self.assertIn(f"type: 'PROMPT'", yaml_config) + + +class TestPromptConfigIntegration(unittest.TestCase): + """Prompt configuration integration tests.""" + + def test_generated_config_format(self): + """Test whether the generated configuration format meets PromptConfiger requirements.""" + yaml_config = generate_prompt_config( + task_description="Integration test agent", + agent_type="react", + version_name="integration_test.cn" + ) + + # Check required YAML structure + lines = yaml_config.strip().split('\n') + + # Check if required fields are included + config_dict = {} + current_key = None + current_value = [] + + for line in lines: + if line.startswith(' ') or line.startswith('\t'): + # This is part of a multi-line value + current_value.append(line) + elif ':' in line and not line.strip().startswith('#'): + # New key-value pair + if current_key: + config_dict[current_key] = '\n'.join(current_value) + key_part = line.split(':', 1)[0].strip() + current_key = key_part + current_value = [line] + else: + current_value.append(line) + + if current_key: + config_dict[current_key] = '\n'.join(current_value) + + # Verify required fields exist + self.assertIn('introduction', config_dict) + self.assertIn('target', config_dict) + self.assertIn('instruction', config_dict) + self.assertIn('metadata', config_dict) + + +if __name__ == '__main__': + unittest.main(verbosity=2)