mirror of
https://github.com/wshobson/agents.git
synced 2026-03-18 09:37:15 +00:00
Replace GPT and Claude models with the latest, better, and cheaper models (#118)
* Updated GPT and Claude models to the latest, better, and cheaper models * Updated more files to use GPT-5 and Sonnet/Haiku 4.5 because they are the latest, cheaper, and better models
This commit is contained in:
@@ -13,7 +13,7 @@ Expert AI engineer specializing in LLM application development, RAG systems, and
|
||||
|
||||
### LLM Integration & Model Management
|
||||
- OpenAI GPT-4o/4o-mini, o1-preview, o1-mini with function calling and structured outputs
|
||||
- Anthropic Claude 3.5 Sonnet, Claude 3 Haiku/Opus with tool use and computer use
|
||||
- Anthropic Claude 4.5 Sonnet/Haiku, Claude 4.1 Opus with tool use and computer use
|
||||
- Open-source models: Llama 3.1/3.2, Mixtral 8x7B/8x22B, Qwen 2.5, DeepSeek-V2
|
||||
- Local deployment with Ollama, vLLM, TGI (Text Generation Inference)
|
||||
- Model serving with TorchServe, MLflow, BentoML for production deployment
|
||||
@@ -68,7 +68,7 @@ Expert AI engineer specializing in LLM application development, RAG systems, and
|
||||
- Observability: logging, metrics, tracing with LangSmith, Phoenix, Weights & Biases
|
||||
|
||||
### Multimodal AI Integration
|
||||
- Vision models: GPT-4V, Claude 3 Vision, LLaVA, CLIP for image understanding
|
||||
- Vision models: GPT-4V, Claude 4 Vision, LLaVA, CLIP for image understanding
|
||||
- Audio processing: Whisper for speech-to-text, ElevenLabs for text-to-speech
|
||||
- Document AI: OCR, table extraction, layout understanding with models like LayoutLM
|
||||
- Video analysis and processing for multimedia applications
|
||||
@@ -111,7 +111,7 @@ Expert AI engineer specializing in LLM application development, RAG systems, and
|
||||
- Balances cutting-edge techniques with proven, stable solutions
|
||||
|
||||
## Knowledge Base
|
||||
- Latest LLM developments and model capabilities (GPT-4o, Claude 3.5, Llama 3.2)
|
||||
- Latest LLM developments and model capabilities (GPT-4o, Claude 4.5, Llama 3.2)
|
||||
- Modern vector database architectures and optimization techniques
|
||||
- Production AI system design patterns and best practices
|
||||
- AI safety and security considerations for enterprise deployments
|
||||
|
||||
@@ -53,7 +53,7 @@ Expert prompt engineer specializing in advanced prompting methodologies and LLM
|
||||
- Multi-turn conversation management
|
||||
- Image and multimodal prompt engineering
|
||||
|
||||
#### Anthropic Claude (3.5 Sonnet, Haiku, Opus)
|
||||
#### Anthropic Claude (4.5 Sonnet, Haiku, Opus)
|
||||
- Constitutional AI alignment with Claude's training
|
||||
- Tool use optimization for complex workflows
|
||||
- Computer use prompting for automation tasks
|
||||
|
||||
@@ -113,7 +113,7 @@ Final Response: [Refined]
|
||||
|
||||
### 5. Model-Specific Optimization
|
||||
|
||||
**GPT-4/GPT-4o**
|
||||
**GPT-5/GPT-4o**
|
||||
```python
|
||||
gpt4_optimized = """
|
||||
##CONTEXT##
|
||||
@@ -136,7 +136,7 @@ gpt4_optimized = """
|
||||
"""
|
||||
```
|
||||
|
||||
**Claude 3.5/4**
|
||||
**Claude 4.5/4**
|
||||
```python
|
||||
claude_optimized = """
|
||||
<context>
|
||||
@@ -566,7 +566,7 @@ testing_recommendations:
|
||||
metrics: ["accuracy", "satisfaction", "cost"]
|
||||
|
||||
deployment_strategy:
|
||||
model: "GPT-4 for quality, Claude for safety"
|
||||
model: "GPT-5 for quality, Claude for safety"
|
||||
temperature: 0.7
|
||||
max_tokens: 2000
|
||||
monitoring: "Track success, latency, feedback"
|
||||
|
||||
@@ -186,7 +186,7 @@ def calculate_factuality(claim, knowledge_base):
|
||||
### Single Output Evaluation
|
||||
```python
|
||||
def llm_judge_quality(response, question):
|
||||
"""Use GPT-4 to judge response quality."""
|
||||
"""Use GPT-5 to judge response quality."""
|
||||
prompt = f"""Rate the following response on a scale of 1-10 for:
|
||||
1. Accuracy (factually correct)
|
||||
2. Helpfulness (answers the question)
|
||||
@@ -205,7 +205,7 @@ Provide ratings in JSON format:
|
||||
"""
|
||||
|
||||
result = openai.ChatCompletion.create(
|
||||
model="gpt-4",
|
||||
model="gpt-5",
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
temperature=0
|
||||
)
|
||||
@@ -236,7 +236,7 @@ Answer with JSON:
|
||||
"""
|
||||
|
||||
result = openai.ChatCompletion.create(
|
||||
model="gpt-4",
|
||||
model="gpt-5",
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
temperature=0
|
||||
)
|
||||
|
||||
@@ -65,7 +65,7 @@ def self_consistency_cot(query, n=5, temperature=0.7):
|
||||
responses = []
|
||||
for _ in range(n):
|
||||
response = openai.ChatCompletion.create(
|
||||
model="gpt-4",
|
||||
model="gpt-5",
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
temperature=temperature
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user