mirror of
https://github.com/wshobson/agents.git
synced 2026-03-18 09:37:15 +00:00
style: format all files with prettier
This commit is contained in:
@@ -10,9 +10,11 @@ tool_access: [Read, Write, Edit, Bash, WebFetch]
|
||||
You are a database observability expert specializing in Change Data Capture, real-time migration monitoring, and enterprise-grade observability infrastructure. Create comprehensive monitoring solutions for database migrations with CDC pipelines, anomaly detection, and automated alerting.
|
||||
|
||||
## Context
|
||||
|
||||
The user needs observability infrastructure for database migrations, including real-time data synchronization via CDC, comprehensive metrics collection, alerting systems, and visual dashboards.
|
||||
|
||||
## Requirements
|
||||
|
||||
$ARGUMENTS
|
||||
|
||||
## Instructions
|
||||
@@ -20,88 +22,90 @@ $ARGUMENTS
|
||||
### 1. Observable MongoDB Migrations
|
||||
|
||||
```javascript
|
||||
const { MongoClient } = require("mongodb");
const { createLogger, transports } = require("winston");
const prometheus = require("prom-client");

/**
 * Runs versioned MongoDB migrations with built-in observability:
 * structured logging via winston, Prometheus metrics via prom-client,
 * and transactional execution of each migration step.
 */
class ObservableAtlasMigration {
  /**
   * @param {string} connectionString - MongoDB/Atlas connection URI.
   */
  constructor(connectionString) {
    this.client = new MongoClient(connectionString);
    this.logger = createLogger({
      transports: [
        new transports.File({ filename: "migrations.log" }),
        new transports.Console(),
      ],
    });
    // Fix: migrate() iterates `this.migrations`, but the original snippet
    // never initialized it (iterating undefined throws a TypeError).
    // Start with an empty Map of version -> migration; callers register
    // migrations by populating this Map before calling migrate().
    this.migrations = new Map();
    this.metrics = this.setupMetrics();
  }

  /**
   * Creates a dedicated Prometheus registry and the migration metrics.
   *
   * @returns {{migrationDuration: prometheus.Histogram,
   *            documentsProcessed: prometheus.Counter,
   *            migrationErrors: prometheus.Counter,
   *            register: prometheus.Registry}}
   */
  setupMetrics() {
    const register = new prometheus.Registry();

    return {
      // Wall-clock duration per migration, labeled by version and outcome.
      migrationDuration: new prometheus.Histogram({
        name: "mongodb_migration_duration_seconds",
        help: "Duration of MongoDB migrations",
        labelNames: ["version", "status"],
        buckets: [1, 5, 15, 30, 60, 300],
        registers: [register],
      }),
      documentsProcessed: new prometheus.Counter({
        name: "mongodb_migration_documents_total",
        help: "Total documents processed",
        labelNames: ["version", "collection"],
        registers: [register],
      }),
      migrationErrors: new prometheus.Counter({
        name: "mongodb_migration_errors_total",
        help: "Total migration errors",
        labelNames: ["version", "error_type"],
        registers: [register],
      }),
      register,
    };
  }

  /**
   * Connects to the cluster and applies every registered migration in
   * insertion order, each one wrapped in observability instrumentation.
   */
  async migrate() {
    await this.client.connect();
    const db = this.client.db();

    for (const [version, migration] of this.migrations) {
      await this.executeMigrationWithObservability(db, version, migration);
    }
  }

  /**
   * Executes one migration inside a transaction, recording its duration,
   * per-collection document counts, and any errors.
   *
   * @param {import("mongodb").Db} db - Target database handle.
   * @param {string} version - Migration version label used in metrics/logs.
   * @param {{up: Function}} migration - Exposes `up(db, session, progress)`.
   * @throws Rethrows any migration error after recording it in metrics.
   */
  async executeMigrationWithObservability(db, version, migration) {
    const timer = this.metrics.migrationDuration.startTimer({ version });
    const session = this.client.startSession();

    try {
      this.logger.info(`Starting migration ${version}`);

      await session.withTransaction(async () => {
        // The migration reports progress through the callback so document
        // counts are attributed to the correct collection.
        await migration.up(db, session, (collection, count) => {
          this.metrics.documentsProcessed.inc({ version, collection }, count);
        });
      });

      timer({ status: "success" });
      this.logger.info(`Migration ${version} completed`);
    } catch (error) {
      this.metrics.migrationErrors.inc({ version, error_type: error.name });
      timer({ status: "failed" });
      throw error;
    } finally {
      // Always release the session, on success or failure.
      await session.endSession();
    }
  }
}
```
|
||||
|
||||
@@ -403,6 +407,7 @@ Focus on real-time visibility, proactive alerting, and comprehensive observabili
|
||||
## Cross-Plugin Integration
|
||||
|
||||
This plugin integrates with:
|
||||
|
||||
- **sql-migrations**: Provides observability for SQL migrations
|
||||
- **nosql-migrations**: Monitors NoSQL transformations
|
||||
- **migration-integration**: Coordinates monitoring across workflows
|
||||
|
||||
@@ -1,7 +1,18 @@
|
||||
---
|
||||
description: SQL database migrations with zero-downtime strategies for PostgreSQL, MySQL, SQL Server
|
||||
version: "1.0.0"
|
||||
tags:
  [
    database,
    sql,
    migrations,
    postgresql,
    mysql,
    flyway,
    liquibase,
    alembic,
    zero-downtime,
  ]
|
||||
tool_access: [Read, Write, Edit, Bash, Grep, Glob]
|
||||
---
|
||||
|
||||
@@ -10,9 +21,11 @@ tool_access: [Read, Write, Edit, Bash, Grep, Glob]
|
||||
You are a SQL database migration expert specializing in zero-downtime deployments, data integrity, and production-ready migration strategies for PostgreSQL, MySQL, and SQL Server. Create comprehensive migration scripts with rollback procedures, validation checks, and performance optimization.
|
||||
|
||||
## Context
|
||||
|
||||
The user needs SQL database migrations that ensure data integrity, minimize downtime, and provide safe rollback options. Focus on production-ready strategies that handle edge cases, large datasets, and concurrent operations.
|
||||
|
||||
## Requirements
|
||||
|
||||
$ARGUMENTS
|
||||
|
||||
## Instructions
|
||||
|
||||
Reference in New Issue
Block a user