diff --git a/agentex/docker-compose.yml b/agentex/docker-compose.yml index 0347315..3831c7e 100644 --- a/agentex/docker-compose.yml +++ b/agentex/docker-compose.yml @@ -134,6 +134,19 @@ services: retries: 5 start_period: 5s + agentex-otel-collector: + container_name: agentex-otel-collector + image: otel/opentelemetry-collector-contrib:0.101.0 + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel/otel-collector-config.yaml:/etc/otel-collector-config.yaml:ro + ports: + - "4317:4317" # OTLP gRPC + - "4318:4318" # OTLP HTTP + - "8889:8889" # Prometheus metrics endpoint + - "13133:13133" # Health check endpoint + networks: + - agentex-network agentex: container_name: agentex build: @@ -152,6 +165,8 @@ services: - ENABLE_HEALTH_CHECK_WORKFLOW=true - AGENTEX_SERVER_TASK_QUEUE=agentex-server - ALLOWED_ORIGINS=http://localhost:3000 + - OTEL_EXPORTER_OTLP_ENDPOINT=http://agentex-otel-collector:4317 + - OTEL_SERVICE_NAME=agentex-api ports: - "5003:5003" volumes: @@ -165,6 +180,8 @@ services: condition: service_healthy agentex-mongodb: condition: service_healthy + agentex-otel-collector: + condition: service_started networks: - agentex-network command: | diff --git a/agentex/otel/otel-collector-config.yaml b/agentex/otel/otel-collector-config.yaml new file mode 100644 index 0000000..81c47a1 --- /dev/null +++ b/agentex/otel/otel-collector-config.yaml @@ -0,0 +1,41 @@ +# OpenTelemetry Collector configuration for local development +# Receives OTLP metrics and exports to console + Prometheus + +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +processors: + batch: + timeout: 10s + send_batch_size: 1024 + +exporters: + # Log metrics to console for debugging + debug: + verbosity: detailed + sampling_initial: 5 + sampling_thereafter: 200 + + # Expose Prometheus endpoint for scraping + prometheus: + endpoint: 0.0.0.0:8889 + namespace: agentex + send_timestamps: true + metric_expiration: 5m + +extensions: + health_check: + endpoint: 0.0.0.0:13133 + +service: + extensions: [health_check] + pipelines: + metrics: + receivers: [otlp] + processors: [batch] + exporters: [debug, prometheus] diff --git a/agentex/pyproject.toml b/agentex/pyproject.toml index 2cf7b83..c5a52df 100644 --- a/agentex/pyproject.toml +++ b/agentex/pyproject.toml @@ -27,6 +27,9 @@ dependencies = [ "ddtrace>=3.13.0", "json_log_formatter>=1.1.1", "datadog>=0.52.1", + "opentelemetry-api>=1.28.0", + "opentelemetry-sdk>=1.28.0", + "opentelemetry-exporter-otlp>=1.28.0", ] [dependency-groups] diff --git a/agentex/src/api/app.py b/agentex/src/api/app.py index 3236cb5..d24ec06 100644 --- a/agentex/src/api/app.py +++ b/agentex/src/api/app.py @@ -28,10 +28,14 @@ tasks, ) from src.config import dependencies -from src.config.dependencies import resolve_environment_variable_dependency +from src.config.dependencies import ( + GlobalDependencies, + resolve_environment_variable_dependency, +) from src.config.environment_variables import EnvVarKeys from src.domain.exceptions import GenericException from src.utils.logging import make_logger +from src.utils.otel_metrics import init_otel_metrics, shutdown_otel_metrics logger = make_logger(__name__) @@ -66,14 +70,27 @@ def __init__( @asynccontextmanager async def lifespan(_: FastAPI): + # Initialize OpenTelemetry metrics first (before dependencies register instruments) + init_otel_metrics() + await dependencies.startup_global_dependencies() configure_statsd() + + # Start PostgreSQL metrics collection + global_deps = GlobalDependencies() + if 
global_deps.postgres_metrics_collector: + await global_deps.postgres_metrics_collector.start_collection() + yield + # Clean up HTTP clients before other shutdown tasks await HttpxGateway.close_clients() await dependencies.async_shutdown() dependencies.shutdown() + # Shutdown OTel metrics (flushes remaining data) + shutdown_otel_metrics() + fastapi_app = FastAPI( title="Agentex API", diff --git a/agentex/src/config/dependencies.py b/agentex/src/config/dependencies.py index 9a21678..65c042c 100644 --- a/agentex/src/config/dependencies.py +++ b/agentex/src/config/dependencies.py @@ -19,6 +19,7 @@ from src.config.environment_variables import Environment, EnvironmentVariables from src.utils.database import async_db_engine_creator +from src.utils.db_metrics import PostgresMetricsCollector from src.utils.logging import make_logger logger = make_logger(__name__) @@ -47,6 +48,7 @@ def __init__(self): self.httpx_client: httpx.AsyncClient | None = None self.redis_pool: redis.ConnectionPool | None = None self.database_async_read_only_engine: AsyncEngine | None = None + self.postgres_metrics_collector: PostgresMetricsCollector | None = None self._loaded = False async def create_temporal_client(self): @@ -192,10 +194,46 @@ async def load(self): pool_recycle=3600, ) + # Initialize PostgreSQL metrics collector + self.postgres_metrics_collector = PostgresMetricsCollector() + environment = self.environment_variables.ENVIRONMENT + service_name = os.environ.get("OTEL_SERVICE_NAME", "agentex") + + if self.database_async_read_write_engine: + self.postgres_metrics_collector.register_engine( + engine=self.database_async_read_write_engine, + pool_name="main", + db_url=self.environment_variables.DATABASE_URL, + environment=environment, + service_name=service_name, + ) + + if self.database_async_middleware_read_write_engine: + self.postgres_metrics_collector.register_engine( + engine=self.database_async_middleware_read_write_engine, + pool_name="middleware", + db_url=self.environment_variables.DATABASE_URL, + environment=environment, + service_name=service_name, + ) + + if self.database_async_read_only_engine: + self.postgres_metrics_collector.register_engine( + engine=self.database_async_read_only_engine, + pool_name="readonly", + db_url=read_only_db_url, + environment=environment, + service_name=service_name, + ) + self._loaded = True async def force_reload(self): """Force reload all dependencies with fresh environment variables""" + # Stop metrics collection + if self.postgres_metrics_collector: + await self.postgres_metrics_collector.stop_collection() + # Clear existing connections if self.database_async_read_write_engine: await self.database_async_read_write_engine.dispose() @@ -215,6 +253,7 @@ async def force_reload(self): self.docker_client = None self.mongodb_client = None self.mongodb_database = None + self.postgres_metrics_collector = None # Reload with fresh environment variables EnvironmentVariables.clear_cache() @@ -232,6 +271,11 @@ def shutdown(): async def async_shutdown(): global_dependencies = GlobalDependencies() + + # Stop PostgreSQL metrics collection + if global_dependencies.postgres_metrics_collector: + await global_dependencies.postgres_metrics_collector.stop_collection() + run_concurrently = [] if global_dependencies.database_async_read_only_engine: run_concurrently.append( diff --git a/agentex/src/utils/db_metrics.py b/agentex/src/utils/db_metrics.py new file mode 100644 index 0000000..9a6db92 --- /dev/null +++ b/agentex/src/utils/db_metrics.py @@ -0,0 +1,692 @@ +""" +PostgreSQL metrics 
instrumentation following OpenTelemetry semantic conventions.
+
+Metrics are exported via OpenTelemetry SDK to OTLP-compatible backends
+(e.g., Grafana Cloud, Datadog Agent, OTel Collector).
+
+Reference: https://opentelemetry.io/docs/specs/semconv/database/
+"""
+
+from __future__ import annotations
+
+import asyncio
+import os
+import re
+import time
+from typing import TYPE_CHECKING
+from urllib.parse import urlparse
+
+from datadog import statsd
+from sqlalchemy import event, text
+from sqlalchemy.engine import ExecutionContext
+
+from src.utils.logging import make_logger
+from src.utils.otel_metrics import get_meter
+
+if TYPE_CHECKING:
+    from opentelemetry.metrics import Counter, Histogram, UpDownCounter
+    from sqlalchemy.ext.asyncio import AsyncEngine
+    from sqlalchemy.pool import ConnectionPoolEntry
+
+logger = make_logger(__name__)
+
+# Minimum interval between periodic metrics collections (seconds); also the period of the background collection loop
+_METRICS_DEBOUNCE_INTERVAL = 30
+
+# Slow query threshold in seconds (configurable via environment variable)
+_SLOW_QUERY_THRESHOLD = float(os.environ.get("POSTGRES_SLOW_QUERY_THRESHOLD", "0.5"))
+
+
+def _format_statsd_tags(attributes: dict) -> list[str]:
+    """Convert OTel attributes dict to Datadog StatsD tags list."""
+    tag_mapping = {
+        "service.name": "service",
+        "db.system.name": "db_system",
+        "db.client.connection.pool.name": "pool",
+        "server.address": "server",
+        "db.namespace": "db_name",
+        "deployment.environment": "env",
+        "db.client.connection.state": "state",
+        "db.operation.name": "operation",
+        "db.collection.name": "table",
+        "error.type": "error_type",
+    }
+    tags = []
+    for key, value in attributes.items():
+        tag_name = tag_mapping.get(key, key.replace(".", "_"))
+        tags.append(f"{tag_name}:{value}")
+    return tags
+
+
+def _parse_db_url(url: str) -> tuple[str, int, str]:
+    """Parse database URL to extract host, port, and database name."""
+    parsed = urlparse(url)
+    host = parsed.hostname or "localhost"
+    port = parsed.port or 5432
+    db_name = parsed.path.lstrip("/") if parsed.path else "postgres"
+    return host, port, db_name
+
+
+class PostgresPoolMetrics:
+    """
+    Collects and emits PostgreSQL connection pool metrics.
+
+    Registers SQLAlchemy pool event listeners and provides a periodic
+    collection method for pool state metrics.
+
+    If OTel is not configured (OTEL_EXPORTER_OTLP_ENDPOINT not set),
+    the OTel instruments become a no-op; StatsD metrics are still emitted.
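+
+    Usage (a sketch; assumes an AsyncEngine created elsewhere):
+        pool_metrics = PostgresPoolMetrics(
+            engine=engine,
+            pool_name="main",
+            db_url="postgresql+asyncpg://user:pass@localhost:5432/agentex",
+            environment="development",
+        )
+        await pool_metrics.collect_pool_metrics()  # driven periodically by the collector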
+ """ + + def __init__( + self, + engine: AsyncEngine, + pool_name: str, + db_url: str, + environment: str, + service_name: str = "agentex", + ): + self.engine = engine + self.pool_name = pool_name + self._last_metrics_time = 0.0 + + # Get meter - if None, OTel is not configured + meter = get_meter("agentex.db.pool") + self._enabled = meter is not None + + host, port, db_name = _parse_db_url(db_url) + + # Always set base_attributes for StatsD (even if OTel is disabled) + self.base_attributes = { + "service.name": service_name, + "db.system.name": "postgresql", + "db.client.connection.pool.name": pool_name, + "server.address": host, + "server.port": port, + "db.namespace": db_name, + "deployment.environment": environment, + } + + if not self._enabled: + # Still register pool events for StatsD metrics + self._register_pool_events() + return + + self._connection_count: UpDownCounter = meter.create_up_down_counter( + name="db.client.connection.count", + description="Current number of connections in the pool", + unit="{connection}", + ) + + self._connection_max: UpDownCounter = meter.create_up_down_counter( + name="db.client.connection.max", + description="Maximum allowed connections", + unit="{connection}", + ) + + self._connection_overflow: UpDownCounter = meter.create_up_down_counter( + name="db.client.connection.overflow.current", + description="Current overflow connections", + unit="{connection}", + ) + + self._connection_created: Counter = meter.create_counter( + name="db.client.connection.created_total", + description="Total connections created", + unit="{connection}", + ) + + self._connection_invalidated: Counter = meter.create_counter( + name="db.client.connection.invalidated_total", + description="Total connections invalidated", + unit="{connection}", + ) + + self._connection_use_time: Histogram = meter.create_histogram( + name="db.client.connection.use_time", + description="Time a connection was checked out", + unit="s", + ) + + # Track last reported values for delta calculation + self._last_idle = 0 + self._last_used = 0 + self._last_overflow = 0 + self._last_max = 0 + + self._register_pool_events() + + def _register_pool_events(self): + """Register SQLAlchemy pool event listeners for timing metrics.""" + sync_pool = self.engine.sync_engine.pool + base_tags = _format_statsd_tags(self.base_attributes) + + @event.listens_for(sync_pool, "connect") + def on_connect( + dbapi_conn, + connection_record: ConnectionPoolEntry, + ): + """Track new connection creation.""" + if self._enabled: + self._connection_created.add(1, self.base_attributes) + # StatsD: increment counter for new connections + statsd.increment("db.client.connection.created", tags=base_tags) + + @event.listens_for(sync_pool, "checkout") + def on_checkout( + dbapi_conn, + connection_record: ConnectionPoolEntry, + connection_proxy, + ): + """Track connection checkout - store timestamp for use_time calculation.""" + connection_record.info["_checkout_time"] = time.monotonic() + + @event.listens_for(sync_pool, "checkin") + def on_checkin( + dbapi_conn, + connection_record: ConnectionPoolEntry, + ): + """Track connection checkin - calculate use_time.""" + checkout_time = connection_record.info.pop("_checkout_time", None) + if checkout_time is not None: + use_time = time.monotonic() - checkout_time + if self._enabled: + self._connection_use_time.record(use_time, self.base_attributes) + # StatsD: histogram for connection use time (in milliseconds for Datadog) + statsd.histogram( + "db.client.connection.use_time", use_time * 1000, 
tags=base_tags
+                )
+
+        @event.listens_for(sync_pool, "invalidate")
+        def on_invalidate(
+            dbapi_conn,
+            connection_record: ConnectionPoolEntry,
+            exception,
+        ):
+            """Track connection invalidation."""
+            error_type = type(exception).__name__ if exception else "unknown"
+            attrs = {**self.base_attributes, "error.type": error_type}
+            if self._enabled:
+                self._connection_invalidated.add(1, attrs)
+            # StatsD: increment counter for invalidated connections
+            error_tags = base_tags + [f"error_type:{error_type}"]
+            statsd.increment("db.client.connection.invalidated", tags=error_tags)
+
+    async def collect_pool_metrics(self):
+        """
+        Collect current pool state metrics with debouncing.
+
+        Only collects metrics if at least _METRICS_DEBOUNCE_INTERVAL seconds
+        have passed since the last collection, to limit overhead.
+        """
+        now = time.monotonic()
+        if now - self._last_metrics_time < _METRICS_DEBOUNCE_INTERVAL:
+            return
+
+        try:
+            self._last_metrics_time = now
+            pool = self.engine.sync_engine.pool
+
+            # Connection counts by state
+            # Note: pool.overflow() can be negative (relative to max_overflow);
+            # positive overflow means overflow connections are in use
+            idle_connections = pool.checkedin()
+            used_connections = pool.checkedout()
+            raw_overflow = pool.overflow()
+            overflow_in_use = max(0, raw_overflow)
+            max_connections = pool.size() + pool._max_overflow
+
+            # OTel metrics (if enabled)
+            if self._enabled:
+                # Record idle connections (delta from last)
+                idle_attrs = {
+                    **self.base_attributes,
+                    "db.client.connection.state": "idle",
+                }
+                idle_delta = idle_connections - self._last_idle
+                if idle_delta != 0:
+                    self._connection_count.add(idle_delta, idle_attrs)
+                self._last_idle = idle_connections
+
+                # Record used connections (delta from last)
+                used_attrs = {
+                    **self.base_attributes,
+                    "db.client.connection.state": "used",
+                }
+                used_delta = used_connections - self._last_used
+                if used_delta != 0:
+                    self._connection_count.add(used_delta, used_attrs)
+                self._last_used = used_connections
+
+                # Record overflow connections in use (delta from last)
+                overflow_delta = overflow_in_use - self._last_overflow
+                if overflow_delta != 0:
+                    self._connection_overflow.add(overflow_delta, self.base_attributes)
+                self._last_overflow = overflow_in_use
+
+                # Record max connections (delta from last, usually static)
+                max_delta = max_connections - self._last_max
+                if max_delta != 0:
+                    self._connection_max.add(max_delta, self.base_attributes)
+                self._last_max = max_connections
+
+            # StatsD metrics (always sent, as gauges)
+            base_tags = _format_statsd_tags(self.base_attributes)
+
+            def _send_statsd_pool_metrics():
+                idle_tags = base_tags + ["state:idle"]
+                used_tags = base_tags + ["state:used"]
+
+                statsd.gauge(
+                    "db.client.connection.count", idle_connections, tags=idle_tags
+                )
+                statsd.gauge(
+                    "db.client.connection.count", used_connections, tags=used_tags
+                )
+                statsd.gauge(
+                    "db.client.connection.overflow", overflow_in_use, tags=base_tags
+                )
+                statsd.gauge(
+                    "db.client.connection.max", max_connections, tags=base_tags
+                )
+
+            await asyncio.to_thread(_send_statsd_pool_metrics)
+
+        except Exception as e:
+            logger.error(f"Failed to collect pool metrics for {self.pool_name}: {e}")
+
+
+class PostgresQueryMetrics:
+    """
+    Instruments query performance metrics following OTel conventions.
+
+    Registers SQLAlchemy engine events to track query duration, operation
+    types, and slow queries.
+
+    If OTel is not configured (OTEL_EXPORTER_OTLP_ENDPOINT not set),
+    the OTel instruments become a no-op; StatsD metrics are still emitted.
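+
+    Usage (a sketch; assumes an AsyncEngine created elsewhere):
+        query_metrics = PostgresQueryMetrics(
+            engine=engine,
+            pool_name="main",
+            db_url="postgresql+asyncpg://user:pass@localhost:5432/agentex",
+            environment="development",
+        )
+        # No further calls are needed; metrics are emitted from engine events.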
+ """ + + # Regex patterns to extract operation type + OPERATION_PATTERNS = [ + (re.compile(r"^\s*SELECT\b", re.IGNORECASE), "SELECT"), + (re.compile(r"^\s*INSERT\b", re.IGNORECASE), "INSERT"), + (re.compile(r"^\s*UPDATE\b", re.IGNORECASE), "UPDATE"), + (re.compile(r"^\s*DELETE\b", re.IGNORECASE), "DELETE"), + (re.compile(r"^\s*BEGIN\b", re.IGNORECASE), "BEGIN"), + (re.compile(r"^\s*COMMIT\b", re.IGNORECASE), "COMMIT"), + (re.compile(r"^\s*ROLLBACK\b", re.IGNORECASE), "ROLLBACK"), + ] + + # Table extraction pattern + TABLE_PATTERN = re.compile( + r"(?:FROM|INTO|UPDATE|JOIN)\s+[\"']?(\w+)[\"']?", + re.IGNORECASE, + ) + + def __init__( + self, + engine: AsyncEngine, + pool_name: str, + db_url: str, + environment: str, + service_name: str = "agentex", + ): + self.engine = engine + self.pool_name = pool_name + + # Get meter - if None, OTel is not configured + meter = get_meter("agentex.db.query") + self._enabled = meter is not None + + host, port, db_name = _parse_db_url(db_url) + + # Always set base_attributes for StatsD (even if OTel is disabled) + self.base_attributes = { + "service.name": service_name, + "db.system.name": "postgresql", + "db.client.connection.pool.name": pool_name, + "server.address": host, + "db.namespace": db_name, + "deployment.environment": environment, + } + + # OTel metrics (only if enabled) + if self._enabled: + self._operation_duration: Histogram = meter.create_histogram( + name="db.client.operation.duration", + description="Database operation duration", + unit="s", + ) + + self._slow_queries: Counter = meter.create_counter( + name="db.client.operation.slow_total", + description="Total slow queries exceeding threshold", + unit="{query}", + ) + + self._operation_errors: Counter = meter.create_counter( + name="db.client.operation.errors_total", + description="Total query errors", + unit="{error}", + ) + + self._returned_rows: Histogram = meter.create_histogram( + name="db.client.response.returned_rows", + description="Number of rows returned by queries", + unit="{row}", + ) + + self._register_query_events() + + def _extract_operation(self, statement: str) -> str: + """Extract SQL operation type from statement.""" + for pattern, operation in self.OPERATION_PATTERNS: + if pattern.match(statement): + return operation + return "OTHER" + + def _extract_table(self, statement: str) -> str | None: + """Extract primary table name from statement.""" + match = self.TABLE_PATTERN.search(statement) + return match.group(1) if match else None + + def _register_query_events(self): + """Register SQLAlchemy event listeners for query metrics.""" + sync_engine = self.engine.sync_engine + base_tags = _format_statsd_tags(self.base_attributes) + + @event.listens_for(sync_engine, "before_cursor_execute") + def before_execute( + conn, + cursor, + statement: str, + parameters, + context: ExecutionContext, + executemany: bool, + ): + """Store query start time in context.""" + context._query_start_time = time.monotonic() + + @event.listens_for(sync_engine, "after_cursor_execute") + def after_execute( + conn, + cursor, + statement: str, + parameters, + context: ExecutionContext, + executemany: bool, + ): + """Calculate and emit query duration metrics.""" + start_time = getattr(context, "_query_start_time", None) + if start_time is None: + return + + duration = time.monotonic() - start_time + operation = self._extract_operation(statement) + table = self._extract_table(statement) + + attrs = {**self.base_attributes, "db.operation.name": operation} + if table: + attrs["db.collection.name"] = table 
+ + # OTel metrics (if enabled) + if self._enabled: + # Record operation duration + self._operation_duration.record(duration, attrs) + + # Track slow queries + if duration >= _SLOW_QUERY_THRESHOLD: + self._slow_queries.add(1, attrs) + + # Record row count for SELECT queries + if operation == "SELECT" and cursor.rowcount >= 0: + self._returned_rows.record(cursor.rowcount, attrs) + + # StatsD metrics + tags = base_tags + [f"operation:{operation}"] + if table: + tags.append(f"table:{table}") + + # Duration in milliseconds for Datadog + statsd.histogram("db.client.operation.duration", duration * 1000, tags=tags) + + # Track slow queries + if duration >= _SLOW_QUERY_THRESHOLD: + statsd.increment("db.client.operation.slow", tags=tags) + + # Record row count for SELECT queries + if operation == "SELECT" and cursor.rowcount >= 0: + statsd.histogram( + "db.client.response.returned_rows", cursor.rowcount, tags=tags + ) + + @event.listens_for(sync_engine, "handle_error") + def on_error(exception_context): + """Track query errors.""" + statement = exception_context.statement or "" + operation = self._extract_operation(statement) + original_exception = exception_context.original_exception + error_type = type(original_exception).__name__ + + # Log the error with details for debugging + # Truncate statement to avoid logging sensitive data + truncated_stmt = ( + statement[:200] + "..." if len(statement) > 200 else statement + ) + logger.warning( + f"Database error on pool {self.pool_name}: " + f"type={error_type}, operation={operation}, " + f"message={original_exception}, statement={truncated_stmt}" + ) + + attrs = { + **self.base_attributes, + "db.operation.name": operation, + "error.type": error_type, + } + + # OTel metrics (if enabled) + if self._enabled: + self._operation_errors.add(1, attrs) + + # StatsD metrics + error_tags = base_tags + [ + f"operation:{operation}", + f"error_type:{error_type}", + ] + statsd.increment("db.client.operation.errors", tags=error_tags) + + +class PostgresHealthMetrics: + """ + Emits health-related metrics for PostgreSQL connections. + + Performs periodic health checks via simple SELECT 1 queries. + + If OTel is not configured (OTEL_EXPORTER_OTLP_ENDPOINT not set), + OTel metrics become a no-op but StatsD metrics are still emitted. 
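+
+    Usage (a sketch; normally driven by PostgresMetricsCollector):
+        health = PostgresHealthMetrics(engine, "main", db_url, "development")
+        await health.check_health()  # runs SELECT 1 with a 2s timeout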
+ """ + + HEALTH_CHECK_TIMEOUT = 2.0 # seconds + + def __init__( + self, + engine: AsyncEngine, + pool_name: str, + db_url: str, + environment: str, + service_name: str = "agentex", + ): + self.engine = engine + self.pool_name = pool_name + + # Get meter - if None, OTel is not configured + meter = get_meter("agentex.db.health") + self._enabled = meter is not None + + host, _, db_name = _parse_db_url(db_url) + + # Always set base_attributes for StatsD + self.base_attributes = { + "service.name": service_name, + "db.system.name": "postgresql", + "db.client.connection.pool.name": pool_name, + "server.address": host, + "db.namespace": db_name, + "deployment.environment": environment, + } + + # OTel metrics (only if enabled) + if self._enabled: + self._health_status: UpDownCounter = meter.create_up_down_counter( + name="db.client.connection.health", + description="Connection health status (1=healthy, 0=unhealthy)", + unit="{status}", + ) + + self._health_check_failures: Counter = meter.create_counter( + name="db.client.connection.health_check_failures", + description="Total health check failures", + unit="{failure}", + ) + + # Track last health status for delta + self._last_health = 0 + + async def check_health(self): + """ + Perform health check and emit metrics. + + Emits db.client.connection.health as delta changes. + """ + base_tags = _format_statsd_tags(self.base_attributes) + + try: + async with asyncio.timeout(self.HEALTH_CHECK_TIMEOUT): + async with self.engine.connect() as conn: + await conn.execute(text("SELECT 1")) + + # Healthy + if self._enabled: + # OTel: report delta to get to 1 + health_delta = 1 - self._last_health + if health_delta != 0: + self._health_status.add(health_delta, self.base_attributes) + self._last_health = 1 + + # StatsD: gauge for health status (1=healthy) + statsd.gauge("db.client.connection.health", 1, tags=base_tags) + + except Exception as e: + error_type = type(e).__name__ + logger.warning( + f"Health check failed for pool {self.pool_name}: {error_type}" + ) + + # Unhealthy + if self._enabled: + # OTel: report delta to get to 0 + health_delta = 0 - self._last_health + if health_delta != 0: + self._health_status.add(health_delta, self.base_attributes) + + self._health_check_failures.add( + 1, {**self.base_attributes, "error.type": error_type} + ) + self._last_health = 0 + + # StatsD: gauge for health status (0=unhealthy) + statsd.gauge("db.client.connection.health", 0, tags=base_tags) + # StatsD: increment failure counter + error_tags = base_tags + [f"error_type:{error_type}"] + statsd.increment( + "db.client.connection.health_check_failures", tags=error_tags + ) + + +class PostgresMetricsCollector: + """ + Unified collector that manages all PostgreSQL metrics for multiple pools. 
+ + Usage: + collector = PostgresMetricsCollector() + collector.register_engine(engine, "main", db_url, environment) + await collector.start_collection() # Starts background task + """ + + def __init__(self): + self._pool_metrics: dict[str, PostgresPoolMetrics] = {} + self._query_metrics: dict[str, PostgresQueryMetrics] = {} + self._health_metrics: dict[str, PostgresHealthMetrics] = {} + self._collection_task: asyncio.Task | None = None + + def register_engine( + self, + engine: AsyncEngine, + pool_name: str, + db_url: str, + environment: str, + service_name: str = "agentex", + ): + """Register an engine for metrics collection.""" + self._pool_metrics[pool_name] = PostgresPoolMetrics( + engine=engine, + pool_name=pool_name, + db_url=db_url, + environment=environment, + service_name=service_name, + ) + self._query_metrics[pool_name] = PostgresQueryMetrics( + engine=engine, + pool_name=pool_name, + db_url=db_url, + environment=environment, + service_name=service_name, + ) + self._health_metrics[pool_name] = PostgresHealthMetrics( + engine=engine, + pool_name=pool_name, + db_url=db_url, + environment=environment, + service_name=service_name, + ) + logger.info(f"Registered PostgreSQL metrics for pool: {pool_name}") + + async def collect_all_metrics(self): + """Collect all pool and health metrics once.""" + tasks = [] + + for metrics in self._pool_metrics.values(): + tasks.append(metrics.collect_pool_metrics()) + + for metrics in self._health_metrics.values(): + tasks.append(metrics.check_health()) + + await asyncio.gather(*tasks, return_exceptions=True) + + async def _collection_loop(self): + """Background loop that collects metrics periodically.""" + while True: + try: + await self.collect_all_metrics() + except Exception as e: + logger.error(f"Error in metrics collection loop: {e}") + await asyncio.sleep(_METRICS_DEBOUNCE_INTERVAL) + + async def start_collection(self): + """Start the background metrics collection task.""" + if self._collection_task is None: + self._collection_task = asyncio.create_task(self._collection_loop()) + logger.info("Started PostgreSQL metrics collection background task") + + async def stop_collection(self): + """Stop the background metrics collection task.""" + if self._collection_task is not None: + self._collection_task.cancel() + try: + await self._collection_task + except asyncio.CancelledError: + pass + self._collection_task = None + logger.info("Stopped PostgreSQL metrics collection background task") diff --git a/agentex/src/utils/otel_metrics.py b/agentex/src/utils/otel_metrics.py new file mode 100644 index 0000000..4f4cb45 --- /dev/null +++ b/agentex/src/utils/otel_metrics.py @@ -0,0 +1,172 @@ +""" +OpenTelemetry metrics configuration for Agentex. + +This module sets up the OTel MeterProvider with OTLP export for metrics. +Metrics are exported to an OTLP-compatible endpoint (e.g., OTel Collector, +Datadog Agent, or directly to Grafana Cloud/Mimir). 
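+
+Typical lifecycle (a sketch using this module's own helpers):
+
+    init_otel_metrics()                    # no-op unless OTEL_EXPORTER_OTLP_ENDPOINT is set
+    meter = get_meter("agentex.db.pool")   # returns None when OTel is disabled
+    ...
+    shutdown_otel_metrics()                # flushes and releases the provider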
+
+Environment Variables:
+    OTEL_EXPORTER_OTLP_ENDPOINT: OTLP endpoint URL (metrics are disabled when unset)
+    OTEL_EXPORTER_OTLP_HEADERS: Optional headers for authentication
+    OTEL_SERVICE_NAME: Service name for metrics (default: agentex)
+    OTEL_METRICS_EXPORT_INTERVAL_MS: Export interval in ms (default: 30000)
+"""
+
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING
+
+from opentelemetry import metrics
+from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
+from opentelemetry.sdk.resources import SERVICE_NAME, SERVICE_VERSION, Resource
+
+from src.utils.logging import make_logger
+
+if TYPE_CHECKING:
+    from opentelemetry.metrics import Meter
+
+logger = make_logger(__name__)
+
+# Global state
+_meter_provider: MeterProvider | None = None
+_initialized: bool = False
+
+# Default configuration (there is deliberately no default endpoint:
+# metrics stay disabled unless OTEL_EXPORTER_OTLP_ENDPOINT is set)
+DEFAULT_SERVICE_NAME = "agentex"
+DEFAULT_EXPORT_INTERVAL_MS = 30000  # 30 seconds
+
+
+def init_otel_metrics(
+    service_name: str | None = None,
+    service_version: str | None = None,
+    environment: str | None = None,
+    otlp_endpoint: str | None = None,
+    export_interval_ms: int | None = None,
+) -> MeterProvider | None:
+    """
+    Initialize OpenTelemetry metrics with OTLP exporter.
+
+    This should be called once at application startup. Subsequent calls
+    will return the existing MeterProvider.
+
+    NOTE: Only initializes if OTEL_EXPORTER_OTLP_ENDPOINT is configured.
+    Returns None if OTel is not configured.
+
+    Args:
+        service_name: Service name for resource attributes
+        service_version: Service version for resource attributes
+        environment: Deployment environment (e.g., "development", "production")
+        otlp_endpoint: OTLP gRPC endpoint URL
+        export_interval_ms: Metric export interval in milliseconds
+
+    Returns:
+        The configured MeterProvider, or None if not configured
+    """
+    global _meter_provider, _initialized
+
+    if _initialized:
+        return _meter_provider
+
+    # Check if OTLP endpoint is configured
+    otlp_endpoint = otlp_endpoint or os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT")
+    if not otlp_endpoint:
+        logger.info(
+            "OpenTelemetry metrics disabled: OTEL_EXPORTER_OTLP_ENDPOINT not configured"
+        )
+        _initialized = True
+        return None
+
+    # Resolve configuration from environment or defaults
+    service_name = (
+        service_name or os.environ.get("OTEL_SERVICE_NAME") or DEFAULT_SERVICE_NAME
+    )
+    service_version = service_version or os.environ.get("SERVICE_VERSION", "0.1.0")
+    environment = environment or os.environ.get("ENVIRONMENT", "development")
+    export_interval_ms = export_interval_ms or int(
+        os.environ.get("OTEL_METRICS_EXPORT_INTERVAL_MS", DEFAULT_EXPORT_INTERVAL_MS)
+    )
+
+    # Create resource with service information
+    resource = Resource.create(
+        {
+            SERVICE_NAME: service_name,
+            SERVICE_VERSION: service_version,
+            "deployment.environment": environment,
+        }
+    )
+
+    # Create OTLP exporter
+    # The exporter will use OTEL_EXPORTER_OTLP_HEADERS env var for auth if set
+    exporter = OTLPMetricExporter(
+        endpoint=otlp_endpoint,
+        insecure=otlp_endpoint.startswith("http://"),  # Use insecure for non-TLS
+    )
+
+    # Create periodic reader that exports at the specified interval
+    reader = PeriodicExportingMetricReader(
+        exporter=exporter,
+        export_interval_millis=export_interval_ms,
+    )
+
+    # Create and set the meter provider
+    _meter_provider = MeterProvider(
resource=resource, + metric_readers=[reader], + ) + metrics.set_meter_provider(_meter_provider) + + _initialized = True + logger.info( + f"OpenTelemetry metrics initialized: endpoint={otlp_endpoint}, " + f"service={service_name}, interval={export_interval_ms}ms" + ) + + return _meter_provider + + +def get_meter(name: str, version: str = "0.1.0") -> Meter | None: + """ + Get a meter for instrumenting a component. + + Args: + name: The name of the instrumentation scope (e.g., "db_metrics") + version: The version of the instrumentation + + Returns: + An OpenTelemetry Meter instance, or None if OTel is not configured + """ + global _initialized, _meter_provider + + if not _initialized: + # Auto-initialize with defaults if not already initialized + init_otel_metrics() + + # Return None if OTel is not configured + if _meter_provider is None: + return None + + return metrics.get_meter(name, version) + + +def shutdown_otel_metrics() -> None: + """ + Shutdown the meter provider, flushing any remaining metrics. + + Should be called during application shutdown. + """ + global _meter_provider, _initialized + + if _meter_provider is not None: + _meter_provider.shutdown() + logger.info("OpenTelemetry metrics shut down") + _meter_provider = None + _initialized = False + + +def is_otel_configured() -> bool: + """Check if an OTLP endpoint is configured via environment.""" + return bool(os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT")) diff --git a/uv.lock b/uv.lock index 708b274..7c598e7 100644 --- a/uv.lock +++ b/uv.lock @@ -58,6 +58,9 @@ dependencies = [ { name = "json-log-formatter" }, { name = "kubernetes-asyncio" }, { name = "litellm" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp" }, + { name = "opentelemetry-sdk" }, { name = "psycopg2-binary" }, { name = "pymongo" }, { name = "python-dotenv" }, @@ -113,6 +116,9 @@ requires-dist = [ { name = "json-log-formatter", specifier = ">=1.1.1" }, { name = "kubernetes-asyncio", specifier = ">=31.1.0,<32" }, { name = "litellm", specifier = ">=1.48.2,<2" }, + { name = "opentelemetry-api", specifier = ">=1.28.0" }, + { name = "opentelemetry-exporter-otlp", specifier = ">=1.28.0" }, + { name = "opentelemetry-sdk", specifier = ">=1.28.0" }, { name = "psycopg2-binary", specifier = ">=2.9.9,<3" }, { name = "pymongo", specifier = ">=4.11.2,<5" }, { name = "python-dotenv", specifier = ">=1.0.1,<2" }, @@ -753,6 +759,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/17/63/b19553b658a1692443c62bd07e5868adaa0ad746a0751ba62c59568cd45b/google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca", size = 216137, upload-time = "2025-06-04T18:04:55.573Z" }, ] +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + [[package]] name = "greenlet" 
version = "3.2.4" @@ -767,6 +785,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" }, + { url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, upload-time = "2025-11-04T12:42:17.175Z" }, { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, ] @@ -794,6 +814,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7a/bd/63e56c639e7a2f60955bbfd061a232a7c744f105f60b6ea4621d6d079beb/griffe_pydantic-1.1.7-py3-none-any.whl", hash = "sha256:516d6dbb6a6587bd0f70c2d23f1dc1b6e2e06eff7d9d37c2db9f0f60ea527af8", size = 12673, upload-time = "2025-09-05T16:11:37.542Z" }, ] +[[package]] +name = "grpcio" +version = "1.76.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b6/e0/318c1ce3ae5a17894d5791e87aea147587c9e702f24122cc7a5c8bbaeeb1/grpcio-1.76.0.tar.gz", hash = "sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73", size = 12785182, upload-time = "2025-10-21T16:23:12.106Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/05/8e29121994b8d959ffa0afd28996d452f291b48cfc0875619de0bde2c50c/grpcio-1.76.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:81fd9652b37b36f16138611c7e884eb82e0cec137c40d3ef7c3f9b3ed00f6ed8", size = 5799718, upload-time = "2025-10-21T16:21:17.939Z" }, + { url = "https://files.pythonhosted.org/packages/d9/75/11d0e66b3cdf998c996489581bdad8900db79ebd83513e45c19548f1cba4/grpcio-1.76.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:04bbe1bfe3a68bbfd4e52402ab7d4eb59d72d02647ae2042204326cf4bbad280", size = 11825627, upload-time = "2025-10-21T16:21:20.466Z" }, + { url = "https://files.pythonhosted.org/packages/28/50/2f0aa0498bc188048f5d9504dcc5c2c24f2eb1a9337cd0fa09a61a2e75f0/grpcio-1.76.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:d388087771c837cdb6515539f43b9d4bf0b0f23593a24054ac16f7a960be16f4", size = 6359167, upload-time = "2025-10-21T16:21:23.122Z" }, + { url = "https://files.pythonhosted.org/packages/66/e5/bbf0bb97d29ede1d59d6588af40018cfc345b17ce979b7b45424628dc8bb/grpcio-1.76.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f8f757bebaaea112c00dba718fc0d3260052ce714e25804a03f93f5d1c6cc11", size = 7044267, upload-time = "2025-10-21T16:21:25.995Z" }, + { url = "https://files.pythonhosted.org/packages/f5/86/f6ec2164f743d9609691115ae8ece098c76b894ebe4f7c94a655c6b03e98/grpcio-1.76.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:980a846182ce88c4f2f7e2c22c56aefd515daeb36149d1c897f83cf57999e0b6", size = 6573963, upload-time = "2025-10-21T16:21:28.631Z" }, + { url = "https://files.pythonhosted.org/packages/60/bc/8d9d0d8505feccfdf38a766d262c71e73639c165b311c9457208b56d92ae/grpcio-1.76.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f92f88e6c033db65a5ae3d97905c8fea9c725b63e28d5a75cb73b49bda5024d8", size = 7164484, upload-time = "2025-10-21T16:21:30.837Z" }, + { url = "https://files.pythonhosted.org/packages/67/e6/5d6c2fc10b95edf6df9b8f19cf10a34263b7fd48493936fffd5085521292/grpcio-1.76.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4baf3cbe2f0be3289eb68ac8ae771156971848bb8aaff60bad42005539431980", size = 8127777, upload-time = "2025-10-21T16:21:33.577Z" }, + { url = "https://files.pythonhosted.org/packages/3f/c8/dce8ff21c86abe025efe304d9e31fdb0deaaa3b502b6a78141080f206da0/grpcio-1.76.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:615ba64c208aaceb5ec83bfdce7728b80bfeb8be97562944836a7a0a9647d882", size = 7594014, upload-time = "2025-10-21T16:21:41.882Z" }, + { url = "https://files.pythonhosted.org/packages/e0/42/ad28191ebf983a5d0ecef90bab66baa5a6b18f2bfdef9d0a63b1973d9f75/grpcio-1.76.0-cp312-cp312-win32.whl", hash = "sha256:45d59a649a82df5718fd9527ce775fd66d1af35e6d31abdcdc906a49c6822958", size = 3984750, upload-time = "2025-10-21T16:21:44.006Z" }, + { url = "https://files.pythonhosted.org/packages/9e/00/7bd478cbb851c04a48baccaa49b75abaa8e4122f7d86da797500cccdd771/grpcio-1.76.0-cp312-cp312-win_amd64.whl", hash = "sha256:c088e7a90b6017307f423efbb9d1ba97a22aa2170876223f9709e9d1de0b5347", size = 4704003, upload-time = "2025-10-21T16:21:46.244Z" }, +] + [[package]] name = "h11" version = "0.16.0" @@ -1558,15 +1599,115 @@ wheels = [ [[package]] name = "opentelemetry-api" -version = "1.37.0" +version = "1.39.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "importlib-metadata" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/63/04/05040d7ce33a907a2a02257e601992f0cdf11c73b33f13c4492bf6c3d6d5/opentelemetry_api-1.37.0.tar.gz", hash = "sha256:540735b120355bd5112738ea53621f8d5edb35ebcd6fe21ada3ab1c61d1cd9a7", size = 64923, upload-time = "2025-09-11T10:29:01.662Z" } +sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" }, +] + 
+[[package]] +name = "opentelemetry-exporter-otlp" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-exporter-otlp-proto-grpc" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/9c/3ab1db90f32da200dba332658f2bbe602369e3d19f6aba394031a42635be/opentelemetry_exporter_otlp-1.39.1.tar.gz", hash = "sha256:7cf7470e9fd0060c8a38a23e4f695ac686c06a48ad97f8d4867bc9b420180b9c", size = 6147, upload-time = "2025-12-11T13:32:40.309Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/6c/bdc82a066e6fb1dcf9e8cc8d4e026358fe0f8690700cc6369a6bf9bd17a7/opentelemetry_exporter_otlp-1.39.1-py3-none-any.whl", hash = "sha256:68ae69775291f04f000eb4b698ff16ff685fdebe5cb52871bc4e87938a7b00fe", size = 7019, upload-time = "2025-12-11T13:32:19.387Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "grpcio" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/53/48/b329fed2c610c2c32c9366d9dc597202c9d1e58e631c137ba15248d8850f/opentelemetry_exporter_otlp_proto_grpc-1.39.1.tar.gz", hash = "sha256:772eb1c9287485d625e4dbe9c879898e5253fea111d9181140f51291b5fec3ad", size = 24650, upload-time = "2025-12-11T13:32:41.429Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/a3/cc9b66575bd6597b98b886a2067eea2693408d2d5f39dad9ab7fc264f5f3/opentelemetry_exporter_otlp_proto_grpc-1.39.1-py3-none-any.whl", hash = "sha256:fa1c136a05c7e9b4c09f739469cbdb927ea20b34088ab1d959a849b5cc589c18", size = 19766, upload-time = "2025-12-11T13:32:21.027Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/04/2a08fa9c0214ae38880df01e8bfae12b067ec0793446578575e5080d6545/opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb", size = 17288, upload-time = "2025-12-11T13:32:42.029Z" } +wheels 
= [ + { url = "https://files.pythonhosted.org/packages/95/f1/b27d3e2e003cd9a3592c43d099d2ed8d0a947c15281bf8463a256db0b46c/opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985", size = 19641, upload-time = "2025-12-11T13:32:22.248Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = "2025-12-11T13:32:48.681Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.39.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.60b1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/91/48/28ed9e55dcf2f453128df738210a980e09f4e468a456fa3c763dbc8be70a/opentelemetry_api-1.37.0-py3-none-any.whl", hash = "sha256:accf2024d3e89faec14302213bc39550ec0f4095d1cf5ca688e1bfb1c8612f47", size = 65732, upload-time = "2025-09-11T10:28:41.826Z" }, + { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" }, ] [[package]]
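A minimal end-to-end smoke test for the collector pipeline above (a sketch; assumes the compose stack is running with ports 4317 and 8889 published, and the file name and metric names are illustrative):

    # smoke_test_otel.py: push one counter through the OTel Collector,
    # then verify it on the collector's Prometheus endpoint.
    from opentelemetry import metrics
    from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
    from opentelemetry.sdk.metrics import MeterProvider
    from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

    exporter = OTLPMetricExporter(endpoint="http://localhost:4317", insecure=True)
    reader = PeriodicExportingMetricReader(exporter, export_interval_millis=1000)
    provider = MeterProvider(metric_readers=[reader])
    metrics.set_meter_provider(provider)

    counter = metrics.get_meter("smoke").create_counter("smoke.requests")
    counter.add(1, {"source": "manual"})
    provider.shutdown()  # flushes pending metrics before exit

    # The counter should then appear under the "agentex" namespace set in
    # otel-collector-config.yaml:
    #   curl -s http://localhost:8889/metrics | grep agentex_smoke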