feat: Add OpenTelemetry OTLP HTTP receiver

- Add POST /v1/traces endpoint for OTLP JSON trace ingestion
- Convert OTLP spans to internal format and save to PostgreSQL
- Manual JSON parsing (no Go 1.24 dependencies)
- Add Node.js instrumentation example with Express
- Add Python instrumentation example with Flask
- Auto-instrumentation support for both languages
2026-02-06 14:59:29 -03:00
parent 8b6e59d346
commit 771cf6cf50
11 changed files with 1053 additions and 0 deletions


@@ -0,0 +1,85 @@
# Python OpenTelemetry Example for Ophion
This example demonstrates how to instrument a Python Flask application with OpenTelemetry and send traces to Ophion.
## Setup
```bash
# Create virtual environment (recommended)
python -m venv venv
source venv/bin/activate # Linux/Mac
# or: venv\Scripts\activate # Windows
# Install dependencies
pip install -r requirements.txt
# Start Ophion server (in another terminal)
# cd ~/projetos_jarvis/ophion && go run cmd/server/main.go
# Run the app
python app.py
```
## Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `OTEL_EXPORTER_OTLP_ENDPOINT` | `http://localhost:8080` | Ophion base URL |
| `OTEL_SERVICE_NAME` | `python-example` | Service name in traces |
| `PORT` | `5000` | App HTTP port |
## Test Endpoints
```bash
# Health check
curl http://localhost:5000/health
# Get all users (generates trace)
curl http://localhost:5000/users
# Get single user
curl http://localhost:5000/users/1
# Create order (complex trace with nested spans)
curl -X POST http://localhost:5000/orders \
-H "Content-Type: application/json" \
-d '{"items": [{"id": 1, "qty": 2}]}'
# Trigger error (error trace)
curl http://localhost:5000/error
# External HTTP call (distributed tracing)
curl http://localhost:5000/external-call
```
## View Traces in Ophion
```bash
# List recent traces
curl http://localhost:8080/api/v1/traces
# Get specific trace
curl http://localhost:8080/api/v1/traces/<trace_id>
```
## Auto-Instrumentation Alternative
You can also use OpenTelemetry's auto-instrumentation:
```bash
# Install auto-instrumentation
pip install opentelemetry-distro
opentelemetry-bootstrap -a install
# Run with auto-instrumentation
OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:8080 \
OTEL_SERVICE_NAME=python-example \
opentelemetry-instrument python app.py
```
## How It Works
1. `tracing.py` - Initializes OpenTelemetry SDK with OTLP HTTP exporter
2. `FlaskInstrumentor` auto-captures HTTP requests
3. Manual spans in `app.py` add custom business logic traces
4. All spans are sent to Ophion's `/v1/traces` endpoint in OTLP proto/JSON format (a minimal payload sketch follows below)
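
For reference, the snippet below posts a single hand-built span straight to the receiver, which is a quick way to smoke-test the endpoint without running the Flask app. It is only a minimal sketch, assuming the receiver accepts standard OTLP/JSON field names (`resourceSpans`, `scopeSpans`, hex-encoded `traceId`/`spanId`); the IDs, timestamps, and file name here are made up for illustration.

```python
# minimal_otlp_post.py - hand-built OTLP/JSON span, for smoke-testing /v1/traces (illustrative only)
import json
import os
import time

import requests

base = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:8080")
now = time.time_ns()

payload = {
    "resourceSpans": [{
        "resource": {"attributes": [
            {"key": "service.name", "value": {"stringValue": "smoke-test"}},
        ]},
        "scopeSpans": [{
            "scope": {"name": "manual-test"},
            "spans": [{
                "traceId": "5b8efff798038103d269b633813fc60c",  # 16-byte trace id, hex-encoded
                "spanId": "eee19b7ec3c1b174",                    # 8-byte span id, hex-encoded
                "name": "smoke-test-span",
                "kind": 2,  # SPAN_KIND_SERVER
                "startTimeUnixNano": str(now - 50_000_000),
                "endTimeUnixNano": str(now),
                "attributes": [
                    {"key": "http.method", "value": {"stringValue": "GET"}},
                ],
                "status": {"code": 1},  # STATUS_CODE_OK
            }],
        }],
    }],
}

resp = requests.post(
    f"{base}/v1/traces",
    data=json.dumps(payload),
    headers={"Content-Type": "application/json"},
    timeout=5,
)
print(resp.status_code, resp.text)
```

If the receiver accepted the span, it should then show up via `curl http://localhost:8080/api/v1/traces`.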

examples/otel-python/app.py

@@ -0,0 +1,168 @@
"""
═══════════════════════════════════════════════════════════
📱 Example Flask App with OpenTelemetry
═══════════════════════════════════════════════════════════
Run with:
    python app.py

Or with auto-instrumentation:
    opentelemetry-instrument python app.py
"""
import os
import time
import random
import atexit

from flask import Flask, jsonify, request
from opentelemetry import trace
from opentelemetry.trace import Status, StatusCode
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor

# Initialize tracing BEFORE creating the Flask app
from tracing import init_tracing, get_tracer, shutdown

init_tracing()
tracer = get_tracer()

app = Flask(__name__)

# Auto-instrument Flask (server spans) and requests (client spans + trace context propagation)
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()

# Register shutdown handler so buffered spans are flushed on exit
atexit.register(shutdown)

# Simulated database
users = [
    {"id": 1, "name": "Alice", "email": "alice@example.com"},
    {"id": 2, "name": "Bob", "email": "bob@example.com"},
    {"id": 3, "name": "Charlie", "email": "charlie@example.com"},
]
@app.route('/health')
def health():
    """Health check endpoint."""
    return jsonify({"status": "healthy", "service": "python-example"})


@app.route('/users')
def get_users():
    """Get all users with a custom span."""
    with tracer.start_as_current_span("db.query.users") as span:
        span.set_attribute("db.system", "memory")
        span.set_attribute("db.operation", "SELECT")
        # Simulate database latency
        time.sleep(random.uniform(0.01, 0.1))
        span.set_attribute("db.row_count", len(users))
        span.set_status(Status(StatusCode.OK))
        return jsonify({"users": users})


@app.route('/users/<int:user_id>')
def get_user(user_id: int):
    """Get a user by ID."""
    with tracer.start_as_current_span("db.query.user_by_id") as span:
        span.set_attribute("db.system", "memory")
        span.set_attribute("db.operation", "SELECT")
        span.set_attribute("user.id", user_id)
        # Simulate database latency
        time.sleep(random.uniform(0.01, 0.05))
        user = next((u for u in users if u["id"] == user_id), None)
        if not user:
            span.set_status(Status(StatusCode.ERROR, "User not found"))
            return jsonify({"error": "User not found"}), 404
        span.set_status(Status(StatusCode.OK))
        return jsonify({"user": user})
@app.route('/orders', methods=['POST'])
def create_order():
    """Create an order with nested spans."""
    with tracer.start_as_current_span("order.create") as parent_span:
        try:
            data = request.get_json() or {}
            items = data.get("items", [])

            # Step 1: Validate inventory
            with tracer.start_as_current_span("inventory.check") as span:
                span.set_attribute("order.items", len(items))
                time.sleep(random.uniform(0.05, 0.1))
                span.set_status(Status(StatusCode.OK))

            # Step 2: Process payment
            with tracer.start_as_current_span("payment.process") as span:
                span.set_attribute("payment.method", "credit_card")
                time.sleep(random.uniform(0.1, 0.2))
                span.set_status(Status(StatusCode.OK))

            # Step 3: Create the order record
            with tracer.start_as_current_span("db.insert.order") as span:
                time.sleep(random.uniform(0.02, 0.05))
                order_id = hex(int(time.time() * 1000))[2:]
                span.set_attribute("order.id", order_id)
                span.set_status(Status(StatusCode.OK))

            parent_span.set_status(Status(StatusCode.OK))
            return jsonify({"orderId": order_id, "status": "created"})
        except Exception as e:
            parent_span.set_status(Status(StatusCode.ERROR, str(e)))
            parent_span.record_exception(e)
            return jsonify({"error": str(e)}), 500
@app.route('/error')
def trigger_error():
    """Trigger an error for testing."""
    span = trace.get_current_span()
    error = Exception("Simulated error for testing")
    span.record_exception(error)
    span.set_status(Status(StatusCode.ERROR, str(error)))
    return jsonify({"error": str(error)}), 500


@app.route('/external-call')
def external_call():
    """Make an external HTTP call (demonstrates distributed tracing)."""
    import requests

    with tracer.start_as_current_span("external.http.call") as span:
        span.set_attribute("http.url", "https://httpbin.org/get")
        try:
            # With requests instrumentation enabled, this call carries the trace context downstream
            response = requests.get("https://httpbin.org/get", timeout=5)
            span.set_attribute("http.status_code", response.status_code)
            span.set_status(Status(StatusCode.OK))
            return jsonify({"status": "ok", "external_status": response.status_code})
        except Exception as e:
            span.record_exception(e)
            span.set_status(Status(StatusCode.ERROR, str(e)))
            return jsonify({"error": str(e)}), 500
if __name__ == '__main__':
    port = int(os.getenv('PORT', '5000'))
    print(f"""
🚀 Example app listening on http://localhost:{port}

Try these endpoints:
  GET  http://localhost:{port}/health
  GET  http://localhost:{port}/users
  GET  http://localhost:{port}/users/1
  POST http://localhost:{port}/orders
  GET  http://localhost:{port}/error
  GET  http://localhost:{port}/external-call
""")
    app.run(host='0.0.0.0', port=port, debug=False)


@@ -0,0 +1,15 @@
# OpenTelemetry SDK and API
opentelemetry-api>=1.22.0
opentelemetry-sdk>=1.22.0
# OTLP HTTP Exporter
opentelemetry-exporter-otlp-proto-http>=1.22.0
# Auto-instrumentation
opentelemetry-instrumentation>=0.43b0
opentelemetry-instrumentation-flask>=0.43b0
opentelemetry-instrumentation-requests>=0.43b0
# Web framework
flask>=3.0.0
requests>=2.31.0


@@ -0,0 +1,84 @@
"""
═══════════════════════════════════════════════════════════
🔭 OpenTelemetry Tracing Setup for Ophion
═══════════════════════════════════════════════════════════
This module initializes OpenTelemetry tracing and sends spans
to Ophion's OTLP HTTP endpoint.
Usage:
    from tracing import init_tracing, get_tracer

    init_tracing()
    tracer = get_tracer()
"""
import os
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource, SERVICE_NAME, SERVICE_VERSION, DEPLOYMENT_ENVIRONMENT
# Global tracer
_tracer = None
def init_tracing(service_name: str = None) -> None:
    """
    Initialize OpenTelemetry tracing with an OTLP HTTP exporter.

    Args:
        service_name: Name of the service (default: from env or 'python-example')
    """
    global _tracer

    # Get configuration from environment
    otlp_endpoint = os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT', 'http://localhost:8080')
    service = service_name or os.getenv('OTEL_SERVICE_NAME', 'python-example')
    environment = os.getenv('DEPLOYMENT_ENVIRONMENT', 'development')

    # Create a resource describing this service
    resource = Resource.create({
        SERVICE_NAME: service,
        SERVICE_VERSION: '1.0.0',
        DEPLOYMENT_ENVIRONMENT: environment,
    })

    # Create the tracer provider
    provider = TracerProvider(resource=resource)

    # Configure the OTLP HTTP exporter.
    # Note: when the endpoint is passed explicitly, the exporter uses it as the
    # full traces URL, so append /v1/traces to the base URL here.
    exporter = OTLPSpanExporter(
        endpoint=f"{otlp_endpoint}/v1/traces",
        # headers={"Authorization": f"Bearer {os.getenv('OPHION_API_KEY', '')}"},
    )

    # Add a batch processor for efficient sending
    processor = BatchSpanProcessor(exporter)
    provider.add_span_processor(processor)

    # Set as the global tracer provider
    trace.set_tracer_provider(provider)

    # Create the tracer
    _tracer = trace.get_tracer(__name__)

    print("🔭 OpenTelemetry tracing initialized")
    print(f"   Service:  {service}")
    print(f"   Endpoint: {otlp_endpoint}/v1/traces")
def get_tracer():
    """Get the initialized tracer."""
    global _tracer
    if _tracer is None:
        init_tracing()
    return _tracer


def shutdown():
    """Gracefully shut down the tracer provider."""
    provider = trace.get_tracer_provider()
    if hasattr(provider, 'shutdown'):
        provider.shutdown()