fix: add agent key auth for ingest endpoints

This commit is contained in:
2026-02-06 19:13:30 -03:00
parent 615a8b5404
commit 6038e82b22
18 changed files with 1244 additions and 5 deletions

View File

@@ -103,7 +103,7 @@ func loadConfig() *Config {
return &Config{ return &Config{
ServerURL: getEnv("OPHION_SERVER", "http://localhost:8080"), ServerURL: getEnv("OPHION_SERVER", "http://localhost:8080"),
APIKey: getEnv("OPHION_API_KEY", ""), APIKey: getEnv("OPHION_API_KEY", getEnv("AGENT_KEY", "")),
Hostname: getEnv("OPHION_HOSTNAME", hostname), Hostname: getEnv("OPHION_HOSTNAME", hostname),
CollectInterval: interval, CollectInterval: interval,
DockerEnabled: dockerEnabled, DockerEnabled: dockerEnabled,

View File

@@ -307,10 +307,10 @@ func (s *Server) setupRoutes() {
ingest.Post("/logs", s.ingestLogs) ingest.Post("/logs", s.ingestLogs)
ingest.Post("/traces", s.ingestTraces) ingest.Post("/traces", s.ingestTraces)
// Legacy ingest routes (also protected, for backwards compat) // Legacy ingest routes (agent key auth for simplicity)
api.Post("/metrics", s.authMiddleware(), s.ingestMetrics) api.Post("/metrics", s.agentAuthMiddleware(), s.ingestMetrics)
api.Post("/logs", s.authMiddleware(), s.ingestLogs) api.Post("/logs", s.agentAuthMiddleware(), s.ingestLogs)
api.Post("/traces", s.authMiddleware(), s.ingestTraces) api.Post("/traces", s.agentAuthMiddleware(), s.ingestTraces)
// Protected routes // Protected routes
protected := api.Group("", s.authMiddleware()) protected := api.Group("", s.authMiddleware())
@@ -894,3 +894,32 @@ func parseInt(s string, def int) int {
} }
return def return def
} }
// agentAuthMiddleware returns a fiber handler that authenticates ingest
// requests against a shared agent key read from the AGENT_KEY env var at
// setup time. The key may arrive either as a "Bearer" token in the
// Authorization header or in the X-Agent-Key header.
//
// NOTE(review): the comparisons below use ==, which is not constant-time;
// crypto/subtle.ConstantTimeCompare would close the timing side channel.
// Also note that TrimPrefix is a no-op when the header lacks "Bearer ",
// so a raw key in Authorization is accepted too.
func (s *Server) agentAuthMiddleware() fiber.Handler {
	expected := getEnv("AGENT_KEY", "")
	return func(c *fiber.Ctx) error {
		// No key configured: auth is disabled (backwards compat).
		if expected == "" {
			return c.Next()
		}
		bearer := strings.TrimPrefix(c.Get("Authorization"), "Bearer ")
		if bearer == expected || c.Get("X-Agent-Key") == expected {
			return c.Next()
		}
		return c.Status(401).JSON(fiber.Map{
			"error":   "Unauthorized",
			"message": "Invalid agent key",
		})
	}
}

View File

@@ -11,6 +11,7 @@ services:
- REDIS_URL=redis://redis:6379 - REDIS_URL=redis://redis:6379
- JWT_SECRET=ophion-jwt-secret-change-in-production - JWT_SECRET=ophion-jwt-secret-change-in-production
- ADMIN_PASSWORD=ophion123 - ADMIN_PASSWORD=ophion123
- AGENT_KEY=ophion-agent-2024
depends_on: depends_on:
- postgres - postgres
- redis - redis

View File

@@ -0,0 +1,123 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - OpenTelemetry Collector Configuration
# Receives traces/metrics/logs from instrumented applications
# ═══════════════════════════════════════════════════════════
receivers:
# OTLP receiver - accepts data from any OTLP-compatible SDK
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
cors:
allowed_origins:
- "*"
# Prometheus receiver for metrics scraping (optional)
prometheus:
config:
scrape_configs:
- job_name: 'otel-collector'
scrape_interval: 15s
static_configs:
- targets: ['localhost:8888']
processors:
# Batch processor for better performance
batch:
timeout: 5s
send_batch_size: 512
send_batch_max_size: 1024
# Memory limiter to prevent OOM
memory_limiter:
check_interval: 1s
limit_percentage: 80
spike_limit_percentage: 25
# Resource processor to add common attributes
resource:
attributes:
- key: collector.name
value: ophion-collector
action: upsert
- key: deployment.environment
from_attribute: OTEL_RESOURCE_ATTRIBUTES
action: upsert
# Attributes processor for enrichment
attributes:
actions:
- key: ophion.collected
value: true
action: upsert
exporters:
# Export to Ophion server via OTLP
otlphttp/ophion:
endpoint: http://server:8080
headers:
X-Ophion-Source: otel-collector
compression: gzip
retry_on_failure:
enabled: true
initial_interval: 5s
max_interval: 30s
max_elapsed_time: 300s
# Debug exporter for troubleshooting (disable in production)
debug:
verbosity: basic
sampling_initial: 5
sampling_thereafter: 200
# Prometheus exporter for collector metrics
prometheus:
endpoint: 0.0.0.0:8889
namespace: ophion_collector
extensions:
# Health check extension
health_check:
endpoint: 0.0.0.0:13133
path: /health
# Performance profiler
pprof:
endpoint: 0.0.0.0:1777
# zPages for debugging
zpages:
endpoint: 0.0.0.0:55679
service:
extensions: [health_check, pprof, zpages]
pipelines:
# Traces pipeline
traces:
receivers: [otlp]
processors: [memory_limiter, batch, resource, attributes]
exporters: [otlphttp/ophion, debug]
# Metrics pipeline
metrics:
receivers: [otlp, prometheus]
processors: [memory_limiter, batch, resource]
exporters: [otlphttp/ophion, debug]
# Logs pipeline
logs:
receivers: [otlp]
processors: [memory_limiter, batch, resource]
exporters: [otlphttp/ophion, debug]
telemetry:
logs:
level: info
encoding: json
metrics:
level: detailed
address: 0.0.0.0:8888

View File

@@ -0,0 +1,41 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - OpenTelemetry Collector Service
# Standalone compose file for the OTEL Collector
# ═══════════════════════════════════════════════════════════
version: '3.8'
services:
otel-collector:
image: otel/opentelemetry-collector-contrib:0.96.0
container_name: ophion-otel-collector
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./otel-collector-config.yaml:/etc/otel-collector-config.yaml:ro
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "8888:8888" # Prometheus metrics exposed by the collector
- "8889:8889" # Prometheus exporter metrics
- "13133:13133" # Health check extension
- "55679:55679" # zPages extension
environment:
- OTEL_RESOURCE_ATTRIBUTES=service.name=ophion-collector,service.version=1.0.0
restart: unless-stopped
networks:
- ophion
healthcheck:
# NOTE(review): recent otel/opentelemetry-collector-contrib images are
# distroless (no shell or wget); confirm this check works for 0.96.0 or
# probe the health_check extension port from outside the container instead.
test: ["CMD", "wget", "-q", "--spider", "http://localhost:13133/health"]
interval: 10s
timeout: 5s
retries: 3
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 128M
networks:
ophion:
external: true

View File

@@ -113,6 +113,39 @@ services:
timeout: 5s timeout: 5s
retries: 5 retries: 5
# ─────────────────────────────────────────────────────────
# OpenTelemetry Collector (Traces, Metrics, Logs)
# ─────────────────────────────────────────────────────────
otel-collector:
image: otel/opentelemetry-collector-contrib:0.96.0
container_name: ophion-otel-collector
command: ["--config=/etc/otel-collector-config.yaml"]
volumes:
- ./deploy/docker/otel-collector-config.yaml:/etc/otel-collector-config.yaml:ro
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
- "8889:8889" # Prometheus exporter metrics
- "13133:13133" # Health check extension
environment:
- OTEL_RESOURCE_ATTRIBUTES=service.name=ophion-collector,service.version=1.0.0
depends_on:
- server
restart: unless-stopped
networks:
- ophion
healthcheck:
# NOTE(review): recent otel/opentelemetry-collector-contrib images are
# distroless (no shell or wget); confirm this check works for 0.96.0 or
# probe the health_check extension port from outside the container instead.
test: ["CMD", "wget", "-q", "--spider", "http://localhost:13133/health"]
interval: 10s
timeout: 5s
retries: 3
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 128M
networks: networks:
ophion: ophion:
driver: bridge driver: bridge

415
docs/INSTRUMENTACAO.md Normal file
View File

@@ -0,0 +1,415 @@
# 🐍 Ophion - Guia de Instrumentação OpenTelemetry
Este guia explica como adicionar auto-instrumentação OpenTelemetry a qualquer container Docker, enviando traces, métricas e logs para o Ophion.
## 📋 Índice
- [Arquitetura](#arquitetura)
- [Setup Rápido](#setup-rápido)
- [Variáveis de Ambiente](#variáveis-de-ambiente)
- [Instrumentação por Linguagem](#instrumentação-por-linguagem)
- [Node.js](#nodejs)
- [Python](#python)
- [Java](#java)
- [Go](#go)
- [.NET](#net)
- [Instrumentação de Containers Existentes](#instrumentação-de-containers-existentes)
- [Troubleshooting](#troubleshooting)
---
## 🏗️ Arquitetura
```
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ Your App 1 │ │ Your App 2 │ │ Your App N │
│ (Node.js/Python/│ │ (Java/Go/.NET) │ │ (Any language) │
│ etc.) │ │ │ │ │
└────────┬────────┘ └────────┬────────┘ └────────┬────────┘
│ │ │
│ OTLP (gRPC/HTTP) │ │
│ │ │
└───────────────────────┼───────────────────────┘
┌────────────▼────────────┐
│ OpenTelemetry │
│ Collector │
│ (otel-collector) │
│ :4317 (gRPC) │
│ :4318 (HTTP) │
└────────────┬────────────┘
┌────────────▼────────────┐
│ Ophion Server │
│ (server:8080) │
│ │
│ Traces │ Metrics │ │
│ Logs │ Alerts │
└─────────────────────────┘
```
---
## 🚀 Setup Rápido
### 1. Inicie o Ophion com o Collector
```bash
cd ~/projetos_jarvis/ophion
docker-compose up -d
```
O `otel-collector` estará disponível em:
- **gRPC:** `otel-collector:4317`
- **HTTP:** `otel-collector:4318`
### 2. Configure seu container
Adicione estas variáveis de ambiente ao seu serviço:
```yaml
environment:
- OTEL_SERVICE_NAME=meu-servico
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_TRACES_EXPORTER=otlp
- OTEL_METRICS_EXPORTER=otlp
- OTEL_LOGS_EXPORTER=otlp
```
### 3. Conecte à rede do Ophion
```yaml
networks:
- ophion
```
---
## 🔧 Variáveis de Ambiente
### Essenciais
| Variável | Descrição | Exemplo |
|----------|-----------|---------|
| `OTEL_SERVICE_NAME` | Nome do seu serviço | `api-gateway` |
| `OTEL_EXPORTER_OTLP_ENDPOINT` | Endpoint do collector | `http://otel-collector:4318` |
| `OTEL_TRACES_EXPORTER` | Exporter de traces | `otlp` |
### Opcionais (Recomendadas)
| Variável | Descrição | Default |
|----------|-----------|---------|
| `OTEL_SERVICE_VERSION` | Versão do serviço | - |
| `OTEL_EXPORTER_OTLP_PROTOCOL` | Protocolo OTLP | `http/protobuf` |
| `OTEL_METRICS_EXPORTER` | Exporter de métricas | `none` |
| `OTEL_LOGS_EXPORTER` | Exporter de logs | `none` |
| `OTEL_RESOURCE_ATTRIBUTES` | Atributos extras | `env=prod` |
| `OTEL_TRACES_SAMPLER` | Tipo de sampler | `parentbased_always_on` |
| `OTEL_TRACES_SAMPLER_ARG` | Argumento do sampler | `1.0` |
| `OTEL_PROPAGATORS` | Formatos de propagação | `tracecontext,baggage` |
### Exemplo Completo
```yaml
environment:
# Identificação
- OTEL_SERVICE_NAME=meu-servico
- OTEL_SERVICE_VERSION=1.2.3
# Endpoint
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
# Exporters
- OTEL_TRACES_EXPORTER=otlp
- OTEL_METRICS_EXPORTER=otlp
- OTEL_LOGS_EXPORTER=otlp
# Recursos
- OTEL_RESOURCE_ATTRIBUTES=deployment.environment=production,team=backend
# Sampling (100% em dev, ajuste em prod)
- OTEL_TRACES_SAMPLER=parentbased_traceidratio
- OTEL_TRACES_SAMPLER_ARG=1.0
# Propagação (trace context para outros serviços)
- OTEL_PROPAGATORS=tracecontext,baggage,b3multi
```
---
## 📦 Instrumentação por Linguagem
### Node.js
**Auto-instrumentação sem alterar código:**
1. Instale os pacotes:
```bash
npm install @opentelemetry/auto-instrumentations-node @opentelemetry/api
```
2. Configure no docker-compose.yml:
```yaml
environment:
- OTEL_SERVICE_NAME=minha-api-node
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_TRACES_EXPORTER=otlp
- NODE_OPTIONS=--require @opentelemetry/auto-instrumentations-node/register
```
**Instrumentações automáticas incluídas:**
- HTTP/Express/Fastify
- MongoDB/PostgreSQL/MySQL
- Redis/Memcached
- gRPC
- AWS SDK
### Python
**Auto-instrumentação sem alterar código:**
1. Instale os pacotes:
```bash
pip install opentelemetry-distro opentelemetry-exporter-otlp
opentelemetry-bootstrap -a install
```
2. Execute com o wrapper:
```dockerfile
CMD ["opentelemetry-instrument", "python", "app.py"]
```
3. Configure no docker-compose.yml:
```yaml
environment:
- OTEL_SERVICE_NAME=minha-api-python
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_TRACES_EXPORTER=otlp
- OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
```
**Instrumentações automáticas incluídas:**
- Flask/Django/FastAPI
- Requests/HTTPX/aiohttp
- SQLAlchemy/psycopg2
- Redis/Celery
- boto3/botocore
### Java
**Auto-instrumentação via Java Agent:**
1. Baixe o agent no Dockerfile:
```dockerfile
RUN wget https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/latest/download/opentelemetry-javaagent.jar \
-O /opt/opentelemetry-javaagent.jar
```
2. Configure no docker-compose.yml:
```yaml
environment:
- OTEL_SERVICE_NAME=minha-api-java
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_TRACES_EXPORTER=otlp
- JAVA_TOOL_OPTIONS=-javaagent:/opt/opentelemetry-javaagent.jar
```
**Instrumentações automáticas incluídas:**
- Spring Boot/Spring MVC
- JAX-RS/Jersey
- Hibernate/JPA
- JDBC (PostgreSQL, MySQL, etc.)
- Kafka/RabbitMQ
- gRPC
### Go
Go requer instrumentação explícita no código, mas é simples:
```go
import (
    "context"
    "os"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
    "go.opentelemetry.io/otel/sdk/resource"
    "go.opentelemetry.io/otel/sdk/trace"
    semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)
func initTracer() (*trace.TracerProvider, error) {
exporter, err := otlptracehttp.New(context.Background())
if err != nil {
return nil, err
}
tp := trace.NewTracerProvider(
trace.WithBatcher(exporter),
trace.WithResource(resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceName(os.Getenv("OTEL_SERVICE_NAME")),
)),
)
otel.SetTracerProvider(tp)
return tp, nil
}
```
### .NET
**Auto-instrumentação via .NET OpenTelemetry:**
1. Adicione os pacotes:
```bash
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
```
2. Configure no Program.cs:
```csharp
builder.Services.AddOpenTelemetry()
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddHttpClientInstrumentation()
.AddOtlpExporter());
```
3. Configure no docker-compose.yml:
```yaml
environment:
- OTEL_SERVICE_NAME=minha-api-dotnet
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
```
---
## 🔄 Instrumentação de Containers Existentes
### Método 1: Modificar docker-compose.yml
Adicione ao seu serviço existente:
```yaml
services:
meu-servico-existente:
# ... configuração existente ...
environment:
# Adicione estas linhas:
- OTEL_SERVICE_NAME=meu-servico
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_TRACES_EXPORTER=otlp
# Para Node.js:
- NODE_OPTIONS=--require @opentelemetry/auto-instrumentations-node/register
# Para Java:
# - JAVA_TOOL_OPTIONS=-javaagent:/opt/opentelemetry-javaagent.jar
networks:
- ophion
networks:
ophion:
external: true
```
### Método 2: Docker Compose Override
Crie um arquivo `docker-compose.otel.yml`:
```yaml
version: '3.8'
services:
meu-servico:
environment:
- OTEL_SERVICE_NAME=meu-servico
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_TRACES_EXPORTER=otlp
networks:
- ophion
networks:
ophion:
external: true
```
Execute com:
```bash
docker-compose -f docker-compose.yml -f docker-compose.otel.yml up -d
```
### Método 3: Sidecar Pattern
Para aplicações que não suportam modificação:
```yaml
services:
meu-servico:
# Aplicação original sem modificação
otel-proxy:
image: otel/opentelemetry-collector-contrib:latest
command: ["--config=/etc/otel-config.yaml"]
volumes:
- ./otel-proxy-config.yaml:/etc/otel-config.yaml
depends_on:
- meu-servico
```
---
## 🔍 Troubleshooting
### Traces não aparecem no Ophion
1. **Verifique conectividade:**
```bash
docker exec -it seu-container wget -q -O- http://otel-collector:13133/health
```
2. **Verifique logs do collector:**
```bash
docker logs ophion-otel-collector
```
3. **Verifique se as env vars estão corretas:**
```bash
docker exec -it seu-container env | grep OTEL
```
### Erros de conexão
- Certifique-se que o container está na rede `ophion`
- Use `otel-collector` (nome do serviço) e não `localhost`
- Porta 4318 para HTTP, 4317 para gRPC
### Muitos traces (custo alto)
Ajuste o sampling:
```yaml
environment:
- OTEL_TRACES_SAMPLER=parentbased_traceidratio
- OTEL_TRACES_SAMPLER_ARG=0.1 # 10% dos traces
```
### Traces incompletos
Verifique propagação entre serviços:
```yaml
environment:
- OTEL_PROPAGATORS=tracecontext,baggage,b3multi
```
---
## 📚 Exemplos
Veja os exemplos completos em:
- `examples/docker/nodejs-instrumented/` - Node.js
- `examples/docker/python-instrumented/` - Python
- `examples/docker/java-instrumented/` - Java
---
## 🆘 Suporte
- **Documentação OpenTelemetry:** https://opentelemetry.io/docs/
- **Ophion Issues:** Abra uma issue no repositório
- **Collector Debug:** Use `debug` exporter para troubleshooting

View File

@@ -0,0 +1,43 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - Java Instrumented App Dockerfile
# Example showing how to add OpenTelemetry to any Java app
# ═══════════════════════════════════════════════════════════
# Build stage
FROM eclipse-temurin:21-jdk-alpine AS builder
WORKDIR /app
# Copy Maven/Gradle files (if using build tool)
COPY pom.xml* build.gradle* ./
# Copy source code
COPY src/ ./src/
# For simple example, compile directly
RUN mkdir -p target/classes && \
javac -d target/classes src/main/java/*.java
# Package as JAR
RUN cd target/classes && \
jar cfe ../app.jar Main .
# Runtime stage
FROM eclipse-temurin:21-jre-alpine
WORKDIR /app
# Download OpenTelemetry Java Agent
ARG OTEL_AGENT_VERSION=2.1.0
RUN wget -q https://github.com/open-telemetry/opentelemetry-java-instrumentation/releases/download/v${OTEL_AGENT_VERSION}/opentelemetry-javaagent.jar \
-O /opt/opentelemetry-javaagent.jar
# Copy the built application
COPY --from=builder /app/target/app.jar .
# Expose port
EXPOSE 8080
# Start with OpenTelemetry Java Agent
# The JAVA_TOOL_OPTIONS env var in docker-compose.yml enables the agent
CMD ["java", "-jar", "app.jar"]

View File

@@ -0,0 +1,62 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - Java Auto-Instrumentation Example
# Demonstrates automatic tracing for Java applications
# ═══════════════════════════════════════════════════════════
#
# Usage:
# docker-compose up -d
#
# This example shows how to instrument ANY Java app without
# code changes using the OpenTelemetry Java Agent
# ═══════════════════════════════════════════════════════════
version: '3.8'
services:
java-app:
build:
context: .
dockerfile: Dockerfile
ports:
- "8081:8080"
environment:
# ════════════════════════════════════════════════════
# OpenTelemetry Auto-Instrumentation Configuration
# ════════════════════════════════════════════════════
# Service identification
- OTEL_SERVICE_NAME=java-example-app
- OTEL_SERVICE_VERSION=1.0.0
# OTLP Exporter configuration (pointing to Ophion collector)
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
- OTEL_TRACES_EXPORTER=otlp
- OTEL_METRICS_EXPORTER=otlp
- OTEL_LOGS_EXPORTER=otlp
# Resource attributes
- OTEL_RESOURCE_ATTRIBUTES=deployment.environment=development,service.namespace=ophion-examples
# Sampling (1.0 = 100% of traces)
- OTEL_TRACES_SAMPLER=parentbased_traceidratio
- OTEL_TRACES_SAMPLER_ARG=1.0
# Propagation format
- OTEL_PROPAGATORS=tracecontext,baggage,b3multi
# Java Agent path (set in Dockerfile JAVA_TOOL_OPTIONS)
- JAVA_TOOL_OPTIONS=-javaagent:/opt/opentelemetry-javaagent.jar
networks:
- ophion
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:8080/health"]
interval: 10s
timeout: 5s
retries: 3
networks:
ophion:
external: true

View File

@@ -0,0 +1,115 @@
/**
* ═══════════════════════════════════════════════════════════
* 🐍 OPHION - Example Java Application
* This app is automatically instrumented by OpenTelemetry Java Agent
* No code changes needed - just the agent JAR!
* ═══════════════════════════════════════════════════════════
*/
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Executors;
import java.util.logging.Logger;
/**
 * Minimal JDK-built-in HTTP server exposing four JSON endpoints
 * (/, /health, /api/users, /api/slow). It is traced entirely by the
 * OpenTelemetry Java Agent configured via env vars — no SDK code here.
 */
public class Main {
    private static final Logger logger = Logger.getLogger(Main.class.getName());
    /** Port the HTTP server listens on (mapped by docker-compose). */
    private static final int PORT = 8080;

    public static void main(String[] args) throws IOException {
        HttpServer server = HttpServer.create(new InetSocketAddress(PORT), 0);
        // Routes
        server.createContext("/", new HomeHandler());
        server.createContext("/health", new HealthHandler());
        server.createContext("/api/users", new UsersHandler());
        server.createContext("/api/slow", new SlowHandler());
        server.setExecutor(Executors.newFixedThreadPool(10));
        server.start();
        logger.info("🚀 Java server running on port " + PORT);
        logger.info("📊 OTEL Endpoint: " + System.getenv("OTEL_EXPORTER_OTLP_ENDPOINT"));
        logger.info("🏷️ Service Name: " + System.getenv("OTEL_SERVICE_NAME"));
    }

    /** Returns a JSON banner echoing the configured OTLP endpoint. */
    static class HomeHandler implements HttpHandler {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            logger.info("Home endpoint called");
            String response = """
                {
                  "message": "Hello from Java!",
                  "instrumented": true,
                  "otelEndpoint": "%s"
                }
                """.formatted(System.getenv("OTEL_EXPORTER_OTLP_ENDPOINT"));
            sendResponse(exchange, 200, response);
        }
    }

    /** Liveness probe used by the docker-compose healthcheck. */
    static class HealthHandler implements HttpHandler {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            String response = """
                {
                  "status": "healthy",
                  "service": "java-example"
                }
                """;
            sendResponse(exchange, 200, response);
        }
    }

    /** Static user list with a random 10-100ms delay to mimic a DB call. */
    static class UsersHandler implements HttpHandler {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            logger.info("Fetching users");
            // Simulate some work
            sleep(10 + (int)(Math.random() * 90));
            String response = """
                [
                  {"id": 1, "name": "Alice"},
                  {"id": 2, "name": "Bob"},
                  {"id": 3, "name": "Charlie"}
                ]
                """;
            sendResponse(exchange, 200, response);
        }
    }

    /** Fixed 500ms delay — handy for eyeballing trace durations. */
    static class SlowHandler implements HttpHandler {
        @Override
        public void handle(HttpExchange exchange) throws IOException {
            logger.info("Starting slow operation");
            sleep(500);
            logger.info("Slow operation completed");
            String response = """
                {
                  "message": "Slow response",
                  "delay": "500ms"
                }
                """;
            sendResponse(exchange, 200, response);
        }
    }

    /** Writes {@code response} as a JSON body with the given status code. */
    private static void sendResponse(HttpExchange exchange, int code, String response) throws IOException {
        // Encode once, explicitly as UTF-8: JSON is UTF-8 on the wire, and the
        // no-arg getBytes() used the platform default charset (not guaranteed
        // to be UTF-8). This also avoids encoding the payload twice.
        byte[] body = response.getBytes(StandardCharsets.UTF_8);
        exchange.getResponseHeaders().set("Content-Type", "application/json");
        exchange.sendResponseHeaders(code, body.length);
        try (OutputStream os = exchange.getResponseBody()) {
            os.write(body);
        }
    }

    /** Interruption-safe Thread.sleep wrapper (restores the interrupt flag). */
    private static void sleep(int ms) {
        try {
            Thread.sleep(ms);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}

View File

@@ -0,0 +1,29 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - Node.js Instrumented App Dockerfile
# Example showing how to add OpenTelemetry to any Node.js app
# ═══════════════════════════════════════════════════════════
FROM node:20-alpine
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install dependencies + OpenTelemetry auto-instrumentation
RUN npm install && \
npm install @opentelemetry/auto-instrumentations-node \
@opentelemetry/api \
@opentelemetry/sdk-node \
@opentelemetry/exporter-trace-otlp-http \
@opentelemetry/exporter-metrics-otlp-http
# Copy application code
COPY . .
# Expose port
EXPOSE 3000
# Start with auto-instrumentation
# The NODE_OPTIONS env var in docker-compose.yml handles instrumentation
CMD ["node", "server.js"]

View File

@@ -0,0 +1,62 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - Node.js Auto-Instrumentation Example
# Demonstrates automatic tracing for Node.js applications
# ═══════════════════════════════════════════════════════════
#
# Usage:
# docker-compose up -d
#
# This example shows how to instrument ANY Node.js app without
# code changes using @opentelemetry/auto-instrumentations-node
# ═══════════════════════════════════════════════════════════
version: '3.8'
services:
nodejs-app:
build:
context: .
dockerfile: Dockerfile
ports:
- "3001:3000"
environment:
# ════════════════════════════════════════════════════
# OpenTelemetry Auto-Instrumentation Configuration
# ════════════════════════════════════════════════════
# Service identification
- OTEL_SERVICE_NAME=nodejs-example-app
- OTEL_SERVICE_VERSION=1.0.0
# OTLP Exporter configuration (pointing to Ophion collector)
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
- OTEL_TRACES_EXPORTER=otlp
- OTEL_METRICS_EXPORTER=otlp
- OTEL_LOGS_EXPORTER=otlp
# Resource attributes
- OTEL_RESOURCE_ATTRIBUTES=deployment.environment=development,service.namespace=ophion-examples
# Sampling (1.0 = 100% of traces)
- OTEL_TRACES_SAMPLER=parentbased_traceidratio
- OTEL_TRACES_SAMPLER_ARG=1.0
# Propagation format
- OTEL_PROPAGATORS=tracecontext,baggage,b3multi
# Enable auto-instrumentation via Node.js loader
- NODE_OPTIONS=--require @opentelemetry/auto-instrumentations-node/register
networks:
- ophion
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:3000/health"]
interval: 10s
timeout: 5s
retries: 3
networks:
ophion:
external: true

View File

@@ -0,0 +1,17 @@
{
"name": "nodejs-instrumented-example",
"version": "1.0.0",
"description": "Example Node.js app with OpenTelemetry auto-instrumentation for Ophion",
"main": "server.js",
"scripts": {
"start": "node server.js",
"start:instrumented": "node --require @opentelemetry/auto-instrumentations-node/register server.js"
},
"dependencies": {
"express": "^4.18.2"
},
"devDependencies": {},
"keywords": ["ophion", "opentelemetry", "tracing", "observability"],
"author": "Ophion Team",
"license": "MIT"
}

View File

@@ -0,0 +1,67 @@
/**
* ═══════════════════════════════════════════════════════════
* 🐍 OPHION - Example Node.js Application
* This app is automatically instrumented by OpenTelemetry
* No code changes needed - just env vars!
* ═══════════════════════════════════════════════════════════
*/
const http = require('http');

const PORT = process.env.PORT || 3000;

// Serialize `payload` as JSON and finish the response with `status`.
const sendJson = (res, status, payload) => {
  res.writeHead(status, { 'Content-Type': 'application/json' });
  res.end(JSON.stringify(payload));
};

// Plain Node HTTP server — no framework needed; the OTel http
// auto-instrumentation (enabled via NODE_OPTIONS) produces the spans.
const server = http.createServer((req, res) => {
  const { url, method } = req;
  console.log(`[${new Date().toISOString()}] ${method} ${url}`);

  switch (url) {
    case '/health':
      // Liveness probe used by the docker-compose healthcheck.
      sendJson(res, 200, { status: 'healthy', service: 'nodejs-example' });
      break;
    case '/':
      sendJson(res, 200, {
        message: 'Hello from Node.js!',
        instrumented: true,
        otelEndpoint: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'not set'
      });
      break;
    case '/api/users':
      // Random 0-100ms delay to mimic a DB lookup.
      setTimeout(() => {
        sendJson(res, 200, [
          { id: 1, name: 'Alice' },
          { id: 2, name: 'Bob' },
          { id: 3, name: 'Charlie' }
        ]);
      }, Math.random() * 100);
      break;
    case '/api/slow':
      // Fixed 500ms delay (good for testing traces).
      setTimeout(() => {
        sendJson(res, 200, { message: 'Slow response', delay: '500ms' });
      }, 500);
      break;
    default:
      sendJson(res, 404, { error: 'Not Found' });
  }
});

server.listen(PORT, () => {
  console.log(`🚀 Node.js server running on port ${PORT}`);
  console.log(`📊 OTEL Endpoint: ${process.env.OTEL_EXPORTER_OTLP_ENDPOINT || 'not configured'}`);
  console.log(`🏷️ Service Name: ${process.env.OTEL_SERVICE_NAME || 'unknown'}`);
});

View File

@@ -0,0 +1,31 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - Python Instrumented App Dockerfile
# Example showing how to add OpenTelemetry to any Python app
# ═══════════════════════════════════════════════════════════
FROM python:3.12-slim
WORKDIR /app
# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Install OpenTelemetry auto-instrumentation
RUN pip install --no-cache-dir \
opentelemetry-distro \
opentelemetry-exporter-otlp \
opentelemetry-instrumentation
# Auto-install all available instrumentations
RUN opentelemetry-bootstrap -a install
# Copy application code
COPY . .
# Expose port
EXPOSE 5000
# Start with auto-instrumentation wrapper
# opentelemetry-instrument auto-configures tracing based on env vars
CMD ["opentelemetry-instrument", "python", "app.py"]

View File

@@ -0,0 +1,105 @@
"""
═══════════════════════════════════════════════════════════
🐍 OPHION - Example Python Application
This app is automatically instrumented by OpenTelemetry
No code changes needed - just env vars!
═══════════════════════════════════════════════════════════
"""
import os
import time
import random
import logging
from flask import Flask, jsonify
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = Flask(__name__)
@app.route('/')
def home():
    """Home endpoint: JSON banner echoing the configured OTLP endpoint so
    the instrumentation env vars can be verified from a browser."""
    logger.info("Home endpoint called")
    return jsonify({
        'message': 'Hello from Python!',
        'instrumented': True,
        'otel_endpoint': os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT', 'not set')
    })
@app.route('/health')
def health():
    """Health check endpoint used by the docker-compose healthcheck."""
    return jsonify({
        'status': 'healthy',
        'service': 'python-example'
    })
@app.route('/api/users')
def get_users():
    """Get users - simulates a DB call with a random 10-100ms delay so the
    auto-instrumented span has a realistic, variable duration."""
    logger.info("Fetching users")
    # Simulate some work
    time.sleep(random.uniform(0.01, 0.1))
    return jsonify([
        {'id': 1, 'name': 'Alice'},
        {'id': 2, 'name': 'Bob'},
        {'id': 3, 'name': 'Charlie'}
    ])
@app.route('/api/slow')
def slow_endpoint():
    """Slow endpoint (fixed 500ms) - good for testing trace visualization."""
    logger.info("Starting slow operation")
    # Simulate slow operation
    time.sleep(0.5)
    logger.info("Slow operation completed")
    return jsonify({
        'message': 'Slow response',
        'delay': '500ms'
    })
@app.route('/api/chain')
def chain_endpoint():
    """Simulates a chain of three sequential steps; each sleep shows up
    inside the single request span produced by the auto-instrumentation."""
    logger.info("Starting chain operation")
    # Step 1: Validate
    time.sleep(random.uniform(0.01, 0.05))
    logger.info("Validation complete")
    # Step 2: Process
    time.sleep(random.uniform(0.05, 0.1))
    logger.info("Processing complete")
    # Step 3: Save
    time.sleep(random.uniform(0.02, 0.08))
    logger.info("Save complete")
    return jsonify({
        'message': 'Chain completed',
        'steps': ['validate', 'process', 'save']
    })
@app.route('/api/error')
def error_endpoint():
    """Endpoint that raises an error - good for testing error traces."""
    logger.error("About to raise an error")
    raise ValueError("Intentional error for testing traces")
@app.errorhandler(Exception)
def handle_error(error):
    """Global error handler: logs the traceback and returns a 500 JSON body.

    NOTE(review): this echoes str(error) back to the client, which is fine
    for a demo but leaks internals in production - confirm intent.
    NOTE(review): prefer lazy logging args over an f-string below,
    e.g. logger.exception("Unhandled error: %s", error).
    """
    logger.exception(f"Unhandled error: {error}")
    return jsonify({
        'error': str(error),
        'type': type(error).__name__
    }), 500
if __name__ == '__main__':
    # PORT env var overrides the default example port (5000).
    port = int(os.getenv('PORT', 5000))
    logger.info(f"🚀 Python server starting on port {port}")
    logger.info(f"📊 OTEL Endpoint: {os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT', 'not configured')}")
    logger.info(f"🏷️ Service Name: {os.getenv('OTEL_SERVICE_NAME', 'unknown')}")
    app.run(host='0.0.0.0', port=port)

View File

@@ -0,0 +1,62 @@
# ═══════════════════════════════════════════════════════════
# 🐍 OPHION - Python Auto-Instrumentation Example
# Demonstrates automatic tracing for Python applications
# ═══════════════════════════════════════════════════════════
#
# Usage:
# docker-compose up -d
#
# This example shows how to instrument ANY Python app without
# code changes using opentelemetry-instrument command
# ═══════════════════════════════════════════════════════════
version: '3.8'
services:
python-app:
build:
context: .
dockerfile: Dockerfile
ports:
- "5001:5000"
environment:
# ════════════════════════════════════════════════════
# OpenTelemetry Auto-Instrumentation Configuration
# ════════════════════════════════════════════════════
# Service identification
- OTEL_SERVICE_NAME=python-example-app
- OTEL_SERVICE_VERSION=1.0.0
# OTLP Exporter configuration (pointing to Ophion collector)
- OTEL_EXPORTER_OTLP_ENDPOINT=http://otel-collector:4318
- OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf
- OTEL_TRACES_EXPORTER=otlp
- OTEL_METRICS_EXPORTER=otlp
- OTEL_LOGS_EXPORTER=otlp
# Resource attributes
- OTEL_RESOURCE_ATTRIBUTES=deployment.environment=development,service.namespace=ophion-examples
# Sampling (1.0 = 100% of traces)
- OTEL_TRACES_SAMPLER=parentbased_traceidratio
- OTEL_TRACES_SAMPLER_ARG=1.0
# Propagation format
- OTEL_PROPAGATORS=tracecontext,baggage,b3multi
# Python-specific: auto-detect instrumentations
- OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true
networks:
- ophion
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://localhost:5000/health"]
interval: 10s
timeout: 5s
retries: 3
networks:
ophion:
external: true

View File

@@ -0,0 +1,4 @@
# Python dependencies for instrumented example
flask>=3.0.0
gunicorn>=21.2.0
requests>=2.31.0