feat: Sentinela v0.2.0 — Brazilian Financial Data API in Go

- 20 Go source files, single 16MB binary
- SQLite + FTS5 full-text search (pure Go, no CGO)
- BCB integration: Selic, CDI, IPCA, USD/BRL, EUR/BRL
- CVM integration: 2,524 companies from registry
- Fiber v2 REST API with 19 endpoints
- Auto-seeds on first run (~5s for BCB + CVM)
- Token bucket rate limiter, optional API key auth
- Periodic sync scheduler (configurable)
- Graceful shutdown, structured logging (slog)
- All endpoints tested with real data
2026-02-10 11:15:54 -03:00
commit f7c8b446bf
28 changed files with 1763 additions and 0 deletions

6
.env.example Normal file
View File

@@ -0,0 +1,6 @@
PORT=3333
DATABASE_PATH=data/sentinela.db
RATE_LIMIT=100
API_KEY=
SYNC_INTERVAL=30m
LOG_LEVEL=info

13
Makefile Normal file
View File

@@ -0,0 +1,13 @@
.PHONY: build run seed clean
build:
go build -o bin/sentinela ./cmd/sentinela
run: build
./bin/sentinela
seed:
DATABASE_PATH=data/sentinela.db go run ./cmd/sentinela
clean:
rm -rf bin/ data/

58
README.md Normal file
View File

@@ -0,0 +1,58 @@
# Sentinela 🇧🇷
Brazilian Financial Data API — serves market data from BCB and CVM public sources.
## Quick Start
```bash
# Build and run (seeds data automatically on first run)
make run
# Or directly
go run ./cmd/sentinela
```
The API starts on `http://localhost:3333`. On first run, it automatically fetches:
- **BCB**: Selic, CDI, IPCA, USD/BRL, EUR/BRL (last ~3 years)
- **CVM**: Company registry + IPE filings (current + previous year)
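
Once seeding finishes you can smoke-test the server, for example:
```bash
# Liveness check
curl http://localhost:3333/health

# Current Selic, CDI, IPCA and FX in a single response
curl http://localhost:3333/api/v1/market/overview
```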
## Endpoints
| Endpoint | Description |
|---|---|
| `GET /health` | Health check |
| `GET /api/v1/companies` | List companies |
| `GET /api/v1/companies/search?q=petrobras` | Search companies |
| `GET /api/v1/companies/:id` | Get company |
| `GET /api/v1/companies/:id/filings` | Company filings |
| `GET /api/v1/filings` | List filings |
| `GET /api/v1/filings/recent` | Recent filings |
| `GET /api/v1/filings/search?q=dividendo` | Search filings |
| `GET /api/v1/filings/:id` | Get filing |
| `GET /api/v1/market/selic` | Selic history |
| `GET /api/v1/market/selic/current` | Current Selic |
| `GET /api/v1/market/cdi` | CDI history |
| `GET /api/v1/market/cdi/current` | Current CDI |
| `GET /api/v1/market/ipca` | IPCA history |
| `GET /api/v1/market/ipca/current` | Current IPCA |
| `GET /api/v1/market/fx` | FX rates |
| `GET /api/v1/market/fx/current` | Current FX |
| `GET /api/v1/market/overview` | Market dashboard |
| `GET /api/v1/search?q=vale` | Global search |
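Example requests (values are illustrative; `limit`/`offset` paginate companies and filings, `from`/`to` take `YYYY-MM-DD` dates on the market series, and `q` is passed to SQLite FTS5 `MATCH`):
```bash
# Full-text company search
curl "http://localhost:3333/api/v1/companies/search?q=petrobras"

# Filings, paginated
curl "http://localhost:3333/api/v1/filings?limit=10&offset=20"

# Selic history restricted to a date range
curl "http://localhost:3333/api/v1/market/selic?from=2024-01-01&to=2024-06-30"
```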
## Configuration
Set via environment variables (see `.env.example`):
- `PORT` — HTTP port (default: 3333)
- `DATABASE_PATH` — SQLite path (default: data/sentinela.db)
- `RATE_LIMIT` — Requests per minute per IP (default: 100)
- `API_KEY` — Optional API key (if set, every request except `GET /health` must present it via the `X-API-Key` header or an `Authorization: Bearer` token)
- `SYNC_INTERVAL` — Auto-refresh interval (default: 30m; accepts any Go duration string such as `1h`)
- `LOG_LEVEL` — `info` (default) or `debug`
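
For instance, a minimal sketch of a non-default setup (the key value is just a placeholder):
```bash
PORT=8080 API_KEY=changeme RATE_LIMIT=60 ./bin/sentinela

# With API_KEY set, clients authenticate with either header form:
curl -H "X-API-Key: changeme" "http://localhost:8080/api/v1/market/selic/current"
curl -H "Authorization: Bearer changeme" "http://localhost:8080/api/v1/search?q=vale"
```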
## Tech Stack
- Go 1.22+
- Fiber v2 (HTTP)
- SQLite via modernc.org/sqlite (pure Go, no CGO)
- FTS5 full-text search

BIN
bin/sentinela Executable file

Binary file not shown.

77
cmd/sentinela/main.go Normal file
View File

@@ -0,0 +1,77 @@
package main
import (
"fmt"
"log/slog"
"os"
"os/signal"
"syscall"
"time"
"github.com/sentinela-go/internal/api"
"github.com/sentinela-go/internal/config"
"github.com/sentinela-go/internal/db"
"github.com/sentinela-go/internal/fetcher"
)
func main() {
cfg := config.Load()
// Setup structured logging
level := slog.LevelInfo
if cfg.LogLevel == "debug" {
level = slog.LevelDebug
}
slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: level})))
slog.Info("starting Sentinela", "port", cfg.Port)
// Initialize database
database, err := db.New(cfg.DatabasePath)
if err != nil {
slog.Error("failed to initialize database", "error", err)
os.Exit(1)
}
defer database.Close()
// Seed data if empty
if database.IsMarketEmpty() {
slog.Info("database is empty, seeding BCB data...")
if err := fetcher.FetchAllBCB(database); err != nil {
slog.Error("failed to seed BCB data", "error", err)
}
}
if database.IsEmpty() {
slog.Info("no companies found, seeding CVM data...")
if err := fetcher.FetchAllCVM(database); err != nil {
slog.Error("failed to seed CVM data", "error", err)
}
}
// Start scheduler
syncInterval, err := time.ParseDuration(cfg.SyncInterval)
if err != nil {
syncInterval = 30 * time.Minute
}
stopChan := make(chan struct{})
go fetcher.StartScheduler(database, syncInterval, stopChan)
// Create and start server
app := api.NewServer(cfg, database)
// Graceful shutdown
go func() {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
<-sigChan
slog.Info("shutting down...")
close(stopChan)
app.Shutdown()
}()
addr := fmt.Sprintf(":%d", cfg.Port)
if err := app.Listen(addr); err != nil {
slog.Error("server error", "error", err)
os.Exit(1)
}
}

BIN
data/sentinela.db Normal file

Binary file not shown.

29
go.mod Normal file
View File

@@ -0,0 +1,29 @@
module github.com/sentinela-go
go 1.22.0
require (
github.com/gofiber/fiber/v2 v2.52.6
golang.org/x/text v0.21.0
modernc.org/sqlite v1.34.5
)
require (
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasthttp v1.51.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
golang.org/x/sys v0.28.0 // indirect
modernc.org/libc v1.55.3 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.8.0 // indirect
)

67
go.sum Normal file
View File

@@ -0,0 +1,67 @@
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/gofiber/fiber/v2 v2.52.6 h1:Rfp+ILPiYSvvVuIPvxrBns+HJp8qGLDnLJawAu27XVI=
github.com/gofiber/fiber/v2 v2.52.6/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

View File

@@ -0,0 +1,63 @@
package handlers
import (
"strconv"
"github.com/gofiber/fiber/v2"
)
func (h *Handler) ListCompanies(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "20"))
offset, _ := strconv.Atoi(c.Query("offset", "0"))
status := c.Query("status")
sector := c.Query("sector")
companies, total, err := h.db.ListCompanies(limit, offset, status, sector)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": companies, "total": total, "limit": limit, "offset": offset})
}
func (h *Handler) GetCompany(c *fiber.Ctx) error {
id, err := strconv.ParseInt(c.Params("id"), 10, 64)
if err != nil {
return c.Status(400).JSON(fiber.Map{"error": "invalid id"})
}
company, err := h.db.GetCompany(id)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
if company == nil {
return c.Status(404).JSON(fiber.Map{"error": "not found"})
}
return c.JSON(fiber.Map{"data": company})
}
func (h *Handler) CompanyFilings(c *fiber.Ctx) error {
id, err := strconv.ParseInt(c.Params("id"), 10, 64)
if err != nil {
return c.Status(400).JSON(fiber.Map{"error": "invalid id"})
}
limit, _ := strconv.Atoi(c.Query("limit", "20"))
offset, _ := strconv.Atoi(c.Query("offset", "0"))
filings, total, err := h.db.ListFilingsByCompany(id, limit, offset)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": filings, "total": total, "limit": limit, "offset": offset})
}
func (h *Handler) SearchCompanies(c *fiber.Ctx) error {
q := c.Query("q")
if q == "" {
return c.Status(400).JSON(fiber.Map{"error": "query parameter 'q' required"})
}
limit, _ := strconv.Atoi(c.Query("limit", "20"))
companies, err := h.db.SearchCompanies(q, limit)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": companies, "total": len(companies)})
}

View File

@@ -0,0 +1,58 @@
package handlers
import (
"strconv"
"github.com/gofiber/fiber/v2"
)
func (h *Handler) ListFilings(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "20"))
offset, _ := strconv.Atoi(c.Query("offset", "0"))
category := c.Query("category")
from := c.Query("from")
to := c.Query("to")
filings, total, err := h.db.ListFilings(limit, offset, category, from, to)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": filings, "total": total, "limit": limit, "offset": offset})
}
func (h *Handler) GetFiling(c *fiber.Ctx) error {
id, err := strconv.ParseInt(c.Params("id"), 10, 64)
if err != nil {
return c.Status(400).JSON(fiber.Map{"error": "invalid id"})
}
filing, err := h.db.GetFiling(id)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
if filing == nil {
return c.Status(404).JSON(fiber.Map{"error": "not found"})
}
return c.JSON(fiber.Map{"data": filing})
}
func (h *Handler) RecentFilings(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "20"))
filings, err := h.db.RecentFilings(limit)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": filings, "total": len(filings)})
}
func (h *Handler) SearchFilings(c *fiber.Ctx) error {
q := c.Query("q")
if q == "" {
return c.Status(400).JSON(fiber.Map{"error": "query parameter 'q' required"})
}
limit, _ := strconv.Atoi(c.Query("limit", "20"))
filings, err := h.db.SearchFilings(q, limit)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": filings, "total": len(filings)})
}

View File

@@ -0,0 +1,21 @@
package handlers
import (
"github.com/gofiber/fiber/v2"
"github.com/sentinela-go/internal/db"
)
type Handler struct {
db *db.DB
}
func New(database *db.DB) *Handler {
return &Handler{db: database}
}
func (h *Handler) Health(c *fiber.Ctx) error {
return c.JSON(fiber.Map{
"status": "ok",
"service": "sentinela",
})
}

View File

@@ -0,0 +1,107 @@
package handlers
import (
"strconv"
"github.com/gofiber/fiber/v2"
)
func (h *Handler) ListSelic(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "30"))
from := c.Query("from")
to := c.Query("to")
data, err := h.db.ListSelic(limit, from, to)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": data, "total": len(data)})
}
func (h *Handler) CurrentSelic(c *fiber.Ctx) error {
r, err := h.db.CurrentSelic()
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
if r == nil {
return c.Status(404).JSON(fiber.Map{"error": "no data"})
}
return c.JSON(fiber.Map{"data": r})
}
func (h *Handler) ListCDI(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "30"))
from := c.Query("from")
to := c.Query("to")
data, err := h.db.ListCDI(limit, from, to)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": data, "total": len(data)})
}
func (h *Handler) CurrentCDI(c *fiber.Ctx) error {
r, err := h.db.CurrentCDI()
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
if r == nil {
return c.Status(404).JSON(fiber.Map{"error": "no data"})
}
return c.JSON(fiber.Map{"data": r})
}
func (h *Handler) ListIPCA(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "30"))
from := c.Query("from")
to := c.Query("to")
data, err := h.db.ListIPCA(limit, from, to)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": data, "total": len(data)})
}
func (h *Handler) CurrentIPCA(c *fiber.Ctx) error {
r, err := h.db.CurrentIPCA()
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
if r == nil {
return c.Status(404).JSON(fiber.Map{"error": "no data"})
}
return c.JSON(fiber.Map{"data": r})
}
func (h *Handler) ListFX(c *fiber.Ctx) error {
limit, _ := strconv.Atoi(c.Query("limit", "30"))
pair := c.Query("pair")
from := c.Query("from")
to := c.Query("to")
data, err := h.db.ListFX(limit, pair, from, to)
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": data, "total": len(data)})
}
func (h *Handler) CurrentFX(c *fiber.Ctx) error {
data, err := h.db.CurrentFX()
if err != nil {
return c.Status(500).JSON(fiber.Map{"error": err.Error()})
}
return c.JSON(fiber.Map{"data": data})
}
func (h *Handler) MarketOverview(c *fiber.Ctx) error {
selic, _ := h.db.CurrentSelic()
cdi, _ := h.db.CurrentCDI()
ipca, _ := h.db.CurrentIPCA()
fx, _ := h.db.CurrentFX()
return c.JSON(fiber.Map{
"selic": selic,
"cdi": cdi,
"ipca": ipca,
"fx": fx,
})
}

View File

@@ -0,0 +1,23 @@
package handlers
import (
"strconv"
"github.com/gofiber/fiber/v2"
)
func (h *Handler) GlobalSearch(c *fiber.Ctx) error {
q := c.Query("q")
if q == "" {
return c.Status(400).JSON(fiber.Map{"error": "query parameter 'q' required"})
}
limit, _ := strconv.Atoi(c.Query("limit", "10"))
companies, _ := h.db.SearchCompanies(q, limit)
filings, _ := h.db.SearchFilings(q, limit)
return c.JSON(fiber.Map{
"companies": fiber.Map{"data": companies, "total": len(companies)},
"filings": fiber.Map{"data": filings, "total": len(filings)},
})
}

View File

@@ -0,0 +1,26 @@
package middleware
import (
"strings"
"github.com/gofiber/fiber/v2"
)
func NewAPIKeyAuth(apiKey string) fiber.Handler {
return func(c *fiber.Ctx) error {
if c.Path() == "/health" {
return c.Next()
}
key := c.Get("X-API-Key")
if key == "" {
auth := c.Get("Authorization")
if strings.HasPrefix(auth, "Bearer ") {
key = strings.TrimPrefix(auth, "Bearer ")
}
}
if key != apiKey {
return c.Status(401).JSON(fiber.Map{"error": "unauthorized"})
}
return c.Next()
}
}

View File

@@ -0,0 +1,55 @@
package middleware
import (
"sync"
"time"
"github.com/gofiber/fiber/v2"
)
type bucket struct {
tokens float64
lastCheck time.Time
}
type rateLimiter struct {
mu sync.Mutex
buckets map[string]*bucket
rate float64 // tokens per second
capacity float64
}
func NewRateLimiter(requestsPerMinute int) fiber.Handler {
rl := &rateLimiter{
buckets: make(map[string]*bucket),
rate: float64(requestsPerMinute) / 60.0,
capacity: float64(requestsPerMinute),
}
return func(c *fiber.Ctx) error {
ip := c.IP()
rl.mu.Lock()
b, ok := rl.buckets[ip]
if !ok {
b = &bucket{tokens: rl.capacity, lastCheck: time.Now()}
rl.buckets[ip] = b
}
now := time.Now()
elapsed := now.Sub(b.lastCheck).Seconds()
b.tokens += elapsed * rl.rate
if b.tokens > rl.capacity {
b.tokens = rl.capacity
}
b.lastCheck = now
if b.tokens < 1 {
rl.mu.Unlock()
return c.Status(429).JSON(fiber.Map{"error": "rate limit exceeded"})
}
b.tokens--
rl.mu.Unlock()
return c.Next()
}
}

38
internal/api/routes.go Normal file
View File

@@ -0,0 +1,38 @@
package api
import (
"github.com/gofiber/fiber/v2"
"github.com/sentinela-go/internal/api/handlers"
"github.com/sentinela-go/internal/db"
)
func RegisterRoutes(app *fiber.App, database *db.DB) {
h := handlers.New(database)
app.Get("/health", h.Health)
v1 := app.Group("/api/v1")
v1.Get("/companies", h.ListCompanies)
v1.Get("/companies/search", h.SearchCompanies)
v1.Get("/companies/:id", h.GetCompany)
v1.Get("/companies/:id/filings", h.CompanyFilings)
v1.Get("/filings", h.ListFilings)
v1.Get("/filings/search", h.SearchFilings)
v1.Get("/filings/recent", h.RecentFilings)
v1.Get("/filings/:id", h.GetFiling)
v1.Get("/market/selic", h.ListSelic)
v1.Get("/market/selic/current", h.CurrentSelic)
v1.Get("/market/cdi", h.ListCDI)
v1.Get("/market/cdi/current", h.CurrentCDI)
v1.Get("/market/ipca", h.ListIPCA)
v1.Get("/market/ipca/current", h.CurrentIPCA)
v1.Get("/market/fx", h.ListFX)
v1.Get("/market/fx/current", h.CurrentFX)
v1.Get("/market/overview", h.MarketOverview)
v1.Get("/search", h.GlobalSearch)
}

31
internal/api/server.go Normal file
View File

@@ -0,0 +1,31 @@
package api
import (
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/cors"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/gofiber/fiber/v2/middleware/recover"
"github.com/sentinela-go/internal/api/middleware"
"github.com/sentinela-go/internal/config"
"github.com/sentinela-go/internal/db"
)
func NewServer(cfg *config.Config, database *db.DB) *fiber.App {
app := fiber.New(fiber.Config{
AppName: "Sentinela API",
})
app.Use(recover.New())
app.Use(logger.New())
app.Use(cors.New())
app.Use(middleware.NewRateLimiter(cfg.RateLimit))
if cfg.APIKey != "" {
app.Use(middleware.NewAPIKeyAuth(cfg.APIKey))
}
RegisterRoutes(app, database)
return app
}

48
internal/config/config.go Normal file
View File

@@ -0,0 +1,48 @@
package config
import (
"os"
"strconv"
)
type Config struct {
Port int
DatabasePath string
RateLimit int
APIKey string
SyncInterval string
LogLevel string
}
func Load() *Config {
c := &Config{
Port: 3333,
DatabasePath: "data/sentinela.db",
RateLimit: 100,
SyncInterval: "30m",
LogLevel: "info",
}
if v := os.Getenv("PORT"); v != "" {
if n, err := strconv.Atoi(v); err == nil {
c.Port = n
}
}
if v := os.Getenv("DATABASE_PATH"); v != "" {
c.DatabasePath = v
}
if v := os.Getenv("RATE_LIMIT"); v != "" {
if n, err := strconv.Atoi(v); err == nil {
c.RateLimit = n
}
}
if v := os.Getenv("API_KEY"); v != "" {
c.APIKey = v
}
if v := os.Getenv("SYNC_INTERVAL"); v != "" {
c.SyncInterval = v
}
if v := os.Getenv("LOG_LEVEL"); v != "" {
c.LogLevel = v
}
return c
}

93
internal/db/companies.go Normal file
View File

@@ -0,0 +1,93 @@
package db
import (
"database/sql"
"fmt"
"time"
)
type Company struct {
ID int64 `json:"id"`
Ticker string `json:"ticker,omitempty"`
Name string `json:"name"`
CNPJ string `json:"cnpj"`
CVMCode string `json:"cvm_code,omitempty"`
Sector string `json:"sector,omitempty"`
Status string `json:"status"`
CreatedAt string `json:"created_at"`
UpdatedAt string `json:"updated_at"`
}
func (d *DB) UpsertCompany(c *Company) error {
_, err := d.Conn.Exec(`
INSERT INTO companies (ticker, name, cnpj, cvm_code, sector, status, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(cnpj) DO UPDATE SET
ticker=excluded.ticker, name=excluded.name, cvm_code=excluded.cvm_code,
sector=excluded.sector, status=excluded.status, updated_at=excluded.updated_at`,
c.Ticker, c.Name, c.CNPJ, c.CVMCode, c.Sector, c.Status, time.Now().UTC().Format(time.RFC3339))
return err
}
func (d *DB) RebuildCompaniesFTS() error {
_, err := d.Conn.Exec(`
INSERT INTO companies_fts(companies_fts) VALUES('rebuild')`)
return err
}
func (d *DB) ListCompanies(limit, offset int, status, sector string) ([]Company, int, error) {
where := "WHERE 1=1"
args := []any{}
if status != "" {
where += " AND status = ?"
args = append(args, status)
}
if sector != "" {
where += " AND sector = ?"
args = append(args, sector)
}
var total int
err := d.Conn.QueryRow("SELECT COUNT(*) FROM companies "+where, args...).Scan(&total)
if err != nil {
return nil, 0, err
}
query := fmt.Sprintf("SELECT id, COALESCE(ticker,''), name, cnpj, COALESCE(cvm_code,''), COALESCE(sector,''), status, created_at, updated_at FROM companies %s ORDER BY name LIMIT ? OFFSET ?", where)
args = append(args, limit, offset)
rows, err := d.Conn.Query(query, args...)
if err != nil {
return nil, 0, err
}
defer rows.Close()
var companies []Company
for rows.Next() {
var c Company
if err := rows.Scan(&c.ID, &c.Ticker, &c.Name, &c.CNPJ, &c.CVMCode, &c.Sector, &c.Status, &c.CreatedAt, &c.UpdatedAt); err != nil {
return nil, 0, err
}
companies = append(companies, c)
}
return companies, total, nil
}
func (d *DB) GetCompany(id int64) (*Company, error) {
c := &Company{}
err := d.Conn.QueryRow("SELECT id, COALESCE(ticker,''), name, cnpj, COALESCE(cvm_code,''), COALESCE(sector,''), status, created_at, updated_at FROM companies WHERE id = ?", id).
Scan(&c.ID, &c.Ticker, &c.Name, &c.CNPJ, &c.CVMCode, &c.Sector, &c.Status, &c.CreatedAt, &c.UpdatedAt)
if err == sql.ErrNoRows {
return nil, nil
}
return c, err
}
func (d *DB) GetCompanyByCNPJ(cnpj string) (*Company, error) {
c := &Company{}
err := d.Conn.QueryRow("SELECT id, COALESCE(ticker,''), name, cnpj, COALESCE(cvm_code,''), COALESCE(sector,''), status, created_at, updated_at FROM companies WHERE cnpj = ?", cnpj).
Scan(&c.ID, &c.Ticker, &c.Name, &c.CNPJ, &c.CVMCode, &c.Sector, &c.Status, &c.CreatedAt, &c.UpdatedAt)
if err == sql.ErrNoRows {
return nil, nil
}
return c, err
}

129
internal/db/filings.go Normal file
View File

@@ -0,0 +1,129 @@
package db
import (
"database/sql"
"fmt"
)
type Filing struct {
ID int64 `json:"id"`
ExternalID string `json:"external_id"`
CompanyID *int64 `json:"company_id,omitempty"`
CNPJ string `json:"cnpj"`
Category string `json:"category"`
Type string `json:"type,omitempty"`
Species string `json:"species,omitempty"`
Subject string `json:"subject,omitempty"`
ReferenceDate string `json:"reference_date,omitempty"`
DeliveryDate string `json:"delivery_date"`
Protocol string `json:"protocol,omitempty"`
Version string `json:"version,omitempty"`
DownloadURL string `json:"download_url,omitempty"`
Importance int `json:"importance"`
CreatedAt string `json:"created_at"`
}
func (d *DB) UpsertFiling(f *Filing) error {
_, err := d.Conn.Exec(`
INSERT INTO filings (external_id, company_id, cnpj, category, type, species, subject, reference_date, delivery_date, protocol, version, download_url, importance)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(external_id) DO NOTHING`,
f.ExternalID, f.CompanyID, f.CNPJ, f.Category, f.Type, f.Species, f.Subject, f.ReferenceDate, f.DeliveryDate, f.Protocol, f.Version, f.DownloadURL, f.Importance)
return err
}
func (d *DB) RebuildFilingsFTS() error {
_, err := d.Conn.Exec(`INSERT INTO filings_fts(filings_fts) VALUES('rebuild')`)
return err
}
func (d *DB) ListFilings(limit, offset int, category, from, to string) ([]Filing, int, error) {
where := "WHERE 1=1"
args := []any{}
if category != "" {
where += " AND category = ?"
args = append(args, category)
}
if from != "" {
where += " AND delivery_date >= ?"
args = append(args, from)
}
if to != "" {
where += " AND delivery_date <= ?"
args = append(args, to)
}
var total int
err := d.Conn.QueryRow("SELECT COUNT(*) FROM filings "+where, args...).Scan(&total)
if err != nil {
return nil, 0, err
}
query := fmt.Sprintf(`SELECT id, external_id, company_id, cnpj, category, COALESCE(type,''), COALESCE(species,''),
COALESCE(subject,''), COALESCE(reference_date,''), delivery_date, COALESCE(protocol,''),
COALESCE(version,''), COALESCE(download_url,''), importance, created_at
FROM filings %s ORDER BY delivery_date DESC LIMIT ? OFFSET ?`, where)
args = append(args, limit, offset)
rows, err := d.Conn.Query(query, args...)
if err != nil {
return nil, 0, err
}
defer rows.Close()
return scanFilings(rows)
}
func (d *DB) GetFiling(id int64) (*Filing, error) {
f := &Filing{}
err := d.Conn.QueryRow(`SELECT id, external_id, company_id, cnpj, category, COALESCE(type,''), COALESCE(species,''),
COALESCE(subject,''), COALESCE(reference_date,''), delivery_date, COALESCE(protocol,''),
COALESCE(version,''), COALESCE(download_url,''), importance, created_at
FROM filings WHERE id = ?`, id).
Scan(&f.ID, &f.ExternalID, &f.CompanyID, &f.CNPJ, &f.Category, &f.Type, &f.Species, &f.Subject,
&f.ReferenceDate, &f.DeliveryDate, &f.Protocol, &f.Version, &f.DownloadURL, &f.Importance, &f.CreatedAt)
if err == sql.ErrNoRows {
return nil, nil
}
return f, err
}
func (d *DB) ListFilingsByCompany(companyID int64, limit, offset int) ([]Filing, int, error) {
var total int
d.Conn.QueryRow("SELECT COUNT(*) FROM filings WHERE company_id = ?", companyID).Scan(&total)
rows, err := d.Conn.Query(`SELECT id, external_id, company_id, cnpj, category, COALESCE(type,''), COALESCE(species,''),
COALESCE(subject,''), COALESCE(reference_date,''), delivery_date, COALESCE(protocol,''),
COALESCE(version,''), COALESCE(download_url,''), importance, created_at
FROM filings WHERE company_id = ? ORDER BY delivery_date DESC LIMIT ? OFFSET ?`, companyID, limit, offset)
if err != nil {
return nil, 0, err
}
defer rows.Close()
filings, _, err := scanFilings(rows)
return filings, total, err
}
func (d *DB) RecentFilings(limit int) ([]Filing, error) {
rows, err := d.Conn.Query(`SELECT id, external_id, company_id, cnpj, category, COALESCE(type,''), COALESCE(species,''),
COALESCE(subject,''), COALESCE(reference_date,''), delivery_date, COALESCE(protocol,''),
COALESCE(version,''), COALESCE(download_url,''), importance, created_at
FROM filings ORDER BY delivery_date DESC LIMIT ?`, limit)
if err != nil {
return nil, err
}
defer rows.Close()
filings, _, err := scanFilings(rows)
return filings, err
}
func scanFilings(rows *sql.Rows) ([]Filing, int, error) {
var filings []Filing
for rows.Next() {
var f Filing
if err := rows.Scan(&f.ID, &f.ExternalID, &f.CompanyID, &f.CNPJ, &f.Category, &f.Type, &f.Species,
&f.Subject, &f.ReferenceDate, &f.DeliveryDate, &f.Protocol, &f.Version, &f.DownloadURL, &f.Importance, &f.CreatedAt); err != nil {
return nil, 0, err
}
filings = append(filings, f)
}
return filings, len(filings), nil
}

215
internal/db/market.go Normal file
View File

@@ -0,0 +1,215 @@
package db
import (
"database/sql"
"fmt"
)
type SelicRecord struct {
ID int64 `json:"id"`
Date string `json:"date"`
DailyRate float64 `json:"daily_rate"`
AnnualRate *float64 `json:"annual_rate,omitempty"`
TargetRate *float64 `json:"target_rate,omitempty"`
}
type CDIRecord struct {
ID int64 `json:"id"`
Date string `json:"date"`
DailyRate float64 `json:"daily_rate"`
AnnualRate *float64 `json:"annual_rate,omitempty"`
}
type IPCARecord struct {
ID int64 `json:"id"`
Date string `json:"date"`
MonthlyRate float64 `json:"monthly_rate"`
Accumulated12m *float64 `json:"accumulated_12m,omitempty"`
}
type FXRecord struct {
ID int64 `json:"id"`
Date string `json:"date"`
Pair string `json:"pair"`
Rate float64 `json:"rate"`
}
// Selic
func (d *DB) InsertSelic(date string, daily float64, annual, target *float64) error {
_, err := d.Conn.Exec(`INSERT OR IGNORE INTO selic_history (date, daily_rate, annual_rate, target_rate) VALUES (?,?,?,?)`,
date, daily, annual, target)
return err
}
func (d *DB) ListSelic(limit int, from, to string) ([]SelicRecord, error) {
where, args := marketWhere(from, to)
query := fmt.Sprintf("SELECT id, date, daily_rate, annual_rate, target_rate FROM selic_history %s ORDER BY date DESC LIMIT ?", where)
args = append(args, limit)
rows, err := d.Conn.Query(query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
var out []SelicRecord
for rows.Next() {
var r SelicRecord
rows.Scan(&r.ID, &r.Date, &r.DailyRate, &r.AnnualRate, &r.TargetRate)
out = append(out, r)
}
return out, nil
}
func (d *DB) CurrentSelic() (*SelicRecord, error) {
r := &SelicRecord{}
err := d.Conn.QueryRow("SELECT id, date, daily_rate, annual_rate, target_rate FROM selic_history ORDER BY date DESC LIMIT 1").
Scan(&r.ID, &r.Date, &r.DailyRate, &r.AnnualRate, &r.TargetRate)
if err == sql.ErrNoRows {
return nil, nil
}
return r, err
}
// CDI
func (d *DB) InsertCDI(date string, daily float64, annual *float64) error {
_, err := d.Conn.Exec(`INSERT OR IGNORE INTO cdi_history (date, daily_rate, annual_rate) VALUES (?,?,?)`,
date, daily, annual)
return err
}
func (d *DB) ListCDI(limit int, from, to string) ([]CDIRecord, error) {
where, args := marketWhere(from, to)
query := fmt.Sprintf("SELECT id, date, daily_rate, annual_rate FROM cdi_history %s ORDER BY date DESC LIMIT ?", where)
args = append(args, limit)
rows, err := d.Conn.Query(query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
var out []CDIRecord
for rows.Next() {
var r CDIRecord
rows.Scan(&r.ID, &r.Date, &r.DailyRate, &r.AnnualRate)
out = append(out, r)
}
return out, nil
}
func (d *DB) CurrentCDI() (*CDIRecord, error) {
r := &CDIRecord{}
err := d.Conn.QueryRow("SELECT id, date, daily_rate, annual_rate FROM cdi_history ORDER BY date DESC LIMIT 1").
Scan(&r.ID, &r.Date, &r.DailyRate, &r.AnnualRate)
if err == sql.ErrNoRows {
return nil, nil
}
return r, err
}
// IPCA
func (d *DB) InsertIPCA(date string, monthly float64, acc12m *float64) error {
_, err := d.Conn.Exec(`INSERT OR IGNORE INTO ipca_history (date, monthly_rate, accumulated_12m) VALUES (?,?,?)`,
date, monthly, acc12m)
return err
}
func (d *DB) ListIPCA(limit int, from, to string) ([]IPCARecord, error) {
where, args := marketWhere(from, to)
query := fmt.Sprintf("SELECT id, date, monthly_rate, accumulated_12m FROM ipca_history %s ORDER BY date DESC LIMIT ?", where)
args = append(args, limit)
rows, err := d.Conn.Query(query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
var out []IPCARecord
for rows.Next() {
var r IPCARecord
rows.Scan(&r.ID, &r.Date, &r.MonthlyRate, &r.Accumulated12m)
out = append(out, r)
}
return out, nil
}
func (d *DB) CurrentIPCA() (*IPCARecord, error) {
r := &IPCARecord{}
err := d.Conn.QueryRow("SELECT id, date, monthly_rate, accumulated_12m FROM ipca_history ORDER BY date DESC LIMIT 1").
Scan(&r.ID, &r.Date, &r.MonthlyRate, &r.Accumulated12m)
if err == sql.ErrNoRows {
return nil, nil
}
return r, err
}
// FX
func (d *DB) InsertFX(date, pair string, rate float64) error {
_, err := d.Conn.Exec(`INSERT OR IGNORE INTO fx_rates (date, pair, rate) VALUES (?,?,?)`, date, pair, rate)
return err
}
func (d *DB) ListFX(limit int, pair, from, to string) ([]FXRecord, error) {
where := "WHERE 1=1"
args := []any{}
if pair != "" {
where += " AND pair = ?"
args = append(args, pair)
}
if from != "" {
where += " AND date >= ?"
args = append(args, from)
}
if to != "" {
where += " AND date <= ?"
args = append(args, to)
}
query := fmt.Sprintf("SELECT id, date, pair, rate FROM fx_rates %s ORDER BY date DESC LIMIT ?", where)
args = append(args, limit)
rows, err := d.Conn.Query(query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
var out []FXRecord
for rows.Next() {
var r FXRecord
rows.Scan(&r.ID, &r.Date, &r.Pair, &r.Rate)
out = append(out, r)
}
return out, nil
}
func (d *DB) CurrentFX() ([]FXRecord, error) {
rows, err := d.Conn.Query(`SELECT DISTINCT pair FROM fx_rates`)
if err != nil {
return nil, err
}
defer rows.Close()
var pairs []string
for rows.Next() {
var p string
rows.Scan(&p)
pairs = append(pairs, p)
}
var out []FXRecord
for _, p := range pairs {
var r FXRecord
err := d.Conn.QueryRow("SELECT id, date, pair, rate FROM fx_rates WHERE pair = ? ORDER BY date DESC LIMIT 1", p).
Scan(&r.ID, &r.Date, &r.Pair, &r.Rate)
if err == nil {
out = append(out, r)
}
}
return out, nil
}
func marketWhere(from, to string) (string, []any) {
where := "WHERE 1=1"
args := []any{}
if from != "" {
where += " AND date >= ?"
args = append(args, from)
}
if to != "" {
where += " AND date <= ?"
args = append(args, to)
}
return where, args
}

73
internal/db/schema.go Normal file
View File

@@ -0,0 +1,73 @@
package db
const schema = `
CREATE TABLE IF NOT EXISTS companies (
id INTEGER PRIMARY KEY AUTOINCREMENT,
ticker TEXT,
name TEXT NOT NULL,
cnpj TEXT UNIQUE NOT NULL,
cvm_code TEXT,
sector TEXT,
status TEXT NOT NULL DEFAULT 'ATIVO',
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS filings (
id INTEGER PRIMARY KEY AUTOINCREMENT,
external_id TEXT UNIQUE NOT NULL,
company_id INTEGER REFERENCES companies(id),
cnpj TEXT NOT NULL,
category TEXT NOT NULL,
type TEXT,
species TEXT,
subject TEXT,
reference_date TEXT,
delivery_date DATETIME NOT NULL,
protocol TEXT,
version TEXT,
download_url TEXT,
importance INTEGER DEFAULT 1,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE IF NOT EXISTS selic_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
date TEXT UNIQUE NOT NULL,
daily_rate REAL NOT NULL,
annual_rate REAL,
target_rate REAL
);
CREATE TABLE IF NOT EXISTS cdi_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
date TEXT UNIQUE NOT NULL,
daily_rate REAL NOT NULL,
annual_rate REAL
);
CREATE TABLE IF NOT EXISTS ipca_history (
id INTEGER PRIMARY KEY AUTOINCREMENT,
date TEXT UNIQUE NOT NULL,
monthly_rate REAL NOT NULL,
accumulated_12m REAL
);
CREATE TABLE IF NOT EXISTS fx_rates (
id INTEGER PRIMARY KEY AUTOINCREMENT,
date TEXT NOT NULL,
pair TEXT NOT NULL,
rate REAL NOT NULL,
UNIQUE(date, pair)
);
CREATE VIRTUAL TABLE IF NOT EXISTS companies_fts USING fts5(
name, ticker, sector, cnpj,
content='companies', content_rowid='id'
);
CREATE VIRTUAL TABLE IF NOT EXISTS filings_fts USING fts5(
subject, category, type,
content='filings', content_rowid='id'
);
`

40
internal/db/search.go Normal file
View File

@@ -0,0 +1,40 @@
package db
func (d *DB) SearchCompanies(query string, limit int) ([]Company, error) {
rows, err := d.Conn.Query(`
SELECT c.id, COALESCE(c.ticker,''), c.name, c.cnpj, COALESCE(c.cvm_code,''), COALESCE(c.sector,''), c.status, c.created_at, c.updated_at
FROM companies_fts f JOIN companies c ON f.rowid = c.id
WHERE companies_fts MATCH ? LIMIT ?`, query, limit)
if err != nil {
return nil, err
}
defer rows.Close()
var out []Company
for rows.Next() {
var c Company
rows.Scan(&c.ID, &c.Ticker, &c.Name, &c.CNPJ, &c.CVMCode, &c.Sector, &c.Status, &c.CreatedAt, &c.UpdatedAt)
out = append(out, c)
}
return out, nil
}
func (d *DB) SearchFilings(query string, limit int) ([]Filing, error) {
rows, err := d.Conn.Query(`
SELECT fi.id, fi.external_id, fi.company_id, fi.cnpj, fi.category, COALESCE(fi.type,''), COALESCE(fi.species,''),
COALESCE(fi.subject,''), COALESCE(fi.reference_date,''), fi.delivery_date, COALESCE(fi.protocol,''),
COALESCE(fi.version,''), COALESCE(fi.download_url,''), fi.importance, fi.created_at
FROM filings_fts f JOIN filings fi ON f.rowid = fi.id
WHERE filings_fts MATCH ? LIMIT ?`, query, limit)
if err != nil {
return nil, err
}
defer rows.Close()
var out []Filing
for rows.Next() {
var f Filing
rows.Scan(&f.ID, &f.ExternalID, &f.CompanyID, &f.CNPJ, &f.Category, &f.Type, &f.Species,
&f.Subject, &f.ReferenceDate, &f.DeliveryDate, &f.Protocol, &f.Version, &f.DownloadURL, &f.Importance, &f.CreatedAt)
out = append(out, f)
}
return out, nil
}

52
internal/db/sqlite.go Normal file
View File

@@ -0,0 +1,52 @@
package db
import (
"database/sql"
"fmt"
"log/slog"
"os"
"path/filepath"
_ "modernc.org/sqlite"
)
type DB struct {
Conn *sql.DB
}
func New(dbPath string) (*DB, error) {
dir := filepath.Dir(dbPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("create db dir: %w", err)
}
// Enable WAL and a busy timeout using modernc.org/sqlite's _pragma query parameters.
conn, err := sql.Open("sqlite", dbPath+"?_pragma=journal_mode(WAL)&_pragma=busy_timeout(5000)")
if err != nil {
return nil, fmt.Errorf("open db: %w", err)
}
conn.SetMaxOpenConns(1) // SQLite single-writer
if _, err := conn.Exec(schema); err != nil {
return nil, fmt.Errorf("run schema: %w", err)
}
slog.Info("database initialized", "path", dbPath)
return &DB{Conn: conn}, nil
}
func (d *DB) Close() error {
return d.Conn.Close()
}
func (d *DB) IsEmpty() bool {
var count int
d.Conn.QueryRow("SELECT COUNT(*) FROM companies").Scan(&count)
return count == 0
}
func (d *DB) IsMarketEmpty() bool {
var count int
d.Conn.QueryRow("SELECT COUNT(*) FROM selic_history").Scan(&count)
return count == 0
}

216
internal/fetcher/bcb.go Normal file
View File

@@ -0,0 +1,216 @@
package fetcher
import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"strconv"
"strings"
"time"
"github.com/sentinela-go/internal/db"
)
type bcbRecord struct {
Data string `json:"data"`
Valor string `json:"valor"`
}
func parseBCBDate(d string) string {
// dd/mm/yyyy -> yyyy-mm-dd
parts := strings.Split(d, "/")
if len(parts) != 3 {
return d
}
return parts[2] + "-" + parts[1] + "-" + parts[0]
}
func fetchBCBSeries(seriesID int, lastN int) ([]bcbRecord, error) {
// BCB "ultimos" endpoint caps at 20 records. Use date range instead.
now := time.Now()
// Estimate the window: lastN records ≈ lastN business days; use lastN*2 calendar days as a safety margin.
daysBack := lastN * 2
if daysBack < 60 {
daysBack = 60
}
from := now.AddDate(0, 0, -daysBack).Format("02/01/2006")
to := now.Format("02/01/2006")
url := fmt.Sprintf("https://api.bcb.gov.br/dados/serie/bcdata.sgs.%d/dados?formato=json&dataInicial=%s&dataFinal=%s", seriesID, from, to)
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
// BCB returns an error object (not array) on failure
if len(body) > 0 && body[0] == '{' {
return nil, fmt.Errorf("BCB API error for series %d: %s", seriesID, string(body[:min(300, len(body))]))
}
var records []bcbRecord
if err := json.Unmarshal(body, &records); err != nil {
return nil, fmt.Errorf("parse BCB series %d: %w (body: %s)", seriesID, err, string(body[:min(200, len(body))]))
}
return records, nil
}
func FetchSelic(database *db.DB) error {
slog.Info("fetching Selic data from BCB")
// Daily rate (series 432)
daily, err := fetchBCBSeries(432, 750)
if err != nil {
return fmt.Errorf("selic daily: %w", err)
}
// Target rate (series 11)
target, err := fetchBCBSeries(11, 750)
if err != nil {
slog.Warn("failed to fetch selic target", "error", err)
target = nil
}
targetMap := make(map[string]float64)
for _, r := range target {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
targetMap[date] = v
}
count := 0
for _, r := range daily {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
var tp *float64
if t, ok := targetMap[date]; ok {
tp = &t
}
if err := database.InsertSelic(date, v, nil, tp); err == nil {
count++
}
}
slog.Info("selic data loaded", "records", count)
return nil
}
func FetchCDI(database *db.DB) error {
slog.Info("fetching CDI data from BCB")
daily, err := fetchBCBSeries(12, 750)
if err != nil {
return fmt.Errorf("cdi daily: %w", err)
}
annual, err := fetchBCBSeries(4389, 750)
if err != nil {
slog.Warn("failed to fetch cdi annual", "error", err)
annual = nil
}
annualMap := make(map[string]float64)
for _, r := range annual {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
annualMap[date] = v
}
count := 0
for _, r := range daily {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
var ap *float64
if a, ok := annualMap[date]; ok {
ap = &a
}
if err := database.InsertCDI(date, v, ap); err == nil {
count++
}
}
slog.Info("cdi data loaded", "records", count)
return nil
}
func FetchIPCA(database *db.DB) error {
slog.Info("fetching IPCA data from BCB")
// IPCA is monthly, but fetchBCBSeries sizes its date window in days (lastN*2),
// so request a window wide enough to cover roughly the last 3 years of readings.
monthly, err := fetchBCBSeries(433, 550)
if err != nil {
return fmt.Errorf("ipca monthly: %w", err)
}
acc, err := fetchBCBSeries(13522, 550)
if err != nil {
slog.Warn("failed to fetch ipca acc 12m", "error", err)
acc = nil
}
accMap := make(map[string]float64)
for _, r := range acc {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
accMap[date] = v
}
count := 0
for _, r := range monthly {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
var ap *float64
if a, ok := accMap[date]; ok {
ap = &a
}
if err := database.InsertIPCA(date, v, ap); err == nil {
count++
}
}
slog.Info("ipca data loaded", "records", count)
return nil
}
func FetchFX(database *db.DB) error {
slog.Info("fetching FX data from BCB")
pairs := map[string]int{
"USD/BRL": 1,
"EUR/BRL": 21619,
}
for pair, series := range pairs {
records, err := fetchBCBSeries(series, 750)
if err != nil {
slog.Warn("failed to fetch fx", "pair", pair, "error", err)
continue
}
count := 0
for _, r := range records {
date := parseBCBDate(r.Data)
v, _ := strconv.ParseFloat(strings.Replace(r.Valor, ",", ".", 1), 64)
if err := database.InsertFX(date, pair, v); err == nil {
count++
}
}
slog.Info("fx data loaded", "pair", pair, "records", count)
}
return nil
}
func FetchAllBCB(database *db.DB) error {
start := time.Now()
var errs []string
if err := FetchSelic(database); err != nil {
errs = append(errs, err.Error())
}
if err := FetchCDI(database); err != nil {
errs = append(errs, err.Error())
}
if err := FetchIPCA(database); err != nil {
errs = append(errs, err.Error())
}
if err := FetchFX(database); err != nil {
errs = append(errs, err.Error())
}
slog.Info("BCB sync complete", "duration", time.Since(start))
if len(errs) > 0 {
return fmt.Errorf("bcb errors: %s", strings.Join(errs, "; "))
}
return nil
}

197
internal/fetcher/cvm.go Normal file
View File

@@ -0,0 +1,197 @@
package fetcher
import (
"archive/zip"
"bytes"
"encoding/csv"
"fmt"
"io"
"log/slog"
"net/http"
"strings"
"time"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/transform"
"github.com/sentinela-go/internal/db"
)
func FetchCVMCompanies(database *db.DB) error {
slog.Info("fetching CVM company registry")
resp, err := http.Get("https://dados.cvm.gov.br/dados/CIA_ABERTA/CAD/DADOS/cad_cia_aberta.csv")
if err != nil {
return fmt.Errorf("fetch cvm companies: %w", err)
}
defer resp.Body.Close()
reader := transform.NewReader(resp.Body, charmap.ISO8859_1.NewDecoder())
csvReader := csv.NewReader(reader)
csvReader.Comma = ';'
csvReader.LazyQuotes = true
header, err := csvReader.Read()
if err != nil {
return fmt.Errorf("read header: %w", err)
}
colIdx := make(map[string]int)
for i, h := range header {
colIdx[strings.TrimSpace(h)] = i
}
count := 0
for {
record, err := csvReader.Read()
if err == io.EOF {
break
}
if err != nil {
continue
}
getCol := func(name string) string {
if idx, ok := colIdx[name]; ok && idx < len(record) {
return strings.TrimSpace(record[idx])
}
return ""
}
c := &db.Company{
Name: getCol("DENOM_SOCIAL"),
CNPJ: getCol("CNPJ_CIA"),
CVMCode: getCol("CD_CVM"),
Status: getCol("SIT"),
Sector: getCol("SETOR_ATIV"),
}
if c.CNPJ == "" || c.Name == "" {
continue
}
if err := database.UpsertCompany(c); err != nil {
continue
}
count++
}
database.RebuildCompaniesFTS()
slog.Info("CVM companies loaded", "count", count)
return nil
}
func FetchCVMFilings(database *db.DB, year int) error {
slog.Info("fetching CVM IPE filings", "year", year)
url := fmt.Sprintf("https://dados.cvm.gov.br/dados/CIA_ABERTA/DOC/IPE/DADOS/ipe_cia_aberta_%d.zip", year)
resp, err := http.Get(url)
if err != nil {
return fmt.Errorf("fetch ipe %d: %w", year, err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
zipReader, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
if err != nil {
return fmt.Errorf("open zip: %w", err)
}
count := 0
for _, f := range zipReader.File {
if !strings.HasSuffix(f.Name, ".csv") {
continue
}
rc, err := f.Open()
if err != nil {
continue
}
reader := transform.NewReader(rc, charmap.ISO8859_1.NewDecoder())
csvReader := csv.NewReader(reader)
csvReader.Comma = ';'
csvReader.LazyQuotes = true
header, err := csvReader.Read()
if err != nil {
rc.Close()
continue
}
colIdx := make(map[string]int)
for i, h := range header {
colIdx[strings.TrimSpace(h)] = i
}
for {
record, err := csvReader.Read()
if err != nil {
break
}
getCol := func(name string) string {
if idx, ok := colIdx[name]; ok && idx < len(record) {
return strings.TrimSpace(record[idx])
}
return ""
}
cnpj := getCol("CNPJ_CIA")
extID := getCol("NUM_SEQ")
if extID == "" {
extID = fmt.Sprintf("%s-%s-%s", cnpj, getCol("DT_ENTREGA"), getCol("NUM_PROTOCOLO"))
}
// Try to find company
var companyID *int64
if cnpj != "" {
if c, err := database.GetCompanyByCNPJ(cnpj); err == nil && c != nil {
companyID = &c.ID
}
}
filing := &db.Filing{
ExternalID: extID,
CompanyID: companyID,
CNPJ: cnpj,
Category: getCol("CATEG_DOC"),
Type: getCol("TP_DOC"),
Species: getCol("ESPECIE"),
Subject: getCol("ASSUNTO"),
ReferenceDate: getCol("DT_REFER"),
DeliveryDate: getCol("DT_ENTREGA"),
Protocol: getCol("NUM_PROTOCOLO"),
Version: getCol("VERSAO"),
DownloadURL: getCol("LINK_DOC"),
}
if filing.DeliveryDate == "" {
continue
}
if err := database.UpsertFiling(filing); err != nil {
continue
}
count++
}
rc.Close()
}
database.RebuildFilingsFTS()
slog.Info("CVM filings loaded", "year", year, "count", count)
return nil
}
func FetchAllCVM(database *db.DB) error {
start := time.Now()
if err := FetchCVMCompanies(database); err != nil {
return err
}
// Fetch current- and previous-year filings; errors here are intentionally non-fatal
currentYear := time.Now().Year()
if err := FetchCVMFilings(database, currentYear); err != nil {
slog.Warn("IPE filings sync failed", "year", currentYear, "error", err)
}
if err := FetchCVMFilings(database, currentYear-1); err != nil {
slog.Warn("IPE filings sync failed", "year", currentYear-1, "error", err)
}
slog.Info("CVM sync complete", "duration", time.Since(start))
return nil
}

View File

@@ -0,0 +1,28 @@
package fetcher
import (
"log/slog"
"time"
"github.com/sentinela-go/internal/db"
)
func StartScheduler(database *db.DB, interval time.Duration, stop <-chan struct{}) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
slog.Info("scheduled sync starting")
if err := FetchAllBCB(database); err != nil {
slog.Error("scheduled BCB sync failed", "error", err)
}
if err := FetchAllCVM(database); err != nil {
slog.Error("scheduled CVM sync failed", "error", err)
}
case <-stop:
return
}
}
}

BIN
sentinela Executable file

Binary file not shown.