diff --git a/gcp/README.md b/gcp/README.md new file mode 100644 index 0000000..f6a86b7 --- /dev/null +++ b/gcp/README.md @@ -0,0 +1 @@ +GCP Logs initial diff --git a/gcp/gcp-log-forwarding/.terraform.lock.hcl b/gcp/gcp-log-forwarding/.terraform.lock.hcl new file mode 100644 index 0000000..9b6a832 --- /dev/null +++ b/gcp/gcp-log-forwarding/.terraform.lock.hcl @@ -0,0 +1,62 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/archive" { + version = "2.7.1" + constraints = "~> 2.4" + hashes = [ + "h1:A7EnRBVm4h9ryO9LwxYnKr4fy7ExPMwD5a1DsY7m1Y0=", + "zh:19881bb356a4a656a865f48aee70c0b8a03c35951b7799b6113883f67f196e8e", + "zh:2fcfbf6318dd514863268b09bbe19bfc958339c636bcbcc3664b45f2b8bf5cc6", + "zh:3323ab9a504ce0a115c28e64d0739369fe85151291a2ce480d51ccbb0c381ac5", + "zh:362674746fb3da3ab9bd4e70c75a3cdd9801a6cf258991102e2c46669cf68e19", + "zh:7140a46d748fdd12212161445c46bbbf30a3f4586c6ac97dd497f0c2565fe949", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:875e6ce78b10f73b1efc849bfcc7af3a28c83a52f878f503bb22776f71d79521", + "zh:b872c6ed24e38428d817ebfb214da69ea7eefc2c38e5a774db2ccd58e54d3a22", + "zh:cd6a44f731c1633ae5d37662af86e7b01ae4c96eb8b04144255824c3f350392d", + "zh:e0600f5e8da12710b0c52d6df0ba147a5486427c1a2cc78f31eea37a47ee1b07", + "zh:f21b2e2563bbb1e44e73557bcd6cdbc1ceb369d471049c40eb56cb84b6317a60", + "zh:f752829eba1cc04a479cf7ae7271526b402e206d5bcf1fcce9f535de5ff9e4e6", + ] +} + +provider "registry.terraform.io/hashicorp/google" { + version = "6.50.0" + constraints = "~> 6.0" + hashes = [ + "h1:79CwMTsp3Ud1nOl5hFS5mxQHyT0fGVye7pqpU0PPlHI=", + "zh:1f3513fcfcbf7ca53d667a168c5067a4dd91a4d4cccd19743e248ff31065503c", + "zh:3da7db8fc2c51a77dd958ea8baaa05c29cd7f829bd8941c26e2ea9cb3aadc1e5", + "zh:3e09ac3f6ca8111cbb659d38c251771829f4347ab159a12db195e211c76068bb", + "zh:7bb9e41c568df15ccf1a8946037355eefb4dfb4e35e3b190808bb7c4abae547d", + 
"zh:81e5d78bdec7778e6d67b5c3544777505db40a826b6eb5abe9b86d4ba396866b", + "zh:8d309d020fb321525883f5c4ea864df3d5942b6087f6656d6d8b3a1377f340fc", + "zh:93e112559655ab95a523193158f4a4ac0f2bfed7eeaa712010b85ebb551d5071", + "zh:d3efe589ffd625b300cef5917c4629513f77e3a7b111c9df65075f76a46a63c7", + "zh:d4a4d672bbef756a870d8f32b35925f8ce2ef4f6bbd5b71a3cb764f1b6c85421", + "zh:e13a86bca299ba8a118e80d5f84fbdd708fe600ecdceea1a13d4919c068379fe", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:fec30c095647b583a246c39d557704947195a1b7d41f81e369ba377d997faef6", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.7.2" + constraints = "~> 3.6" + hashes = [ + "h1:KG4NuIBl1mRWU0KD/BGfCi1YN/j3F7H4YgeeM7iSdNs=", + "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", + "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", + "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", + "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", + "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", + "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", + "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", + "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", + "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", + "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", + ] +} diff --git a/gcp/gcp-log-forwarding/README.md b/gcp/gcp-log-forwarding/README.md new file mode 100644 index 0000000..d96cfa8 --- /dev/null +++ b/gcp/gcp-log-forwarding/README.md @@ -0,0 +1,111 @@ +# GCP Log Forwarder + +This repository deploys a **Cloud Logging → Pub/Sub → Cloud Functions Gen2** pipeline on Google Cloud using Terraform. 
+ +It captures selected **GCP Logs** (for example, Compute Engine VM lifecycle events) and forwards them to the SolarWinds **OTLP-compatible endpoint** using a Go-based Cloud Function. + +--- + +## What this deploys + +- Required Google Cloud APIs +- Cloud Logging **sink** with configurable filter +- Pub/Sub **topic** for log delivery +- Cloud Functions **Gen2 (Go)** function triggered via Eventarc +- IAM bindings required for Cloud Build, Eventarc, Pub/Sub, and Cloud Run + +--- + +## Prerequisites + +- Terraform **>= 1.6.0** +- Google Cloud SDK (`gcloud`) +- A GCP project with billing enabled +- Permissions to create IAM bindings, Pub/Sub topics, Logging sinks, and Cloud Functions + +--- + +## GCP authentication (local development) + +Terraform uses **Application Default Credentials (ADC)**. + +### Step 1: Clone the SolarWinds gcp-poller public repo and change into the `gcp-log-forwarding` directory + +https://github.com/solarwinds/solarwinds-gcp-poller + + +### Step 2: Install Google Cloud SDK + +https://cloud.google.com/sdk/docs/install + +### Step 3: Authenticate + +```bash +gcloud auth application-default login +``` + +This opens a browser and stores credentials locally. + +### Step 4: Set the active project + +```bash +gcloud config set project YOUR_PROJECT_ID +``` + +Terraform will automatically use these credentials. +No service account keys are required.
+ +--- + +## Configuration + +Edit `terraform.tfvars`: + +```hcl +project_id = "my-gcp-project" +region = "us-central1" + +topic_name = "solarwinds-gcp-events" +sink_name = "solarwinds-gcp-events-sink" + +function_name = "ForwardLogs" +otlp_endpoint = "otel.collector.na-01.solarwinds.com:443" +api_token_value = "SOLARWINDS_OTEL_INGESTION_TOKEN" +``` + + +--- + +## Deploy + +```bash +terraform init +terraform plan +terraform apply +``` + +--- + +## Outputs + +After deployment, Terraform prints: + +- **Pub/Sub topic ID** receiving log entries +- **Logging sink writer identity** +- **Cloud Run service URI** backing the Cloud Function + +--- + +## Notes + +- The Cloud Function uses **internal-only ingress** and is invoked via Eventarc. +- Log volume depends on your logging filter – start narrow. +- Updating `terraform.tfvars` will redeploy the function. + +--- + +## Cleanup + +```bash +terraform destroy +``` diff --git a/gcp/gcp-log-forwarding/function_src/go.mod b/gcp/gcp-log-forwarding/function_src/go.mod new file mode 100644 index 0000000..f45a0ec --- /dev/null +++ b/gcp/gcp-log-forwarding/function_src/go.mod @@ -0,0 +1,29 @@ +module example.com/gcp-logs-otlp + +go 1.23.6 + +toolchain go1.23.12 + +require ( + github.com/GoogleCloudPlatform/functions-framework-go v1.9.2 + github.com/cloudevents/sdk-go/v2 v2.15.2 + go.opentelemetry.io/collector/pdata v1.38.0 + go.opentelemetry.io/collector/semconv v0.128.0 + google.golang.org/grpc v1.74.2 +) + +require ( + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.10.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text 
v0.25.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/protobuf v1.36.7 // indirect +) diff --git a/gcp/gcp-log-forwarding/function_src/go.sum b/gcp/gcp-log-forwarding/function_src/go.sum new file mode 100644 index 0000000..80bdec4 --- /dev/null +++ b/gcp/gcp-log-forwarding/function_src/go.sum @@ -0,0 +1,64 @@ +github.com/GoogleCloudPlatform/functions-framework-go v1.9.2 h1:Cev/PdoxY86bJjGwHJcpiWMhrZMVEoKp9wuEp9gCUvw= +github.com/GoogleCloudPlatform/functions-framework-go v1.9.2/go.mod h1:wLEV4uSJztSBI+QyUy2fkHBuGFjRIAEDOqcEQ2hwmgE= +github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= +github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/collector/pdata v1.38.0/go.mod h1:DSvnwj37IKyQj2hpB97cGITyauR8tvAauJ6/gsxg8mg= +go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= diff --git a/gcp/gcp-log-forwarding/function_src/main.go b/gcp/gcp-log-forwarding/function_src/main.go new file mode 100644 index 0000000..f69cba6 --- /dev/null +++ b/gcp/gcp-log-forwarding/function_src/main.go @@ -0,0 +1,543 @@ +// go:build !ignore +package gcp_logs_oltp + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "log" + "math" + "math/rand" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/GoogleCloudPlatform/functions-framework-go/functions" + cloudevents "github.com/cloudevents/sdk-go/v2" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + semconv "go.opentelemetry.io/collector/semconv/v1.5.0" + + "golang.org/x/sync/errgroup" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" +) + +// +// ─────────────────────────── Config ─────────────────────────── +// + +const ( + envOTLPEndpoint = "OTLP_ENDPOINT" + envAPIToken = "API_TOKEN" + envLogLevel = "LOG_LEVEL" + + defaultMaxBatchRecords = 2000 + defaultMaxBatchBytes = 1_500_000 + defaultExportTimeout = 7 * time.Second + 
defaultMaxRetries = 3 + defaultWorkers = 4 // request-scoped parallel exports +) + +type cfg struct { + Endpoint string + APIToken string + Debug bool + MaxBatchRecs int + MaxBatchBytes int + ExportTimeout time.Duration + MaxRetries int + Workers int // parallel export concurrency (request-scoped) +} + +func mustEnv(k string) string { + v := strings.TrimSpace(os.Getenv(k)) + if v == "" { + log.Fatalf("missing required env %s", k) + } + return v +} + +func getenvInt(k string, def int) int { + if s := strings.TrimSpace(os.Getenv(k)); s != "" { + if n, err := strconv.Atoi(s); err == nil && n > 0 { + return n + } + } + return def +} + +func getenvDur(k string, def time.Duration) time.Duration { + if s := strings.TrimSpace(os.Getenv(k)); s != "" { + if d, err := time.ParseDuration(s); err == nil && d > 0 { + return d + } + } + return def +} + +func loadConfig() cfg { + return cfg{ + Endpoint: mustEnv(envOTLPEndpoint), + APIToken: mustEnv(envAPIToken), + Debug: strings.EqualFold(os.Getenv(envLogLevel), "DEBUG"), + MaxBatchRecs: getenvInt("MAX_BATCH_RECORDS", defaultMaxBatchRecords), + MaxBatchBytes: getenvInt("MAX_BATCH_BYTES", defaultMaxBatchBytes), + ExportTimeout: getenvDur("EXPORT_TIMEOUT", defaultExportTimeout), + MaxRetries: getenvInt("MAX_RETRIES", defaultMaxRetries), + Workers: getenvInt("WORKERS", defaultWorkers), + } +} + +// +// ───────────────────── Pub/Sub CloudEvent types ───────────────────── +// + +type MessagePublishedData struct { + Message PubSubMessage `json:"message"` + Subscription string `json:"subscription"` +} + +type PubSubMessage struct { + Data []byte `json:"data"` // base64 → []byte auto-decoded by encoding/json + Attributes map[string]string `json:"attributes"` // optional +} + +// +// ───────────────────────── Export pipeline ───────────────────────── +// + +type anyMap = map[string]interface{} + +type resourceKey struct { + Service string + Plat string + Host string + Region string +} + +type exporter struct { + c cfg + conn 
*grpc.ClientConn + client plogotlp.GRPCClient +} + +var ( + globalExporter *exporter + globalCfg cfg + initOnce sync.Once +) + +func newExporter(c cfg) (*exporter, error) { + kp := keepalive.ClientParameters{ + Time: 60 * time.Second, + Timeout: 10 * time.Second, + PermitWithoutStream: true, + } + retryBackoff := backoff.Config{ + BaseDelay: 200 * time.Millisecond, + Multiplier: 1.6, + MaxDelay: 3 * time.Second, + } + conn, err := grpc.Dial( + c.Endpoint, + grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})), + grpc.WithKeepaliveParams(kp), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: retryBackoff, + MinConnectTimeout: 5 * time.Second, + }), + grpc.WithBlock(), + ) + if err != nil { + return nil, err + } + return &exporter{ + c: c, + conn: conn, + client: plogotlp.NewGRPCClient(conn), + }, nil +} + +func (e *exporter) exportWithRetry(ctx context.Context, req plogotlp.ExportRequest) error { + var err error + for attempt := 0; attempt <= e.c.MaxRetries; attempt++ { + // Nudge unhealthy channel + if e.conn.GetState() == connectivity.TransientFailure { + e.conn.Connect() + } + rctx := metadata.AppendToOutgoingContext(ctx, "authorization", "Bearer "+e.c.APIToken) + if _, err = e.client.Export(rctx, req); err == nil { + return nil + } + if attempt == e.c.MaxRetries { + break + } + time.Sleep(backoffJitter(200*time.Millisecond, attempt)) + } + return err +} + +func backoffJitter(base time.Duration, n int) time.Duration { + ms := float64(base.Milliseconds()) * math.Pow(1.6, float64(n)) + j := ms * (0.2 + rand.Float64()*0.4) + return time.Duration(ms+j) * time.Millisecond +} + +func estimateEntryBytes(e anyMap) int { + if b, err := json.Marshal(e); err == nil { + return len(b) + } + return 256 +} + +// +// ─────────────────────────── Helpers ─────────────────────────── +// + +func extractLogEntries(raw []byte) []anyMap { + var arr []anyMap + if err := json.Unmarshal(raw, &arr); err == nil { + return arr + } + var one anyMap + if err := 
json.Unmarshal(raw, &one); err == nil { + if v, ok := one["entries"]; ok { + if list, ok := v.([]interface{}); ok { + out := make([]anyMap, 0, len(list)) + for _, it := range list { + if m, ok := it.(map[string]interface{}); ok { + out = append(out, m) + } + } + return out + } + } + return []anyMap{one} + } + return nil +} + +func getString(m anyMap, k string) string { + if v, ok := m[k]; ok { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +func chooseBody(e anyMap) string { + if s := getString(e, "textPayload"); s != "" { + return s + } + if jp, ok := e["jsonPayload"]; ok && jp != nil { + if b, err := json.Marshal(jp); err == nil { + return string(b) + } + } + if pp, ok := e["protoPayload"]; ok && pp != nil { + if b, err := json.Marshal(pp); err == nil { + return string(b) + } + } + if b, err := json.Marshal(e); err == nil { + return string(b) + } + return "" +} + +func parseRFC3339Nanos(s string) int64 { + if s == "" { + return 0 + } + if t, err := time.Parse(time.RFC3339Nano, s); err == nil { + return t.UnixNano() + } + if t, err := time.Parse(time.RFC3339, s); err == nil { + return t.UnixNano() + } + return 0 +} + +func attrsFromEntry(entry anyMap) map[string]string { + defaults := map[string]string{ + "service.name": "compute.googleapis.com", + "cloud.provider": "gcp", + "host.id": "unknown", + "cloud.platform": "gcp_compute_engine", + } + attrs := map[string]string{"cloud.provider": "gcp"} + + zoneToRegion := func(val string) string { + if val == "" { + return "" + } + parts := strings.Split(val, "-") + if len(parts) >= 2 { + return strings.Join(parts[:len(parts)-1], "-") + } + return val + } + + if pp, ok := entry["protoPayload"].(map[string]interface{}); ok { + if s, ok := pp["serviceName"].(string); ok && s != "" { + attrs["service.name"] = s + } + } else if s := getString(entry, "serviceName"); s != "" { + attrs["service.name"] = s + } + + var mr map[string]interface{} + if x, ok := entry["resource"].(map[string]interface{}); ok { + mr 
= x + } else if x, ok := entry["monitoredResource"].(map[string]interface{}); ok { + mr = x + } + rtype := strings.ToLower(getString(mr, "type")) + var labels map[string]interface{} + if v, ok := mr["labels"].(map[string]interface{}); ok { + labels = v + } else { + labels = map[string]interface{}{} + } + + getLabel := func(k string) string { + if v, ok := labels[k]; ok { + if s, ok := v.(string); ok { + return s + } + } + return "" + } + + switch { + case rtype == "gce_instance": + attrs["cloud.platform"] = "gcp_compute_engine" + if id := getLabel("instance_id"); id != "" { + attrs["host.id"] = id + } + if z := getLabel("zone"); z != "" { + attrs["cloud.availability_zone"] = z + attrs["cloud.region"] = zoneToRegion(z) + } + case strings.Contains(rtype, "sql"): + attrs["cloud.platform"] = "gcp_cloud_sql" + if id := getLabel("database_id"); id != "" { + attrs["host.id"] = id + } + if r := getLabel("region"); r != "" { + attrs["cloud.region"] = r + } else if loc := getLabel("location"); loc != "" { + attrs["cloud.region"] = loc + } + case strings.Contains(rtype, "gcs"), strings.Contains(rtype, "storage"): + attrs["cloud.platform"] = "gcp_cloud_storage" + if b := getLabel("bucket_name"); b != "" { + attrs["gcp.bucket.name"] = b + attrs["host.id"] = b + } + if loc := getLabel("location"); loc != "" { + attrs["cloud.region"] = zoneToRegion(loc) + } + } + + if sev := getString(entry, "severity"); strings.TrimSpace(sev) != "" { + attrs["SeverityText"] = strings.ToUpper(strings.TrimSpace(sev)) + } + + for k, v := range defaults { + if _, ok := attrs[k]; !ok { + attrs[k] = v + } + } + return attrs +} + +// +// ────────────────────── CloudEvent entrypoint ────────────────────── +// + +func init() { + rand.Seed(time.Now().UnixNano()) + + initOnce.Do(func() { + globalCfg = loadConfig() + exp, err := newExporter(globalCfg) + if err != nil { + log.Fatalf("exporter init failed: %v", err) + } + globalExporter = exp + log.Printf("gcp-logs-otlp (CF Gen2, Pub/Sub trigger) ready: 
endpoint=%s workers=%d batch=%d/%d timeout=%s", + globalCfg.Endpoint, globalCfg.Workers, globalCfg.MaxBatchRecs, globalCfg.MaxBatchBytes, globalCfg.ExportTimeout) + }) + + functions.CloudEvent("ForwardLogs", ForwardLogs) +} + +// ForwardLogs: parse → group → chunk → export in parallel (bounded). +// Any export error → return error (NACK) so Pub/Sub retries. +func ForwardLogs(ctx context.Context, e cloudevents.Event) error { + if globalExporter == nil { + return fmt.Errorf("exporter not initialized") + } + start := time.Now() + + var d MessagePublishedData + if err := e.DataAs(&d); err != nil { + return fmt.Errorf("cloudevent decode failed: %w", err) // NACK + } + + raw := d.Message.Data + if len(raw) == 0 { + log.Printf("forwardlogs: empty payload (event=%s, sub=%s, attrs=%v)", e.ID(), d.Subscription, d.Message.Attributes) + return nil // ACK, nothing to do + } + + entries := extractLogEntries(raw) + if len(entries) == 0 { + log.Printf("forwardlogs: parsed 0 entries (event=%s, bytes=%d, attrs=%v)", e.ID(), len(raw), d.Message.Attributes) + return nil // ACK + } + + // Group by resource identity + buckets := map[resourceKey][]anyMap{} + for _, ent := range entries { + a := attrsFromEntry(ent) + k := resourceKey{ + Service: a["service.name"], + Plat: a["cloud.platform"], + Host: a["host.id"], + Region: a["cloud.region"], + } + buckets[k] = append(buckets[k], ent) + } + + // Export concurrently (bounded by WORKERS) + exported, err := exportBucketsConcurrently(ctx, buckets) + if err != nil { + return err // NACK → Pub/Sub retry + } + + log.Printf("forwardlogs: OK exported=%d buckets=%d entries_in=%d bytes_in=%d dur=%s event=%s", + exported, len(buckets), len(entries), len(raw), time.Since(start), e.ID()) + return nil +} + +// +// ────────────────────── Request-scoped concurrency ────────────────────── +// + +// exportBucketsConcurrently splits each bucket into size-bounded chunks +// and exports chunks in parallel, bounded by cfg.Workers. 
+// Returns total records exported or error (to trigger Pub/Sub retry). +func exportBucketsConcurrently(ctx context.Context, buckets map[resourceKey][]anyMap) (int, error) { + maxConc := globalCfg.Workers + if maxConc < 1 { + maxConc = 1 + } + sem := make(chan struct{}, maxConc) + g, gctx := errgroup.WithContext(ctx) + + var exported int64 + + // Iterate buckets + for rk, list := range buckets { + // Chunk by count and approx bytes + for i := 0; i < len(list); { + count, approxBytes := 0, 0 + j := i + for j < len(list) && count < globalCfg.MaxBatchRecs { + est := estimateEntryBytes(list[j]) + if count > 0 && (approxBytes+est) > globalCfg.MaxBatchBytes { + break + } + approxBytes += est + count++ + j++ + } + chunk := list[i:j] + i = j + + // Acquire a parallel slot or abort on context cancel + select { + case sem <- struct{}{}: + case <-gctx.Done(): + return int(atomic.LoadInt64(&exported)), gctx.Err() + } + + // Capture variables for goroutine + rk = rk + chunk = chunk + approxBytes = approxBytes + + g.Go(func() error { + defer func() { <-sem }() + + // Build OTLP logs for this chunk + logs := buildOTLPLogs(rk, chunk) + + // Export with timeout + retry + req := plogotlp.NewExportRequestFromLogs(logs) + ctxExp, cancel := context.WithTimeout(gctx, globalCfg.ExportTimeout) + err := globalExporter.exportWithRetry(ctxExp, req) + cancel() + if err != nil { + log.Printf("forwardlogs: OTLP export failed: %v (svc=%s region=%s approx=%dB n=%d)", + err, rk.Service, rk.Region, approxBytes, len(chunk)) + return fmt.Errorf("otlp export failed: %w", err) + } + + atomic.AddInt64(&exported, int64(len(chunk))) + return nil + }) + } + } + + if err := g.Wait(); err != nil { + return int(atomic.LoadInt64(&exported)), err + } + return int(atomic.LoadInt64(&exported)), nil +} + +// buildOTLPLogs constructs a plog.Logs payload for a single resource chunk. 
+func buildOTLPLogs(k resourceKey, chunk []anyMap) plog.Logs { + logs := plog.NewLogs() + rl := logs.ResourceLogs().AppendEmpty() + rl.SetSchemaUrl(semconv.SchemaURL) + rattrs := rl.Resource().Attributes() + rattrs.PutStr("cloud.provider", "gcp") + rattrs.PutStr("service.name", k.Service) + rattrs.PutStr("cloud.platform", k.Plat) + rattrs.PutStr("host.id", k.Host) + if k.Region != "" { + rattrs.PutStr("cloud.region", k.Region) + } + + sl := rl.ScopeLogs().AppendEmpty() + now := time.Now().UnixNano() + for _, ent := range chunk { + lr := sl.LogRecords().AppendEmpty() + if ts := parseRFC3339Nanos(getString(ent, "timestamp")); ts > 0 { + lr.SetTimestamp(pcommon.Timestamp(ts)) + } else { + lr.SetTimestamp(pcommon.Timestamp(now)) + } + lr.Body().SetStr(chooseBody(ent)) + if sev := getString(ent, "severity"); sev != "" { + lr.SetSeverityText(strings.ToUpper(strings.TrimSpace(sev))) + } + } + return logs +} diff --git a/gcp/gcp-log-forwarding/main.tf b/gcp/gcp-log-forwarding/main.tf new file mode 100644 index 0000000..bfa785d --- /dev/null +++ b/gcp/gcp-log-forwarding/main.tf @@ -0,0 +1,301 @@ +terraform { + required_version = ">= 1.6.0" + required_providers { + google = { source = "hashicorp/google", version = "~> 6.0" } + archive = { source = "hashicorp/archive", version = "~> 2.4" } + random = { source = "hashicorp/random", version = "~> 3.6" } + } + # backend "gcs" {} # optional +} + +provider "google" { + project = var.project_id + region = var.region +} + +# ────────────────────────────────────────────────────────────────────────────── +# Project metadata → identities & constants +# ────────────────────────────────────────────────────────────────────────────── +data "google_project" "this" {} + +locals { + project_number = data.google_project.this.number + cloud_build_sa = "${local.project_number}@cloudbuild.gserviceaccount.com" + compute_default_sa = "${local.project_number}-compute@developer.gserviceaccount.com" + + # Google-managed bucket used by CFv2 builds to 
fetch sources + gcf_sources_bucket = "gcf-v2-sources-${local.project_number}-${var.region}" + + # Service agents that will invoke the function (via Eventarc → Run) + eventarc_sa = "service-${local.project_number}@gcp-sa-eventarc.iam.gserviceaccount.com" + pubsub_sa = "service-${local.project_number}@gcp-sa-pubsub.iam.gserviceaccount.com" + + apis = [ + "cloudfunctions.googleapis.com", + "eventarc.googleapis.com", + "run.googleapis.com", + "pubsub.googleapis.com", + "logging.googleapis.com", + "cloudbuild.googleapis.com", + "storage.googleapis.com", + "artifactregistry.googleapis.com", + ] + + default_log_filter = <<-EOT + protoPayload.serviceName="compute.googleapis.com" AND + logName="projects/${var.project_id}/logs/cloudaudit.googleapis.com%2Factivity" AND + protoPayload.methodName:( + "compute.instances.insert" OR + "compute.instances.delete" OR + "compute.instances.start" OR + "compute.instances.stop" + ) + EOT +} + +# ────────────────────────────────────────────────────────────────────────────── +# Enable required APIs +# ────────────────────────────────────────────────────────────────────────────── +resource "google_project_service" "services" { + for_each = toset(local.apis) + project = var.project_id + service = each.value + disable_on_destroy = false +} + +# ────────────────────────────────────────────────────────────────────────────── +# Pub/Sub topic (must exist before function trigger) +# ────────────────────────────────────────────────────────────────────────────── +resource "google_pubsub_topic" "logs" { + name = var.topic_name + project = var.project_id + depends_on = [google_project_service.services] +} + +# ────────────────────────────────────────────────────────────────────────────── +# Staging bucket for function code +# ────────────────────────────────────────────────────────────────────────────── +resource "random_id" "suffix" { + byte_length = 2 +} + +resource "google_storage_bucket" "src" { + name = 
"${var.project_id}-cf-src-${random_id.suffix.hex}" + project = var.project_id + location = var.region + uniform_bucket_level_access = true + force_destroy = true + depends_on = [google_project_service.services] +} + +# Allow BOTH potential builders to read your custom staging bucket +resource "google_storage_bucket_iam_member" "cb_can_read_src" { + bucket = google_storage_bucket.src.name + role = "roles/storage.objectViewer" + member = "serviceAccount:${local.cloud_build_sa}" +} + +resource "google_storage_bucket_iam_member" "compute_can_read_src" { + bucket = google_storage_bucket.src.name + role = "roles/storage.objectViewer" + member = "serviceAccount:${local.compute_default_sa}" +} + +# Google-managed CFv2 sources bucket grants (fixes gcs-fetcher access) +resource "google_storage_bucket_iam_member" "gcf_sources_cb_object_viewer" { + bucket = local.gcf_sources_bucket + role = "roles/storage.objectViewer" + member = "serviceAccount:${local.cloud_build_sa}" +} + +resource "google_storage_bucket_iam_member" "gcf_sources_compute_object_viewer" { + bucket = local.gcf_sources_bucket + role = "roles/storage.objectViewer" + member = "serviceAccount:${local.compute_default_sa}" +} + +# ────────────────────────────────────────────────────────────────────────────── +# Project-level IAM for Artifact Registry + Logging (for builds) +# ────────────────────────────────────────────────────────────────────────────── +resource "google_project_iam_member" "cb_ar_writer" { + project = var.project_id + role = "roles/artifactregistry.writer" + member = "serviceAccount:${local.cloud_build_sa}" + depends_on = [google_project_service.services] +} + +resource "google_project_iam_member" "compute_ar_writer" { + project = var.project_id + role = "roles/artifactregistry.writer" + member = "serviceAccount:${local.compute_default_sa}" + depends_on = [google_project_service.services] +} + +resource "google_project_iam_member" "compute_logs_writer" { + project = var.project_id + role = 
"roles/logging.logWriter" + member = "serviceAccount:${local.compute_default_sa}" +} + +resource "google_project_iam_member" "cb_logs_writer" { + project = var.project_id + role = "roles/logging.logWriter" + member = "serviceAccount:${local.cloud_build_sa}" +} + +# ────────────────────────────────────────────────────────────────────────────── +# Package function code and upload to your staging bucket +# ────────────────────────────────────────────────────────────────────────────── +data "archive_file" "fn_zip" { + type = "zip" + source_dir = "${path.module}/function_src" + output_path = "${path.module}/build/function.zip" +} + +resource "google_storage_bucket_object" "fn_code" { + name = "function-${filesha256(data.archive_file.fn_zip.output_path)}.zip" + bucket = google_storage_bucket.src.name + source = data.archive_file.fn_zip.output_path +} + +# ────────────────────────────────────────────────────────────────────────────── +# Cloud Function Gen2 (Go 1.23) with Pub/Sub (Eventarc) trigger +# ────────────────────────────────────────────────────────────────────────────── +resource "google_cloudfunctions2_function" "forward_logs" { + name = var.function_name + location = var.region + + build_config { + runtime = "go123" + entry_point = "ForwardLogs" + source { + storage_source { + bucket = google_storage_bucket.src.name + object = google_storage_bucket_object.fn_code.name + } + } + } + + service_config { + available_memory = "${var.fn_memory_mb}Mi" + timeout_seconds = var.fn_timeout_seconds + max_instance_count = var.fn_max_instances + ingress_settings = "ALLOW_INTERNAL_ONLY" + + environment_variables = { + OTLP_ENDPOINT = var.otlp_endpoint + API_TOKEN = var.api_token_value + LOG_LEVEL = var.log_level + MAX_BATCH_RECORDS = tostring(var.max_batch_records) + MAX_BATCH_BYTES = tostring(var.max_batch_bytes) + EXPORT_TIMEOUT = var.export_timeout + MAX_RETRIES = tostring(var.max_retries) + WORKERS = tostring(var.workers) + } + } + + event_trigger { + trigger_region = 
var.region + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.logs.id + retry_policy = "RETRY_POLICY_RETRY" + } + + # Ensure build-time IAM exists before building + depends_on = [ + google_pubsub_topic.logs, + google_storage_bucket_iam_member.cb_can_read_src, + google_storage_bucket_iam_member.compute_can_read_src, + google_storage_bucket_iam_member.gcf_sources_cb_object_viewer, + google_storage_bucket_iam_member.gcf_sources_compute_object_viewer, + google_project_iam_member.cb_ar_writer, + google_project_iam_member.compute_ar_writer, + google_project_iam_member.compute_logs_writer, + google_project_iam_member.cb_logs_writer, + ] +} + +# Grant project-wide Cloud Run invoker to the Compute Default SA +resource "google_project_iam_member" "compute_default_run_invoker_project" { + project = var.project_id + role = "roles/run.invoker" + member = "serviceAccount:${local.compute_default_sa}" +} + +# (Optional) Also allow it to invoke CF Gen2 functions (project-wide) +resource "google_project_iam_member" "compute_default_cf_invoker_project" { + project = var.project_id + role = "roles/cloudfunctions.invoker" + member = "serviceAccount:${local.compute_default_sa}" +} + + +# ────────────────────────────────────────────────────────────────────────────── +# Function-level invoker IAM (CFv2) +# ────────────────────────────────────────────────────────────────────────────── +resource "google_cloudfunctions2_function_iam_member" "eventarc_can_invoke" { + project = var.project_id + location = var.region + cloud_function = google_cloudfunctions2_function.forward_logs.name + role = "roles/cloudfunctions.invoker" + member = "serviceAccount:${local.eventarc_sa}" + depends_on = [google_cloudfunctions2_function.forward_logs] +} + +resource "google_cloudfunctions2_function_iam_member" "pubsub_can_invoke" { + project = var.project_id + location = var.region + cloud_function = google_cloudfunctions2_function.forward_logs.name + role = 
"roles/cloudfunctions.invoker" + member = "serviceAccount:${local.pubsub_sa}" + depends_on = [google_cloudfunctions2_function.forward_logs] +} + +# ────────────────────────────────────────────────────────────────────────────── +# Cloud Run backing service IAM — add run.invoker using the *actual* service id +# ────────────────────────────────────────────────────────────────────────────── +resource "google_cloud_run_v2_service_iam_member" "eventarc_run_invoker" { + project = var.project_id + location = var.region + # Use the emitted Run service name, not the function name + name = google_cloudfunctions2_function.forward_logs.service_config[0].service + role = "roles/run.invoker" + member = "serviceAccount:${local.eventarc_sa}" + depends_on = [google_cloudfunctions2_function.forward_logs] +} + +resource "google_cloud_run_v2_service_iam_member" "pubsub_run_invoker" { + project = var.project_id + location = var.region + name = google_cloudfunctions2_function.forward_logs.service_config[0].service + role = "roles/run.invoker" + member = "serviceAccount:${local.pubsub_sa}" + depends_on = [google_cloudfunctions2_function.forward_logs] +} + +# ────────────────────────────────────────────────────────────────────────────── +# Log Router sink (AFTER both CF + Run invoker IAM are in place) +# ────────────────────────────────────────────────────────────────────────────── +resource "google_logging_project_sink" "router" { + name = var.sink_name + project = var.project_id + destination = "pubsub.googleapis.com/${google_pubsub_topic.logs.id}" + filter = coalesce(var.log_filter, local.default_log_filter) + unique_writer_identity = true + + depends_on = [ + google_cloudfunctions2_function_iam_member.eventarc_can_invoke, + google_cloudfunctions2_function_iam_member.pubsub_can_invoke, + google_cloud_run_v2_service_iam_member.eventarc_run_invoker, + google_cloud_run_v2_service_iam_member.pubsub_run_invoker, + ] +} + +# Allow the sink’s writer identity to publish to the topic (after sink 
exists) +resource "google_pubsub_topic_iam_member" "sink_publisher" { + project = var.project_id + topic = google_pubsub_topic.logs.name + role = "roles/pubsub.publisher" + member = google_logging_project_sink.router.writer_identity + depends_on = [google_logging_project_sink.router] +} diff --git a/gcp/gcp-log-forwarding/outputs.tf b/gcp/gcp-log-forwarding/outputs.tf new file mode 100644 index 0000000..26eb5ef --- /dev/null +++ b/gcp/gcp-log-forwarding/outputs.tf @@ -0,0 +1,11 @@ +output "topic_id" { + value = google_pubsub_topic.logs.id +} + +output "sink_writer_identity" { + value = google_logging_project_sink.router.writer_identity +} + +output "function_uri" { + value = google_cloudfunctions2_function.forward_logs.service_config[0].uri +} diff --git a/gcp/gcp-log-forwarding/terraform.tfvars b/gcp/gcp-log-forwarding/terraform.tfvars new file mode 100644 index 0000000..2f15ff4 --- /dev/null +++ b/gcp/gcp-log-forwarding/terraform.tfvars @@ -0,0 +1,36 @@ +# terraform.tfvars + +project_id = "" +region = "us-central1" + +# Pub/Sub + Sink +topic_name = "solarwinds-gcp-events" +sink_name = "solarwinds-gcp-events-sink" + +# Log filter +log_filter = <<-EOT + protoPayload.serviceName="compute.googleapis.com" AND + logName="projects/your-gcp-project-id/logs/cloudaudit.googleapis.com%2Factivity" AND + protoPayload.methodName=( + "v1.compute.instances.insert" OR + "v1.compute.instances.delete" OR + "v1.compute.instances.start" OR + "v1.compute.instances.stop" + ) +EOT + +# Function + OTLP +function_name = "ForwardLogs" +otlp_endpoint = "" +api_token_value = "" + +# Optional tuning +log_level = "INFO" +max_batch_records = 2000 +max_batch_bytes = 1500000 +export_timeout = "7s" +max_retries = 3 +workers = 8 +fn_memory_mb = 512 +fn_timeout_seconds = 60 +fn_max_instances = 10 diff --git a/gcp/gcp-log-forwarding/variables.tf b/gcp/gcp-log-forwarding/variables.tf new file mode 100644 index 0000000..6a2be2e --- /dev/null +++ b/gcp/gcp-log-forwarding/variables.tf @@ -0,0 +1,86 @@ 
variable "project_id" {
  type        = string
  description = "GCP project ID where the log-forwarding pipeline is deployed."
}

variable "region" {
  type        = string
  default     = "us-central1"
  description = "Region for the Pub/Sub topic, staging bucket, and Cloud Function."
}

variable "topic_name" {
  type        = string
  default     = "vm-events"
  description = "Name of the Pub/Sub topic that receives routed log entries."
}

variable "sink_name" {
  type        = string
  default     = "vm-events-sink"
  description = "Name of the Cloud Logging project sink that routes to the topic."
}

# If null, main.tf will construct a default filter using project_id (in locals)
variable "log_filter" {
  type        = string
  default     = null
  description = "Optional custom logging filter; leave null to use main.tf default."
}

variable "function_name" {
  type        = string
  default     = "ForwardLogs"
  description = "Name of the Cloud Functions Gen2 forwarder."
}

variable "fn_memory_mb" {
  type        = number
  default     = 512
  description = "Function memory in MiB; main.tf renders it as \"<n>Mi\" for available_memory."

  validation {
    condition     = var.fn_memory_mb > 0
    error_message = "fn_memory_mb must be a positive number of MiB."
  }
}

variable "fn_timeout_seconds" {
  type        = number
  default     = 60
  description = "Function execution timeout in seconds."

  validation {
    condition     = var.fn_timeout_seconds > 0
    error_message = "fn_timeout_seconds must be positive."
  }
}

variable "fn_max_instances" {
  type        = number
  default     = 10
  description = "Upper bound on concurrently running function instances."

  validation {
    condition     = var.fn_max_instances >= 1
    error_message = "fn_max_instances must be at least 1."
  }
}

variable "otlp_endpoint" {
  type        = string
  default     = "apm.collector.na-01.cloud.solarwinds.com:443"
  description = "host:port of the OTLP collector, injected as OTLP_ENDPOINT."
}

variable "api_token_value" {
  type        = string
  sensitive   = true
  description = "OTLP API token injected as an env var."
}

variable "log_level" {
  type        = string
  default     = "INFO"
  description = "Log verbosity for the forwarder, injected as LOG_LEVEL."
}

variable "max_batch_records" {
  type        = number
  default     = 2000
  description = "Maximum log records per export batch (MAX_BATCH_RECORDS)."

  validation {
    condition     = var.max_batch_records >= 1
    error_message = "max_batch_records must be at least 1."
  }
}

variable "max_batch_bytes" {
  type        = number
  default     = 1500000
  description = "Maximum serialized batch size in bytes (MAX_BATCH_BYTES)."

  validation {
    condition     = var.max_batch_bytes >= 1
    error_message = "max_batch_bytes must be at least 1."
  }
}

variable "export_timeout" {
  type        = string
  default     = "7s"
  description = "Per-export timeout passed to the function as EXPORT_TIMEOUT (e.g. \"7s\")."
}

variable "max_retries" {
  type        = number
  default     = 3
  description = "Maximum export retry attempts (MAX_RETRIES)."

  validation {
    condition     = var.max_retries >= 0
    error_message = "max_retries must be zero or greater."
  }
}

variable "workers" {
  type        = number
  default     = 8
  description = "Number of concurrent export workers (WORKERS)."

  validation {
    condition     = var.workers >= 1
    error_message = "workers must be at least 1."
  }
}