Skip to content

Commit ae417fa

Browse files
committed
feat: bump cheetah and do not use subpath while mounting cheetah configmaps
Signed-off-by: Lenin Mehedy <lenin.mehedy@hashgraph.com>
1 parent 010621d commit ae417fa

File tree

7 files changed

+243
-36
lines changed

7 files changed

+243
-36
lines changed

charts/solo-deployment/config-files/cheetah/pipelines/block-streams.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
enabled: true
33
stopOnError: "{{ .cheetah.stopOnError }}" # stop the pipeline if any error occurs
44
scanner:
5-
directory: /opt/hgcapp/blockStreams/block-{{ .node.accountId }}
5+
directory: /opt/hgcapp/blockStreams
66
pattern: ".mf"
77
interval: 100ms
88
batchSize: 1000 # max number of files per scanned items batch

charts/solo-deployment/config-files/cheetah/pipelines/events-streams.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
enabled: true
33
stopOnError: "{{ .cheetah.stopOnError }}" # stop the pipeline if any error occurs
44
scanner:
5-
directory: /opt/hgcapp/eventsStreams/events_{{ .node.accountId }}
5+
directory: /opt/hgcapp/eventsStreams
66
pattern: ".evts_sig"
77
interval: 100ms
88
batchSize: 1000 # max number of files per scanned items batch

charts/solo-deployment/config-files/cheetah/pipelines/record-streams.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
enabled: true
33
stopOnError: "{{ .cheetah.stopOnError }}" # stop the pipeline if any error occurs
44
scanner:
5-
directory: /opt/hgcapp/recordStreams/record{{ .node.accountId }}
5+
directory: /opt/hgcapp/recordStreams
66
pattern: ".rcd_sig"
77
interval: 100ms
88
batchSize: 1000 # max number of files per scanned items batch

charts/solo-deployment/templates/network-node-statefulset.yaml

Lines changed: 24 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,9 @@
1919
{{- $cheetah := $.Values.cheetah }}
2020
{{- $bucketPrefixString := toString $cloud.buckets.streamBucketPrefix }}
2121
{{- $bucketPrefix := empty $bucketPrefixString | ternary "" (printf "%s/" $bucketPrefixString) }}
22-
{{- $blockStreamBucketPrefix := printf "%sblockStreams/block-%s" $bucketPrefix $node.accountId }}
23-
{{- $recordStreamBucketPrefix := printf "%srecordstreams/record%s" $bucketPrefix $node.accountId }}
24-
{{ $eventStreamsBucketPrefix := printf "%seventsStreams/events_%s" $bucketPrefix $node.accountId -}}
22+
{{- $blockStreamBucketPrefix := printf "%sblockStreams" $bucketPrefix }}
23+
{{- $recordStreamBucketPrefix := printf "%srecordstreams" $bucketPrefix }}
24+
{{- $eventStreamsBucketPrefix := printf "%seventsStreams" $bucketPrefix }}
2525
---
2626
apiVersion: apps/v1
2727
kind: StatefulSet
@@ -233,22 +233,17 @@ spec:
233233
- upload
234234
- --config
235235
- /app/config/cheetah.yaml
236+
- --config-check-interval
237+
- {{ default "30s" $cheetah.configCheckInterval }}
236238
volumeMounts:
237239
- name: hgcapp-blockstream
238-
mountPath: /opt/hgcapp/blockStreams/block-{{ $node.accountId }}
239-
subPath: block-{{ $node.accountId }}
240+
mountPath: /opt/hgcapp/blockStreams
240241
- name: hgcapp-record-streams
241-
mountPath: /opt/hgcapp/recordStreams/record{{ $node.accountId }}
242-
subPath: record{{ $node.accountId }}
243-
- name: hgcapp-record-streams-sidecar
244-
mountPath: /opt/hgcapp/recordStreams/record{{ $node.accountId }}/sidecar
242+
mountPath: /opt/hgcapp/recordStreams
245243
- name: hgcapp-event-streams
246-
mountPath: /opt/hgcapp/eventsStreams/events_{{ $node.accountId }}
247-
subPath: events_{{ $node.accountId }}
244+
mountPath: /opt/hgcapp/eventsStreams
248245
- name: solo-cheetah-config
249-
subPath: cheetah.yaml
250-
mountPath: /app/config/cheetah.yaml
251-
readOnly: true
246+
mountPath: /app/config
252247
ports:
253248
- name: pprof
254249
containerPort: 6061
@@ -335,15 +330,14 @@ spec:
335330
- /app/bin/cheetah
336331
- upload
337332
- --config
338-
- /app/config/cheetah.yaml
333+
- /app/config/cheetah-block-streams.yaml
334+
- --config-check-interval
335+
- {{ default "30s" $cheetah.configCheckInterval }}
339336
volumeMounts:
340337
- name: hgcapp-blockstream
341-
mountPath: /opt/hgcapp/blockStreams/block-{{ $node.accountId }}
342-
subPath: block-{{ $node.accountId }}
338+
mountPath: /opt/hgcapp/blockStreams
343339
- name: solo-cheetah-config
344-
subPath: cheetah-block-streams.yaml
345-
mountPath: /app/config/cheetah.yaml
346-
readOnly: true
340+
mountPath: /app/config
347341
ports:
348342
- name: pprof
349343
containerPort: 6061
@@ -419,15 +413,14 @@ spec:
419413
- /app/bin/cheetah
420414
- upload
421415
- --config
422-
- /app/config/cheetah.yaml
416+
- /app/config/cheetah-record-streams.yaml
417+
- --config-check-interval
418+
- {{ default "30s" $cheetah.configCheckInterval }}
423419
volumeMounts:
424420
- name: hgcapp-record-streams
425-
mountPath: /opt/hgcapp/recordStreams/record{{ $node.accountId }}
426-
subPath: record{{ $node.accountId }}
421+
mountPath: /opt/hgcapp/recordStreams
427422
- name: solo-cheetah-config
428-
subPath: cheetah-record-streams.yaml
429-
mountPath: /app/config/cheetah.yaml
430-
readOnly: true
423+
mountPath: /app/config
431424
ports:
432425
- name: pprof
433426
containerPort: 6061
@@ -504,15 +497,14 @@ spec:
504497
- /app/bin/cheetah
505498
- upload
506499
- --config
507-
- /app/config/cheetah.yaml
500+
- /app/config/cheetah-events-streams.yaml
501+
- --config-check-interval
502+
- {{ default "30s" $cheetah.configCheckInterval }}
508503
volumeMounts:
509504
- name: hgcapp-event-streams
510-
mountPath: /opt/hgcapp/eventsStreams/events_{{ $node.accountId }}
511-
subPath: events_{{ $node.accountId }}
505+
mountPath: /opt/hgcapp/eventsStreams
512506
- name: solo-cheetah-config
513-
subPath: cheetah-events-streams.yaml
514-
mountPath: /app/config/cheetah.yaml
515-
readOnly: true
507+
mountPath: /app/config
516508
ports:
517509
- name: pprof
518510
containerPort: 6061

charts/solo-deployment/values.yaml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,12 +22,13 @@ cheetah:
2222
image:
2323
registry: "ghcr.io"
2424
repository: "hashgraph/solo-cheetah/cheetah"
25-
tag: "0.4.0" # https://github.com/hashgraph/solo-cheetah/pkgs/container/solo-cheetah%2Fcheetah
25+
tag: "0.4.1" # https://github.com/hashgraph/solo-cheetah/pkgs/container/solo-cheetah%2Fcheetah
2626
pullPolicy: "IfNotPresent"
2727
maxProcessors: 10 # max number of concurrent processors for each pipeline
2828
# whether it should stop the pipelines if any error occurs; if false, it will continue with the expectation that the
2929
# error may get fixed in the next run
3030
stopOnError: false
31+
configCheckInterval: 30s # interval to check for config changes and update pipelines accordingly
3132
deployment:
3233
# if true, a single instance of cheetah will be deployed for all stream files (e.g. record-streams, events-streams, block-streams etc.)
3334
# WARNING: Other streams files uploader sidecar configuration will be ignored

dev/README.md

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,22 @@ This document outlines the steps to set up a development environment for the pro
99
- K9s (https://k9scli.io/)
1010
- `yq` from this link: [yq](https://github.com/mikefarah/yq/#install)
1111

12+
## Steps to run using Solo
13+
- Install Node
14+
```
15+
nvm use 24
16+
```
17+
18+
- Deploy the network and other components using Solo:
19+
```bash
20+
task deploy-network
21+
```
22+
23+
- Destroy the network and other components using Solo:
24+
```bash
25+
task destroy-network
26+
```
27+
1228
## Steps to run the tests
1329

1430
- Open a separate terminal and run the following command to deploy the network and other components:

dev/Taskfile.yml

Lines changed: 198 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,198 @@
1+
version: '3'
2+
3+
env:
4+
CONSENSUS_NODE_VERSION: v0.66.0
5+
SOLO_VERSION: v0.54.0
6+
SOLO_CLUSTER_NAME: solo
7+
SOLO_NAMESPACE: solo
8+
SOLO_CLUSTER_SETUP_NAMESPACE: solo-cluster
9+
SOLO_DEPLOYMENT: solo-deployment
10+
NODEJS_VERSION: 20.18.0
11+
NODES: 1
12+
RELAY: false
13+
MIRROR_NODE: false
14+
HEDERA_EXPLORER: false
15+
IMAGE: "solo-test:local"
16+
17+
vars:
18+
UUID:
19+
sh: uuidgen | tr -d '-' | head -c 8 | tr '[:upper:]' '[:lower:]'
20+
21+
tasks:
22+
install-solo:
23+
desc: Install Solo CLI tool
24+
silent: true
25+
cmds:
26+
- |
27+
# Check if solo is installed and matches requested version
28+
if command -v solo >/dev/null 2>&1; then
29+
ver=$(solo --version 2>/dev/null || solo version 2>/dev/null || true)
30+
if [ -n "$ver" ] && echo "$ver" | grep -q "{{.SOLO_VERSION}}"; then
31+
echo "✅ Solo CLI {{.SOLO_VERSION}} already installed."
32+
exit 0
33+
else
34+
echo "ℹ️ Found solo ($ver), but version differs. Installing {{.SOLO_VERSION}}..."
35+
fi
36+
else
37+
echo "⬇️ Solo CLI not found. Installing {{.SOLO_VERSION}}..."
38+
fi
39+
npm install -g @hashgraph/solo@{{.SOLO_VERSION}}
40+
- echo "✅ Solo CLI {{.SOLO_VERSION}} installed."
41+
42+
deploy-network:
43+
desc: Deploy a n-node Solo network
44+
silent: true
45+
deps:
46+
- install-solo
47+
cmds:
48+
- echo "🚀 Deploying Solo network with NODES={{.NODES}} MIRROR_NODE={{.MIRROR_NODE}} HEDERA_EXPLORER={{.HEDERA_EXPLORER}} RELAY={{.RELAY}}..."
49+
- task destroy-network
50+
- |
51+
if ! kind get clusters | grep -q "{{.SOLO_CLUSTER_NAME}}"; then
52+
echo "⬇️ Creating Kind cluster {{.SOLO_CLUSTER_NAME}}..."
53+
kind create cluster --name "{{.SOLO_CLUSTER_NAME}}"
54+
else
55+
echo "✅ Kind cluster {{.SOLO_CLUSTER_NAME}} already exists"
56+
fi
57+
- task enable-metrics-server
58+
- task set-proxy
59+
- echo "🧹 Removing old ~/.solo data..."
60+
- rm -rf ~/.solo || true
61+
- echo "⬇️ Initializing Solo..."
62+
- solo init
63+
- solo cluster-ref config connect --cluster-ref kind-{{.SOLO_CLUSTER_NAME}} --context kind-{{.SOLO_CLUSTER_NAME}}
64+
- solo deployment config create -n "{{.SOLO_NAMESPACE}}" --deployment "{{.SOLO_DEPLOYMENT}}"
65+
- solo deployment cluster attach --deployment "{{.SOLO_DEPLOYMENT}}" --cluster-ref kind-{{.SOLO_CLUSTER_NAME}} --num-consensus-nodes {{.NODES}}
66+
- solo keys consensus generate --gossip-keys --tls-keys --deployment "{{.SOLO_DEPLOYMENT}}"
67+
- solo cluster-ref config setup --prometheus-stack -s "{{.SOLO_CLUSTER_SETUP_NAMESPACE}}"
68+
- solo consensus network deploy --deployment "{{.SOLO_DEPLOYMENT}}" --chart-dir ../charts
69+
- solo consensus node setup --deployment "{{.SOLO_DEPLOYMENT}}" --release-tag "{{.CONSENSUS_NODE_VERSION}}"
70+
- solo consensus node start --deployment "{{.SOLO_DEPLOYMENT}}"
71+
- |
72+
{{if .MIRROR_NODE}}
73+
echo "📡 Deploying Mirror Node..."
74+
solo mirror-node deploy --deployment "{{.SOLO_DEPLOYMENT}}" --cluster-ref kind-{{.SOLO_CLUSTER_NAME}}
75+
{{end}}
76+
- |
77+
{{if .HEDERA_EXPLORER}}
78+
echo "🌐 Deploying Explorer..."
79+
solo explorer deploy --deployment "{{.SOLO_DEPLOYMENT}}" --cluster-ref kind-{{.SOLO_CLUSTER_NAME}}
80+
{{end}}
81+
- |
82+
{{if .RELAY}}
83+
echo "🔁 Deploying Relay..."
84+
solo relay deploy -i node1 --deployment "{{.SOLO_DEPLOYMENT}}"
85+
{{end}}
86+
- echo "🎉 Solo network deployed with {{.NODES}} nodes! Run 👉 k9s to manage the cluster."
87+
88+
destroy-network:
89+
desc: Destroy the Solo network and clean up resources
90+
silent: true
91+
cmds:
92+
- echo "💣 Destroying existing Solo network (if any)..."
93+
- helm uninstall solo-cluster-setup -n "{{.SOLO_CLUSTER_SETUP_NAMESPACE}}" --wait --ignore-not-found || true
94+
- kubectl delete ns "{{.SOLO_CLUSTER_SETUP_NAMESPACE}}" --wait --ignore-not-found || true
95+
- kubectl delete ns "{{.SOLO_NAMESPACE}}" --wait --ignore-not-found || true
96+
- rm -rf ~/.solo || true
97+
- echo "✅ Solo network destroyed."
98+
99+
enable-metrics-server:
100+
desc: Enable metrics server in the Solo network (idempotent)
101+
cmds:
102+
- |
103+
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
104+
kubectl patch deployment -n kube-system metrics-server --type='json' \
105+
-p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--kubelet-insecure-tls"}]'
106+
echo "Metrics server is enabled."
107+
108+
run-proxy:
109+
desc: Run a proxy to the Solo network (daemon mode, idempotent)
110+
cmds:
111+
- |
112+
if docker ps -q --filter "name=docker_registry_proxy" | grep -q .; then
113+
echo "✅ Proxy is already running."
114+
exit 0
115+
fi
116+
echo "Starting docker_registry_proxy..."
117+
docker run --rm --name docker_registry_proxy -d \
118+
--net kind --hostname docker-registry-proxy \
119+
-p 0.0.0.0:3128:3128 \
120+
-e ENABLE_MANIFEST_CACHE=true \
121+
-e REGISTRIES="docker.io registry.k8s.io quay.io ghcr.io" \
122+
-v "$HOME/docker_mirror_cache":/docker_mirror_cache \
123+
-v "$HOME/docker_mirror_certs":/ca \
124+
rpardini/docker-registry-proxy:0.6.5
125+
sleep 5 # Wait for the proxy to start
126+
echo "✅ Proxy is running at localhost:3128"
127+
128+
stop-proxy:
129+
desc: Stop and remove the proxy container (idempotent)
130+
cmds:
131+
- echo "💣 Stopping and removing docker_registry_proxy container (if any)..."
132+
- |
133+
if docker ps -q --filter "name=docker_registry_proxy" | grep -q . || true; then
134+
docker stop docker_registry_proxy || true
135+
docker rm docker_registry_proxy || true
136+
echo "✅ Stopped and removed docker_registry_proxy container."
137+
fi
138+
139+
set-proxy:
140+
desc: Set up Docker to use the proxy (idempotent)
141+
cmds:
142+
- task run-proxy
143+
- echo "Setting up cluster to use the proxy..."
144+
- |
145+
# see: https://github.com/rpardini/docker-registry-proxy
146+
KIND_NAME={{.SOLO_CLUSTER_NAME}}
147+
SETUP_URL=http://docker-registry-proxy:3128/setup/systemd
148+
docker exec solo-control-plane sh -c "\
149+
curl "${SETUP_URL}" \
150+
| sed s/docker\.service/containerd\.service/g \
151+
| sed '/Environment/ s/$/ \"NO_PROXY=127.0.0.0\/8,10.0.0.0\/8,172.16.0.0\/12,192.168.0.0\/16\"/' \
152+
| bash" # Configure every node in background
153+
wait $! # Wait for all configurations to end
154+
echo "✅ Cluster configured to use the proxy."
155+
156+
check-docker:
157+
desc: Check docker settings
158+
silent: true
159+
cmds:
160+
- task run-proxy
161+
- |
162+
echo "🔍 Checking Docker Desktop resources..."
163+
docker_info=$(docker info --format '{{"{{json .}}"}}')
164+
mem=$(echo "$docker_info" | jq '.MemTotal' 2>/dev/null || echo 0)
165+
cpus=$(echo "$docker_info" | jq '.NCPU' 2>/dev/null || echo 0)
166+
min_mem_gb=31
167+
min_cpus=8
168+
min_mem=$(("${min_mem_gb}" * 1024 * 1024 * 1024))
169+
if [ "$mem" -lt "$min_mem" ] || [ "$cpus" -lt "$min_cpus" ]; then
170+
echo "❌ Docker Desktop resources too low: CPUs=$cpus, Mem=$(($mem/1024/1024/1024))GB"
171+
echo "➡️ Please set at least ${min_mem_gb}GB RAM and ${min_cpus} CPUs in Docker Desktop > Settings > Resources."
172+
exit 1
173+
else
174+
echo "✅ Docker Desktop resources OK: CPUs=$cpus, Mem=$(($mem/1024/1024/1024))GB"
175+
fi
176+
load-image:
177+
desc: Load a local Docker image into the Kind cluster
178+
vars:
179+
IMAGE: "{{.IMAGE}}"
180+
cmds:
181+
- echo "🚚 Loading image {{.IMAGE}} into Kind cluster {{.SOLO_CLUSTER_NAME}}..."
182+
- kind load docker-image "{{.IMAGE}}" -n "{{.SOLO_CLUSTER_NAME}}"
183+
- echo "✅ Image {{.IMAGE}} loaded into Kind cluster {{.SOLO_CLUSTER_NAME}}."
184+
refresh-node:
185+
desc: Refresh a Solo node by running setup and start (idempotent)
186+
vars:
187+
NODE: "{{.NODE}}"
188+
cmds:
189+
- echo "🔄 Refreshing Solo node {{.NODE}}..."
190+
- |
191+
bash -c "
192+
if [ -z '{{.NODE}}' ]; then
193+
echo '❌ NODE variable is required. Usage: task refresh-node -- NODE=node1'
194+
exit 1
195+
fi
196+
"
197+
solo node setup --deployment "{{.SOLO_DEPLOYMENT}}" -i {{.NODE}}
198+
solo node start --deployment "{{.SOLO_DEPLOYMENT}}" -i {{.NODE}}

0 commit comments

Comments
 (0)