Skip to content

Commit 88cf9fe

Browse files
authored
chore(e2e): enable ConfigMap Runtime Test and Database Cleanup for Nightly Jobs (redhat-developer#2928)
* chore(e2e): RHIDP-7172 Plugin 'permission' fails during startup - Add PostgreSQL CLI installation to Dockerfile - Enhance database cleanup script with error handling and logging - Refactor ConfigMap handling and increase timeouts in tests - Remove unused UIhelper instantiation in config-map.spec.ts - Improve deployment restart with better logging and explicit timeouts * Remove clear_database call from utils.sh and add it to periodic.sh Signed-off-by: Gustavo Lira <guga.java@gmail.com> * Remove clear_database call from utils.sh and add it to periodic.sh Signed-off-by: Gustavo Lira <guga.java@gmail.com> * Enable config-map.spec again * fix(ci): Enable ConfigMap test and fix database cleanup in nightly jobs - Fix clear_database function call in ocp-nightly.sh (was calling clear-database instead of clear_database) - Enable ConfigMap runtime test by removing test.describe.skip - Integrate database cleanup into nightly CI pipeline for improved test reliability * refactor(e2e): Update ConfigMap name in configuration test - Changed configMapName from "rhdh-backstage-app-config" to "app-config" for clarity and consistency in the test suite. * Fix config-map name in e2e tests * Enhance KubeClient functionality and update ConfigMap handling in e2e tests - Added methods to list and find ConfigMaps dynamically, improving test reliability. Updated logging for better traceability during ConfigMap updates. * Refactor KubeClient to use a constant for ConfigMap names - Replaced inline array of possible ConfigMap names with a class constant for improved maintainability and clarity in the findAppConfigMap method. * Update bulk-import.spec.ts to check button state - Changed expectation from button visibility to disabled state for improved accuracy in bulk import tests. --------- Signed-off-by: Gustavo Lira <guga.java@gmail.com>
1 parent 0f00683 commit 88cf9fe

File tree

7 files changed

+193
-24
lines changed

7 files changed

+193
-24
lines changed

.ibm/images/Dockerfile

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,11 @@ RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.
7777
RUN apt-get update -y && \
7878
apt-get install -y skopeo
7979

80+
# Install PostgreSQL CLI (psql only)
81+
RUN apt-get update && \
82+
apt-get install -y --no-install-recommends postgresql-client && \
83+
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
84+
8085
# Install umoci
8186
RUN curl -LO "https://github.com/opencontainers/umoci/releases/download/v0.4.7/umoci.amd64" && \
8287
chmod +x umoci.amd64 && \

.ibm/pipelines/clear-database.sh

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
#!/bin/bash
2+
3+
clear_database() {
4+
export POSTGRES_USER="$(echo -n "$RDS_USER" | base64 --decode)"
5+
export PGPASSWORD=$RDS_PASSWORD
6+
export POSTGRES_HOST=$RDS_1_HOST
7+
8+
echo "Starting database cleanup process..."
9+
10+
# Get list of databases, handle potential connection errors
11+
DATABASES=$(psql -h "$POSTGRES_HOST" -U "$POSTGRES_USER" -p "5432" -d postgres -Atc \
12+
"SELECT datname FROM pg_database WHERE datistemplate = false AND datname NOT IN ('postgres', 'rdsadmin');" 2>/dev/null)
13+
14+
if [ $? -ne 0 ]; then
15+
echo "Warning: Failed to connect to database or retrieve database list"
16+
return 1
17+
fi
18+
19+
if [ -z "$DATABASES" ]; then
20+
echo "No databases found to drop"
21+
return 0
22+
fi
23+
24+
echo "Found databases to drop: $(echo "$DATABASES" | tr '\n' ' ')"
25+
26+
for db in $DATABASES; do
27+
echo "Attempting to drop database: $db"
28+
29+
# Use IF EXISTS to avoid errors if database doesn't exist
30+
# Capture both stdout and stderr, but don't let errors stop the script
31+
if psql -h "$POSTGRES_HOST" -U "$POSTGRES_USER" -p "5432" -d postgres -c "DROP DATABASE IF EXISTS \"$db\";" 2>&1; then
32+
echo "Successfully dropped database: $db"
33+
else
34+
echo "Warning: Failed to drop database $db, but continuing with cleanup"
35+
fi
36+
done
37+
38+
echo "Database cleanup process completed"
39+
}

.ibm/pipelines/jobs/ocp-nightly.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ handle_ocp_nightly() {
1010
export K8S_CLUSTER_ROUTER_BASE=$(oc get route console -n openshift-console -o=jsonpath='{.spec.host}' | sed 's/^[^.]*\.//')
1111

1212
cluster_setup
13+
clear_database
1314
initiate_deployments
1415
deploy_test_backstage_customization_provider "${NAME_SPACE}"
1516

.ibm/pipelines/openshift-ci-tests.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ trap cleanup EXIT INT ERR
4242
SCRIPTS=(
4343
"utils.sh"
4444
"env_variables.sh"
45+
"clear-database.sh"
4546
)
4647

4748
# Source explicitly specified scripts

e2e-tests/playwright/e2e/configuration-test/config-map.spec.ts

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4,19 +4,20 @@ import { LOGGER } from "../../utils/logger";
44
import { Common } from "../../utils/common";
55
import { UIhelper } from "../../utils/ui-helper";
66

7-
test.describe.skip("Change app-config at e2e test runtime", () => {
7+
test.describe("Change app-config at e2e test runtime", () => {
88
test("Verify title change after ConfigMap modification", async ({ page }) => {
99
test.setTimeout(300000); // Increasing to 5 minutes
1010

11-
const configMapName = "rhdh-backstage-app-config";
11+
// Start with a common name, but let KubeClient find the actual ConfigMap
12+
const configMapName = "app-config-rhdh";
1213
const namespace = process.env.NAME_SPACE_RUNTIME || "showcase-runtime";
1314
const deploymentName = "rhdh-backstage";
1415

1516
const kubeUtils = new KubeClient();
1617
const dynamicTitle = generateDynamicTitle();
17-
const uiHelper = new UIhelper(page);
1818
try {
19-
LOGGER.info(`Updating ConfigMap '${configMapName}' with new title.`);
19+
LOGGER.info(`Looking for app-config ConfigMap in namespace '${namespace}'`);
20+
LOGGER.info(`Updating ConfigMap with new title: '${dynamicTitle}'`);
2021
await kubeUtils.updateConfigMapTitle(
2122
configMapName,
2223
namespace,
@@ -34,10 +35,7 @@ test.describe.skip("Change app-config at e2e test runtime", () => {
3435
await page.reload({ waitUntil: "domcontentloaded" });
3536
await common.loginAsGuest();
3637
await new UIhelper(page).openSidebar("Home");
37-
await uiHelper.verifyHeading("Welcome back!");
38-
await uiHelper.verifyText("Quick Access");
39-
await expect(page.locator("#search-bar-text-field")).toBeVisible();
40-
LOGGER.info("Verifying new title in the UI...");
38+
LOGGER.info("Verifying new title in the UI... ");
4139
expect(await page.title()).toContain(dynamicTitle);
4240
LOGGER.info("Title successfully verified in the UI.");
4341
} catch (error) {

e2e-tests/playwright/e2e/plugins/bulk-import.spec.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ test.describe.serial("Bulk Import plugin", () => {
9090
]);
9191
await expect(
9292
await uiHelper.clickButton("Create pull requests"),
93-
).not.toBeVisible({ timeout: 10000 });
93+
).toBeDisabled({ timeout: 10000 });
9494
});
9595

9696
test('Verify that the two selected repositories are listed: one with the status "Added" and another with the status "WAIT_PR_APPROVAL."', async () => {

e2e-tests/playwright/utils/kube-client.ts

Lines changed: 140 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,58 @@ export class KubeClient {
6060
}
6161
}
6262

63+
/**
 * List every ConfigMap in the given namespace via the Kubernetes core API.
 * Logs the lookup, and on failure logs the API error body before rethrowing
 * so callers can handle (or surface) the original error.
 */
async listConfigMaps(namespace: string) {
  try {
    LOGGER.info(`Listing configmaps in namespace ${namespace}`);
    const response = await this.coreV1Api.listNamespacedConfigMap(namespace);
    return response;
  } catch (e) {
    LOGGER.error(e.body?.message);
    throw e;
  }
}
72+
73+
// Define possible ConfigMap base names as a constant
74+
private readonly appConfigNames = [
75+
'app-config-rhdh',
76+
'app-config',
77+
'backstage-app-config',
78+
'rhdh-app-config'
79+
];
80+
81+
async findAppConfigMap(namespace: string): Promise<string> {
82+
try {
83+
const configMapsResponse = await this.listConfigMaps(namespace);
84+
const configMaps = configMapsResponse.body.items;
85+
86+
LOGGER.info(`Found ${configMaps.length} ConfigMaps in namespace ${namespace}`);
87+
configMaps.forEach(cm => {
88+
LOGGER.info(`ConfigMap: ${cm.metadata?.name}`);
89+
});
90+
91+
for (const name of this.appConfigNames) {
92+
const found = configMaps.find(cm => cm.metadata?.name === name);
93+
if (found) {
94+
LOGGER.info(`Found app config ConfigMap: ${name}`);
95+
return name;
96+
}
97+
}
98+
99+
// If none of the expected names found, look for ConfigMaps containing app-config data
100+
for (const cm of configMaps) {
101+
if (cm.data && Object.keys(cm.data).some(key =>
102+
key.includes('app-config') && key.endsWith('.yaml'))) {
103+
LOGGER.info(`Found ConfigMap with app-config data: ${cm.metadata?.name}`);
104+
return cm.metadata?.name || '';
105+
}
106+
}
107+
108+
throw new Error(`No suitable app-config ConfigMap found in namespace ${namespace}`);
109+
} catch (error) {
110+
LOGGER.error(`Error finding app config ConfigMap: ${error}`);
111+
throw error;
112+
}
113+
}
114+
63115
async getNamespaceByName(name: string): Promise<k8s.V1Namespace | null> {
64116
try {
65117
LOGGER.debug(`Getting namespace ${name}.`);
@@ -144,30 +196,94 @@ export class KubeClient {
144196
newTitle: string,
145197
) {
146198
try {
199+
// If the provided configMapName doesn't exist, try to find the correct one dynamically
200+
let actualConfigMapName = configMapName;
201+
try {
202+
await this.getConfigMap(configMapName, namespace);
203+
LOGGER.info(`Using provided ConfigMap name: ${configMapName}`);
204+
} catch (error) {
205+
if (error.response?.statusCode === 404) {
206+
LOGGER.info(`ConfigMap ${configMapName} not found, searching for alternatives...`);
207+
actualConfigMapName = await this.findAppConfigMap(namespace);
208+
} else {
209+
throw error;
210+
}
211+
}
212+
147213
const configMapResponse = await this.getConfigMap(
148-
configMapName,
214+
actualConfigMapName,
149215
namespace,
150216
);
151217
const configMap = configMapResponse.body;
152218

153-
const appConfigYaml = configMap.data[`${configMapName}.yaml`];
219+
LOGGER.info(`Using ConfigMap: ${actualConfigMapName}`);
220+
LOGGER.info(`Available data keys: ${Object.keys(configMap.data || {}).join(', ')}`);
221+
222+
// Find the correct data key dynamically
223+
let dataKey: string | undefined;
224+
const dataKeys = Object.keys(configMap.data || {});
225+
226+
// Generate key patterns from the possible names + the actual ConfigMap name
227+
const keyPatterns = [
228+
`${actualConfigMapName}.yaml`,
229+
...this.appConfigNames.map(name => `${name}.yaml`)
230+
];
231+
232+
for (const pattern of keyPatterns) {
233+
if (dataKeys.includes(pattern)) {
234+
dataKey = pattern;
235+
break;
236+
}
237+
}
238+
239+
// If none of the patterns match, look for any .yaml file containing app-config
240+
if (!dataKey) {
241+
dataKey = dataKeys.find(key =>
242+
key.endsWith('.yaml') && key.includes('app-config')
243+
);
244+
}
245+
246+
// Last resort: use any .yaml file
247+
if (!dataKey) {
248+
dataKey = dataKeys.find(key => key.endsWith('.yaml'));
249+
}
250+
251+
if (!dataKey) {
252+
throw new Error(`No suitable YAML data key found in ConfigMap '${actualConfigMapName}'. Available keys: ${dataKeys.join(', ')}`);
253+
}
254+
255+
LOGGER.info(`Using data key: ${dataKey}`);
256+
const appConfigYaml = configMap.data[dataKey];
257+
258+
if (!appConfigYaml) {
259+
throw new Error(`Data key '${dataKey}' is empty in ConfigMap '${actualConfigMapName}'`);
260+
}
261+
154262
// eslint-disable-next-line @typescript-eslint/no-explicit-any
155263
const appConfigObj = yaml.load(appConfigYaml) as any;
156264

265+
if (!appConfigObj || !appConfigObj.app) {
266+
throw new Error(`Invalid app-config structure in ConfigMap '${actualConfigMapName}'. Expected 'app' section not found.`);
267+
}
268+
269+
LOGGER.info(`Current title: ${appConfigObj.app.title}`);
157270
appConfigObj.app.title = newTitle;
158-
configMap.data[`${configMapName}.yaml`] = yaml.dump(appConfigObj);
271+
LOGGER.info(`New title: ${newTitle}`);
272+
273+
configMap.data[dataKey] = yaml.dump(appConfigObj);
159274

160275
delete configMap.metadata.creationTimestamp;
276+
delete configMap.metadata.resourceVersion;
161277

162278
await this.coreV1Api.replaceNamespacedConfigMap(
163-
configMapName,
279+
actualConfigMapName,
164280
namespace,
165281
configMap,
166282
);
167-
console.log("ConfigMap updated successfully.");
283+
console.log(`ConfigMap '${actualConfigMapName}' updated successfully with new title: '${newTitle}'`);
168284
} catch (error) {
169285
console.error("Error updating ConfigMap:", error);
170-
throw new Error("Failed to update ConfigMap");
286+
throw new Error(`Failed to update ConfigMap: ${error.message}`);
171287
}
172288
}
173289

@@ -287,19 +403,18 @@ export class KubeClient {
287403
timeout: number = 300000, // 5 minutes
288404
checkInterval: number = 10000, // 10 seconds
289405
) {
290-
const start = Date.now();
406+
const endTime = Date.now() + timeout;
291407
const labelSelector =
292408
"app.kubernetes.io/component=backstage,app.kubernetes.io/instance=rhdh,app.kubernetes.io/name=backstage";
293409

294-
while (Date.now() - start < timeout) {
410+
while (Date.now() < endTime) {
295411
try {
296-
// Check deployment status
297412
const response = await this.appsApi.readNamespacedDeployment(
298413
deploymentName,
299414
namespace,
300415
);
301-
302416
const availableReplicas = response.body.status?.availableReplicas || 0;
417+
const readyReplicas = response.body.status?.readyReplicas || 0;
303418
const conditions = response.body.status?.conditions || [];
304419

305420
console.log(`Available replicas: ${availableReplicas}`);
@@ -320,7 +435,7 @@ export class KubeClient {
320435
}
321436

322437
console.log(
323-
`Waiting for ${deploymentName} to reach ${expectedReplicas} replicas, currently has ${availableReplicas}.`,
438+
`Waiting for ${deploymentName} to reach ${expectedReplicas} replicas, currently has ${availableReplicas} available, ${readyReplicas} ready.`,
324439
);
325440
} catch (error) {
326441
console.error(`Error checking deployment status: ${error}`);
@@ -330,35 +445,45 @@ export class KubeClient {
330445
}
331446

332447
throw new Error(
333-
`Deployment ${deploymentName} did not become ready in time.`,
448+
`Deployment ${deploymentName} did not become ready in time (timeout: ${timeout / 1000}s).`,
334449
);
335450
}
336451

337452
/**
 * Restart a deployment by scaling it to 0 replicas, waiting for the scale-down
 * (and an extra grace period for pod termination), then scaling back to 1 and
 * waiting for readiness. On any failure, logs pod conditions and deployment
 * events for diagnosis, then rethrows with a descriptive message.
 */
async restartDeployment(deploymentName: string, namespace: string) {
  const scaleDownTimeoutMs = 300000; // 5 minutes for scale down
  const scaleUpTimeoutMs = 600000; // 10 minutes for scale up
  const terminationGraceMs = 10000; // 10 seconds

  try {
    console.log(
      `Starting deployment restart for ${deploymentName} in namespace ${namespace}`,
    );

    // Phase 1: scale down to zero and wait until no replicas remain.
    console.log(`Scaling down deployment ${deploymentName} to 0 replicas.`);
    console.log(`Deployment: ${deploymentName}, Namespace: ${namespace}`);
    await this.logPodConditions(namespace);
    await this.scaleDeployment(deploymentName, namespace, 0);
    await this.waitForDeploymentReady(
      deploymentName,
      namespace,
      0,
      scaleDownTimeoutMs,
    );

    // Give the terminating pods time to fully disappear before scaling up.
    console.log("Waiting for pods to be fully terminated...");
    await new Promise((done) => setTimeout(done, terminationGraceMs));

    // Phase 2: scale back up to one replica and wait for readiness.
    console.log(`Scaling up deployment ${deploymentName} to 1 replica.`);
    await this.scaleDeployment(deploymentName, namespace, 1);
    await this.waitForDeploymentReady(
      deploymentName,
      namespace,
      1,
      scaleUpTimeoutMs,
    );

    console.log(
      `Restart of deployment ${deploymentName} completed successfully.`,
    );
  } catch (error) {
    console.error(
      `Error during deployment restart: Deployment '${deploymentName}' in namespace '${namespace}'.`,
      error,
    );
    // Capture cluster-side context before surfacing the failure.
    await this.logPodConditions(namespace);
    await this.logDeploymentEvents(deploymentName, namespace);
    throw new Error(
      `Failed to restart deployment '${deploymentName}' in namespace '${namespace}': ${error.message}`,
    );
  }
}

0 commit comments

Comments
 (0)