From 75c5b10feec2165c6f2f176bcde011e78f9791d0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Fri, 18 Oct 2024 15:18:04 -0400
Subject: [PATCH] chore(deps): update ghcr.io/open-feature/flagd-testbed docker tag to v0.5.13 (#1068)

Signed-off-by: Todd Baert
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Todd Baert
---
 libs/providers/flagd/docker-compose.yaml |  2 +-
 .../flagd-reconnect.unstable.ts           | 48 +++++++++++--------
 2 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/libs/providers/flagd/docker-compose.yaml b/libs/providers/flagd/docker-compose.yaml
index d92ce5de6..7853f8ddd 100644
--- a/libs/providers/flagd/docker-compose.yaml
+++ b/libs/providers/flagd/docker-compose.yaml
@@ -1,6 +1,6 @@
 services:
   flagd:
-    image: ghcr.io/open-feature/flagd-testbed:v0.5.6
+    image: ghcr.io/open-feature/flagd-testbed:v0.5.13
     ports:
       - 8013:8013
   flagd-unstable:
diff --git a/libs/providers/flagd/src/e2e/step-definitions/flagd-reconnect.unstable.ts b/libs/providers/flagd/src/e2e/step-definitions/flagd-reconnect.unstable.ts
index d66af00ee..9462b7d8f 100644
--- a/libs/providers/flagd/src/e2e/step-definitions/flagd-reconnect.unstable.ts
+++ b/libs/providers/flagd/src/e2e/step-definitions/flagd-reconnect.unstable.ts
@@ -22,26 +22,36 @@ export function flagdRecconnectUnstable() {
     });
   });
 
-  test('Provider reconnection', ({ given, when, then, and }) => {
-    given('a flagd provider is set', () => {
-      // handled in beforeAll
-    });
-    when('a PROVIDER_READY handler and a PROVIDER_ERROR handler are added', () => {
-      client.addHandler(ProviderEvents.Error, () => {
-        errorRunCount++;
+  describe('retry', () => {
+    /**
+     * This describe block and retry settings are calibrated to gRPC's retry time
+     * and our testing container's restart cadence.
+     */
+    const retryTimes = 240;
+    const retryDelayMs = 1000;
+    jest.retryTimes(retryTimes);
+
+    test('Provider reconnection', ({ given, when, then, and }) => {
+      given('a flagd provider is set', () => {
+        // handled in beforeAll
+      });
+      when('a PROVIDER_READY handler and a PROVIDER_ERROR handler are added', () => {
+        client.addHandler(ProviderEvents.Error, () => {
+          errorRunCount++;
+        });
+      });
+      then('the PROVIDER_READY handler must run when the provider connects', async () => {
+        // should already be at 1 from `beforeAll`
+        expect(readyRunCount).toEqual(1);
+      });
+      and("the PROVIDER_ERROR handler must run when the provider's connection is lost", async () => {
+        await new Promise((resolve) => setTimeout(resolve, retryDelayMs));
+        expect(errorRunCount).toBeGreaterThan(0);
+      });
+      and('when the connection is reestablished the PROVIDER_READY handler must run again', async () => {
+        await new Promise((resolve) => setTimeout(resolve, retryDelayMs));
+        expect(readyRunCount).toBeGreaterThan(1);
       });
-    });
-    then('the PROVIDER_READY handler must run when the provider connects', async () => {
-      // should already be at 1 from `beforeAll`
-      expect(readyRunCount).toEqual(1);
-    });
-    and("the PROVIDER_ERROR handler must run when the provider's connection is lost", async () => {
-      await new Promise((resolve) => setTimeout(resolve, 10000));
-      expect(errorRunCount).toBeGreaterThan(0);
-    });
-    and('when the connection is reestablished the PROVIDER_READY handler must run again', async () => {
-      await new Promise((resolve) => setTimeout(resolve, 10000));
-      expect(readyRunCount).toBeGreaterThan(1);
     });
   });
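
Reviewer note (not part of the patch): a minimal sketch of what the new retry calibration implies, assuming each retried Jest attempt sleeps `retryDelayMs` before re-checking the handler counters, as the constants in the diff suggest.

```ts
// Sketch only: rough polling budget implied by the constants added in the diff.
// jest.retryTimes(retryTimes) re-runs a failing test, and each attempt waits
// retryDelayMs before asserting, so each awaited step is effectively polled
// for on the order of retryTimes * retryDelayMs before the suite gives up.
const retryTimes = 240;
const retryDelayMs = 1000;

const approxBudgetMs = retryTimes * retryDelayMs; // 240,000 ms, about 4 minutes
console.log(`approximate polling budget per step: ${approxBudgetMs / 1000} s`);
```

That budget presumably covers the unstable testbed container's restart cycle plus gRPC's reconnect backoff, which the comment in the diff refers to, while keeping each individual attempt short (1 s instead of the previous fixed 10 s sleeps).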