diff --git a/.outpost.yaml.dev b/.outpost.yaml.dev index 9e30a388..fa6e40e2 100644 --- a/.outpost.yaml.dev +++ b/.outpost.yaml.dev @@ -72,8 +72,7 @@ idgen: type: "nanoid" event_prefix: "evt" destination_prefix: "des" - delivery_prefix: "dlv" -" + attempt_prefix: "atm" # Concurrency publish_max_concurrency: 1 diff --git a/cmd/e2e/log_test.go b/cmd/e2e/log_test.go index 5dc72343..7785343e 100644 --- a/cmd/e2e/log_test.go +++ b/cmd/e2e/log_test.go @@ -18,14 +18,14 @@ func parseTime(s string) time.Time { return t } -// TestLogAPI tests the Log API endpoints (deliveries, events). +// TestLogAPI tests the Log API endpoints (attempts, events). // // Setup: // 1. Create a tenant and destination // 2. Publish 10 events with small delays for distinct timestamps // // Test Groups: -// - deliveries: list, filter, expand +// - attempts: list, filter, expand // - events: list, filter, retrieve // - sort_order: sort by time ascending/descending // - pagination: paginate through results @@ -111,17 +111,17 @@ func (suite *basicSuite) TestLogAPI() { suite.Require().Equal(http.StatusAccepted, resp.StatusCode, "failed to publish event %d", i) } - // Wait for all deliveries (30s timeout for slow CI environments) - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries", 10, 10*time.Second) + // Wait for all attempts (30s timeout for slow CI environments) + suite.waitForAttempts(suite.T(), "/tenants/"+tenantID+"/attempts", 10, 10*time.Second) // ========================================================================= - // Deliveries Tests + // Attempts Tests // ========================================================================= - suite.Run("deliveries", func() { + suite.Run("attempts", func() { suite.Run("list all", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries", + Path: "/tenants/" + tenantID + "/attempts", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -142,7 +142,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("filter by destination_id", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?destination_id=" + destinationID, + Path: "/tenants/" + tenantID + "/attempts?destination_id=" + destinationID, })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -155,7 +155,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("filter by event_id", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?event_id=" + eventIDs[0], + Path: "/tenants/" + tenantID + "/attempts?event_id=" + eventIDs[0], })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -168,7 +168,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("include=event returns event object without data", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?include=event&limit=1", + Path: "/tenants/" + tenantID + "/attempts?include=event&limit=1", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -177,8 +177,8 @@ func (suite *basicSuite) TestLogAPI() { models := body["models"].([]interface{}) suite.Require().Len(models, 1) - delivery := models[0].(map[string]interface{}) - event := 
delivery["event"].(map[string]interface{}) + attempt := models[0].(map[string]interface{}) + event := attempt["event"].(map[string]interface{}) suite.NotEmpty(event["id"]) suite.NotEmpty(event["topic"]) suite.NotEmpty(event["time"]) @@ -188,7 +188,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("include=event.data returns event object with data", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?include=event.data&limit=1", + Path: "/tenants/" + tenantID + "/attempts?include=event.data&limit=1", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -197,8 +197,8 @@ func (suite *basicSuite) TestLogAPI() { models := body["models"].([]interface{}) suite.Require().Len(models, 1) - delivery := models[0].(map[string]interface{}) - event := delivery["event"].(map[string]interface{}) + attempt := models[0].(map[string]interface{}) + event := attempt["event"].(map[string]interface{}) suite.NotEmpty(event["id"]) suite.NotNil(event["data"]) // include=event.data SHOULD include data }) @@ -206,7 +206,7 @@ func (suite *basicSuite) TestLogAPI() { suite.Run("include=response_data returns response data", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?include=response_data&limit=1", + Path: "/tenants/" + tenantID + "/attempts?include=response_data&limit=1", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -215,8 +215,8 @@ func (suite *basicSuite) TestLogAPI() { models := body["models"].([]interface{}) suite.Require().Len(models, 1) - delivery := models[0].(map[string]interface{}) - suite.NotNil(delivery["response_data"]) + attempt := models[0].(map[string]interface{}) + suite.NotNil(attempt["response_data"]) }) }) @@ -508,14 +508,14 @@ func (suite *basicSuite) TestLogAPI() { // 2. Configure mock webhook server to FAIL (return 500) // 3. Create a destination pointing to the mock server // 4. Publish an event with eligible_for_retry=false (fails once, no auto-retry) -// 5. Wait for delivery to fail, then fetch the delivery ID +// 5. Wait for attempt to fail, then fetch the attempt ID // 6. 
Update mock server to SUCCEED (return 200) // // Test Cases: -// - POST /:tenantID/deliveries/:deliveryID/retry - Successful retry returns 202 Accepted -// - POST /:tenantID/deliveries/:deliveryID/retry (non-existent) - Returns 404 -// - Verify retry created new delivery - Event now has 2+ deliveries -// - POST /:tenantID/deliveries/:deliveryID/retry (disabled destination) - Returns 400 +// - POST /:tenantID/attempts/:attemptID/retry - Successful retry returns 202 Accepted +// - POST /:tenantID/attempts/:attemptID/retry (non-existent) - Returns 404 +// - Verify retry created new attempt - Event now has 2+ attempts +// - POST /:tenantID/attempts/:attemptID/retry (disabled destination) - Returns 400 func (suite *basicSuite) TestRetryAPI() { tenantID := idgen.String() destinationID := idgen.Destination() @@ -548,7 +548,7 @@ func (suite *basicSuite) TestRetryAPI() { "url": fmt.Sprintf("%s/webhook/%s", suite.mockServerBaseURL, destinationID), }, "response": map[string]interface{}{ - "status": 500, // Fail deliveries + "status": 500, // Fail attempts }, }, }, @@ -602,22 +602,22 @@ func (suite *basicSuite) TestRetryAPI() { } suite.RunAPITests(suite.T(), setupTests) - // Wait for delivery to complete (and fail) - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries?event_id="+eventID, 1, 5*time.Second) + // Wait for attempt to complete (and fail) + suite.waitForAttempts(suite.T(), "/tenants/"+tenantID+"/attempts?event_id="+eventID, 1, 5*time.Second) - // Get the delivery ID - deliveriesResp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ + // Get the attempt ID + attemptsResp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?event_id=" + eventID, + Path: "/tenants/" + tenantID + "/attempts?event_id=" + eventID, })) suite.Require().NoError(err) - suite.Require().Equal(http.StatusOK, deliveriesResp.StatusCode) + suite.Require().Equal(http.StatusOK, attemptsResp.StatusCode) - body := deliveriesResp.Body.(map[string]interface{}) + body := attemptsResp.Body.(map[string]interface{}) models := body["models"].([]interface{}) - suite.Require().NotEmpty(models, "should have at least one delivery") - firstDelivery := models[0].(map[string]interface{}) - deliveryID := firstDelivery["id"].(string) + suite.Require().NotEmpty(models, "should have at least one attempt") + firstAttempt := models[0].(map[string]interface{}) + attemptID := firstAttempt["id"].(string) // Update mock to succeed for retry updateMockTests := []APITest{ @@ -649,12 +649,12 @@ func (suite *basicSuite) TestRetryAPI() { // Test retry endpoint retryTests := []APITest{ - // POST /:tenantID/deliveries/:deliveryID/retry - successful retry + // POST /:tenantID/attempts/:attemptID/retry - successful retry { - Name: "POST /:tenantID/deliveries/:deliveryID/retry - retry delivery", + Name: "POST /:tenantID/attempts/:attemptID/retry - retry attempt", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/deliveries/" + deliveryID + "/retry", + Path: "/tenants/" + tenantID + "/attempts/" + attemptID + "/retry", }), Expected: APITestExpectation{ Match: &httpclient.Response{ @@ -665,12 +665,12 @@ func (suite *basicSuite) TestRetryAPI() { }, }, }, - // POST /:tenantID/deliveries/:deliveryID/retry - non-existent delivery + // POST /:tenantID/attempts/:attemptID/retry - non-existent attempt { - Name: "POST /:tenantID/deliveries/:deliveryID/retry - not found", + Name: "POST 
/:tenantID/attempts/:attemptID/retry - not found", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/deliveries/" + idgen.Delivery() + "/retry", + Path: "/tenants/" + tenantID + "/attempts/" + idgen.Attempt() + "/retry", }), Expected: APITestExpectation{ Match: &httpclient.Response{ @@ -681,16 +681,16 @@ func (suite *basicSuite) TestRetryAPI() { } suite.RunAPITests(suite.T(), retryTests) - // Wait for retry delivery to complete - suite.waitForDeliveries(suite.T(), "/tenants/"+tenantID+"/deliveries?event_id="+eventID, 2, 5*time.Second) + // Wait for retry attempt to complete + suite.waitForAttempts(suite.T(), "/tenants/"+tenantID+"/attempts?event_id="+eventID, 2, 5*time.Second) - // Verify we have more deliveries after retry + // Verify we have more attempts after retry verifyTests := []APITest{ { - Name: "GET /:tenantID/deliveries?event_id=X - verify retry created new delivery", + Name: "GET /:tenantID/attempts?event_id=X - verify retry created new attempt", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/tenants/" + tenantID + "/deliveries?event_id=" + eventID, + Path: "/tenants/" + tenantID + "/attempts?event_id=" + eventID, }), Expected: APITestExpectation{ Validate: map[string]interface{}{ @@ -728,10 +728,10 @@ func (suite *basicSuite) TestRetryAPI() { }, }, { - Name: "POST /:tenantID/deliveries/:deliveryID/retry - disabled destination", + Name: "POST /:tenantID/attempts/:attemptID/retry - disabled destination", Request: suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodPOST, - Path: "/tenants/" + tenantID + "/deliveries/" + deliveryID + "/retry", + Path: "/tenants/" + tenantID + "/attempts/" + attemptID + "/retry", }), Expected: APITestExpectation{ Match: &httpclient.Response{ @@ -776,24 +776,24 @@ func (suite *basicSuite) TestRetryAPI() { suite.RunAPITests(suite.T(), cleanupTests) } -// TestAdminLogEndpoints tests the admin-only /events and /deliveries endpoints. +// TestAdminLogEndpoints tests the admin-only /events and /attempts endpoints. // // These endpoints allow cross-tenant queries with optional tenant_id filter. // // Setup: // 1. Create two tenants with destinations // 2. Publish events to each tenant -// 3. Wait for deliveries to complete +// 3. 
Wait for attempts to complete // // Test Cases: // - GET /events without auth returns 401 -// - GET /deliveries without auth returns 401 +// - GET /attempts without auth returns 401 // - GET /events with JWT returns 401 (admin-only) -// - GET /deliveries with JWT returns 401 (admin-only) +// - GET /attempts with JWT returns 401 (admin-only) // - GET /events with admin key returns all events (cross-tenant) -// - GET /deliveries with admin key returns all deliveries (cross-tenant) +// - GET /attempts with admin key returns all attempts (cross-tenant) // - GET /events?tenant_id=X filters to single tenant -// - GET /deliveries?tenant_id=X filters to single tenant +// - GET /attempts?tenant_id=X filters to single tenant func (suite *basicSuite) TestAdminLogEndpoints() { tenant1ID := idgen.String() tenant2ID := idgen.String() @@ -931,9 +931,9 @@ func (suite *basicSuite) TestAdminLogEndpoints() { } suite.RunAPITests(suite.T(), setupTests) - // Wait for deliveries for both tenants - suite.waitForDeliveries(suite.T(), "/tenants/"+tenant1ID+"/deliveries", 1, 5*time.Second) - suite.waitForDeliveries(suite.T(), "/tenants/"+tenant2ID+"/deliveries", 1, 5*time.Second) + // Wait for attempts for both tenants + suite.waitForAttempts(suite.T(), "/tenants/"+tenant1ID+"/attempts", 1, 5*time.Second) + suite.waitForAttempts(suite.T(), "/tenants/"+tenant2ID+"/attempts", 1, 5*time.Second) // Get JWT token for tenant1 to test that JWT auth is rejected on admin endpoints tokenResp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ @@ -959,10 +959,10 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.Equal(http.StatusUnauthorized, resp.StatusCode) }) - suite.Run("GET /deliveries without auth returns 401", func() { + suite.Run("GET /attempts without auth returns 401", func() { resp, err := suite.client.Do(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries", + Path: "/attempts", }) suite.Require().NoError(err) suite.Equal(http.StatusUnauthorized, resp.StatusCode) @@ -977,10 +977,10 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.Equal(http.StatusUnauthorized, resp.StatusCode) }) - suite.Run("GET /deliveries with JWT returns 401 (admin-only)", func() { + suite.Run("GET /attempts with JWT returns 401 (admin-only)", func() { resp, err := suite.client.Do(suite.AuthJWTRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries", + Path: "/attempts", }, jwtToken)) suite.Require().NoError(err) suite.Equal(http.StatusUnauthorized, resp.StatusCode) @@ -1016,31 +1016,31 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.True(eventsSeen[event2ID], "should include tenant2 event") }) - suite.Run("GET /deliveries returns deliveries from all tenants", func() { + suite.Run("GET /attempts returns attempts from all tenants", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries?include=event", + Path: "/attempts?include=event", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) body := resp.Body.(map[string]interface{}) models := body["models"].([]interface{}) - // Should have at least 2 deliveries (one from each tenant we created) + // Should have at least 2 attempts (one from each tenant we created) suite.GreaterOrEqual(len(models), 2) - // Verify we have deliveries from both tenants by checking event IDs + // Verify we have attempts from both tenants by checking event IDs eventsSeen := map[string]bool{} for _, item := range models { - delivery := 
item.(map[string]interface{}) - if event, ok := delivery["event"].(map[string]interface{}); ok { + attempt := item.(map[string]interface{}) + if event, ok := attempt["event"].(map[string]interface{}); ok { if id, ok := event["id"].(string); ok { eventsSeen[id] = true } } } - suite.True(eventsSeen[event1ID], "should include tenant1 delivery") - suite.True(eventsSeen[event2ID], "should include tenant2 delivery") + suite.True(eventsSeen[event1ID], "should include tenant1 attempt") + suite.True(eventsSeen[event2ID], "should include tenant2 attempt") }) }) @@ -1065,10 +1065,10 @@ func (suite *basicSuite) TestAdminLogEndpoints() { suite.Equal(event1ID, event["id"]) }) - suite.Run("GET /deliveries?tenant_id=X filters to single tenant", func() { + suite.Run("GET /attempts?tenant_id=X filters to single tenant", func() { resp, err := suite.client.Do(suite.AuthRequest(httpclient.Request{ Method: httpclient.MethodGET, - Path: "/deliveries?tenant_id=" + tenant2ID + "&include=event", + Path: "/attempts?tenant_id=" + tenant2ID + "&include=event", })) suite.Require().NoError(err) suite.Require().Equal(http.StatusOK, resp.StatusCode) @@ -1077,9 +1077,9 @@ func (suite *basicSuite) TestAdminLogEndpoints() { models := body["models"].([]interface{}) suite.Len(models, 1) - // Verify only tenant2 delivery by event ID - delivery := models[0].(map[string]interface{}) - event := delivery["event"].(map[string]interface{}) + // Verify only tenant2 attempt by event ID + attempt := models[0].(map[string]interface{}) + event := attempt["event"].(map[string]interface{}) suite.Equal(event2ID, event["id"]) }) }) diff --git a/cmd/e2e/suites_test.go b/cmd/e2e/suites_test.go index 48321bf4..f2531e19 100644 --- a/cmd/e2e/suites_test.go +++ b/cmd/e2e/suites_test.go @@ -41,8 +41,8 @@ func waitForHealthy(t *testing.T, port int, timeout time.Duration) { t.Fatalf("timed out waiting for health check at %s", healthURL) } -// waitForDeliveries polls until at least minCount deliveries exist for the given path. -func (s *e2eSuite) waitForDeliveries(t *testing.T, path string, minCount int, timeout time.Duration) { +// waitForAttempts polls until at least minCount attempts exist for the given path. +func (s *e2eSuite) waitForAttempts(t *testing.T, path string, minCount int, timeout time.Duration) { t.Helper() deadline := time.Now().Add(timeout) var lastCount int @@ -72,9 +72,9 @@ func (s *e2eSuite) waitForDeliveries(t *testing.T, path string, minCount int, ti time.Sleep(100 * time.Millisecond) } if lastErr != nil { - t.Fatalf("timed out waiting for %d deliveries at %s: last error: %v", minCount, path, lastErr) + t.Fatalf("timed out waiting for %d attempts at %s: last error: %v", minCount, path, lastErr) } - t.Fatalf("timed out waiting for %d deliveries at %s: got %d (status %d)", minCount, path, lastCount, lastStatus) + t.Fatalf("timed out waiting for %d attempts at %s: got %d (status %d)", minCount, path, lastCount, lastStatus) } // waitForDestinationDisabled polls until the destination has disabled_at set (non-null). 
diff --git a/cmd/publish/publish_http.go b/cmd/publish/publish_http.go index 71d13137..40a70745 100644 --- a/cmd/publish/publish_http.go +++ b/cmd/publish/publish_http.go @@ -16,14 +16,11 @@ const ( func publishHTTP(body map[string]interface{}) error { log.Printf("[x] Publishing HTTP") - // make HTTP POST request to the URL specified in the body - jsonData, err := json.Marshal(body) if err != nil { return fmt.Errorf("failed to marshal body to JSON: %w", err) } - // Make HTTP POST request req, err := http.NewRequest("POST", ServerURL, bytes.NewBuffer(jsonData)) if err != nil { return fmt.Errorf("failed to create HTTP request: %w", err) @@ -37,7 +34,6 @@ func publishHTTP(body map[string]interface{}) error { } defer resp.Body.Close() - // Check for non-200 status code if resp.StatusCode != http.StatusOK { return fmt.Errorf("received non-200 response: %d", resp.StatusCode) } diff --git a/docs/apis/openapi.yaml b/docs/apis/openapi.yaml index 66b2165f..22d1c6a5 100644 --- a/docs/apis/openapi.yaml +++ b/docs/apis/openapi.yaml @@ -475,7 +475,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -562,7 +562,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -649,7 +649,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -726,7 +726,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -810,7 +810,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -887,7 +887,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -973,7 +973,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1060,7 +1060,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. 
+ description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1143,7 +1143,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1177,7 +1177,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1211,7 +1211,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1244,7 +1244,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1278,7 +1278,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1313,7 +1313,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1348,7 +1348,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1382,7 +1382,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1449,7 +1449,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1475,7 +1475,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. 
example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1501,7 +1501,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1526,7 +1526,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1552,7 +1552,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1578,7 +1578,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1605,7 +1605,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1631,7 +1631,7 @@ components: additionalProperties: type: string nullable: true - description: Static key-value pairs merged into event metadata on every delivery. + description: Static key-value pairs merged into event metadata on every attempt. example: { "app-id": "my-app", "region": "us-east-1" } metadata: type: object @@ -1769,24 +1769,24 @@ components: type: string example: { "content-type": "application/json" } - # Delivery schemas for deliveries-first API - Delivery: + # Attempt schemas for attempts-first API + Attempt: type: object - description: A delivery represents a single delivery attempt of an event to a destination. + description: An attempt represents a single delivery attempt of an event to a destination. properties: id: type: string - description: Unique identifier for this delivery. - example: "del_123" + description: Unique identifier for this attempt. + example: "atm_123" status: type: string enum: [success, failed] - description: The delivery status. + description: The attempt status. example: "success" delivered_at: type: string format: date-time - description: Time the delivery was attempted. + description: Time the attempt was made. example: "2024-01-01T00:00:05Z" code: type: string @@ -1794,7 +1794,7 @@ components: example: "200" response_data: type: object - description: Response data from the delivery attempt. Only included when include=response_data. + description: Response data from the attempt. Only included when include=response_data. additionalProperties: true example: { "status_code": 200, "body": '{"status":"ok"}', "headers": { "content-type": "application/json" } } attempt: @@ -1803,7 +1803,7 @@ components: example: 1 manual: type: boolean - description: Whether this delivery was manually triggered (e.g., a retry initiated by a user). 
+ description: Whether this attempt was manually triggered (e.g., a retry initiated by a user). example: false event: oneOf: @@ -1815,7 +1815,7 @@ components: description: The associated event. Returns event ID by default, or included event object when include=event or include=event.data. destination: type: string - description: The destination ID this delivery was sent to. + description: The destination ID this attempt was sent to. example: "des_456" EventSummary: type: object @@ -1872,15 +1872,15 @@ components: additionalProperties: true description: The event payload data. example: { "user_id": "userid", "status": "active" } - DeliveryPaginatedResult: + AttemptPaginatedResult: type: object - description: Paginated list of deliveries. + description: Paginated list of attempts. properties: models: type: array items: - $ref: "#/components/schemas/Delivery" - description: Array of delivery objects. + $ref: "#/components/schemas/Attempt" + description: Array of attempt objects. pagination: $ref: "#/components/schemas/SeekPagination" @@ -2028,14 +2028,14 @@ tags: description: Operations for retrieving destination type schemas. - name: Topics description: Operations for retrieving available event topics. - - name: Deliveries + - name: Attempts description: | - Deliveries represent individual delivery attempts of events to destinations. The deliveries API provides a delivery-centric view of event processing. + Attempts represent individual delivery attempts of events to destinations. The attempts API provides an attempt-centric view of event processing. - Each delivery contains: - - `id`: Unique delivery identifier + Each attempt contains: + - `id`: Unique attempt identifier - `status`: success or failed - - `delivered_at`: Timestamp of the delivery attempt + - `delivered_at`: Timestamp of the attempt - `code`: HTTP status code or error code - `attempt`: Attempt number (1 for first attempt, 2+ for retries) - `event`: Associated event (ID or included object) @@ -2044,7 +2044,7 @@ tags: Use the `include` query parameter to include related data: - `include=event`: Include event summary (id, topic, time, eligible_for_retry, metadata) - `include=event.data`: Include full event with payload data - - `include=response_data`: Include response body and headers from the delivery attempt + - `include=response_data`: Include response body and headers from the attempt - name: Events description: Operations related to event history. @@ -2456,15 +2456,15 @@ paths: schema: $ref: "#/components/schemas/APIErrorResponse" - /deliveries: + /attempts: get: - tags: [Deliveries] - summary: List Deliveries (Admin) + tags: [Attempts] + summary: List Attempts (Admin) description: | - Retrieves a paginated list of deliveries across all tenants. This is an admin-only endpoint that requires the Admin API Key. + Retrieves a paginated list of attempts across all tenants. This is an admin-only endpoint that requires the Admin API Key. - When `tenant_id` is not provided, returns deliveries from all tenants. When `tenant_id` is provided, returns only deliveries for that tenant. - operationId: adminListDeliveries + When `tenant_id` is not provided, returns attempts from all tenants. When `tenant_id` is provided, returns only attempts for that tenant. + operationId: adminListAttempts security: - AdminApiKey: [] parameters: @@ -2473,26 +2473,26 @@ paths: required: false schema: type: string - description: Filter deliveries by tenant ID. If not provided, returns deliveries from all tenants. 
+ description: Filter attempts by tenant ID. If not provided, returns attempts from all tenants. - name: event_id in: query required: false schema: type: string - description: Filter deliveries by event ID. + description: Filter attempts by event ID. - name: destination_id in: query required: false schema: type: string - description: Filter deliveries by destination ID. + description: Filter attempts by destination ID. - name: status in: query required: false schema: type: string enum: [success, failed] - description: Filter deliveries by status. + description: Filter attempts by status. - name: topic in: query required: false @@ -2502,21 +2502,21 @@ paths: - type: array items: type: string - description: Filter deliveries by event topic(s). Can be specified multiple times or comma-separated. + description: Filter attempts by event topic(s). Can be specified multiple times or comma-separated. - name: time[gte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time >= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time >= value (RFC3339 or YYYY-MM-DD format). - name: time[lte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time <= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time <= value (RFC3339 or YYYY-MM-DD format). - name: limit in: query required: false @@ -2570,23 +2570,23 @@ paths: description: Sort direction. responses: "200": - description: A paginated list of deliveries. + description: A paginated list of attempts. content: application/json: schema: - $ref: "#/components/schemas/DeliveryPaginatedResult" + $ref: "#/components/schemas/AttemptPaginatedResult" examples: - AdminDeliveriesListExample: + AdminAttemptsListExample: value: models: - - id: "del_123" + - id: "atm_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" attempt: 1 event: "evt_123" destination: "des_456" - - id: "del_124" + - id: "att_124" status: "failed" delivered_at: "2024-01-02T10:00:01Z" code: "503" @@ -2599,7 +2599,7 @@ paths: limit: 100 next: "MTcwNDA2NzIwMA==" prev: null - AdminDeliveriesWithIncludeExample: + AdminAttemptsWithIncludeExample: summary: Response with include=event value: models: @@ -3307,8 +3307,8 @@ paths: "404": description: Tenant not found. - # Deliveries (Tenant Specific - Admin or JWT) - /tenants/{tenant_id}/deliveries: + # Attempts (Tenant Specific - Admin or JWT) + /tenants/{tenant_id}/attempts: parameters: - name: tenant_id in: path @@ -3317,30 +3317,30 @@ paths: type: string description: The ID of the tenant. Required when using AdminApiKey authentication. get: - tags: [Deliveries] - summary: List Deliveries - description: Retrieves a paginated list of deliveries for the tenant, with filtering and sorting options. - operationId: listTenantDeliveries + tags: [Attempts] + summary: List Attempts + description: Retrieves a paginated list of attempts for the tenant, with filtering and sorting options. + operationId: listTenantAttempts parameters: - name: destination_id in: query required: false schema: type: string - description: Filter deliveries by destination ID. + description: Filter attempts by destination ID. - name: event_id in: query required: false schema: type: string - description: Filter deliveries by event ID. + description: Filter attempts by event ID. 
- name: status in: query required: false schema: type: string enum: [success, failed] - description: Filter deliveries by status. + description: Filter attempts by status. - name: topic in: query required: false @@ -3350,21 +3350,21 @@ paths: - type: array items: type: string - description: Filter deliveries by event topic(s). Can be specified multiple times or comma-separated. + description: Filter attempts by event topic(s). Can be specified multiple times or comma-separated. - name: time[gte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time >= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time >= value (RFC3339 or YYYY-MM-DD format). - name: time[lte] in: query required: false schema: type: string format: date-time - description: Filter deliveries by event time <= value (RFC3339 or YYYY-MM-DD format). + description: Filter attempts by event time <= value (RFC3339 or YYYY-MM-DD format). - name: limit in: query required: false @@ -3418,23 +3418,23 @@ paths: description: Sort direction. responses: "200": - description: A paginated list of deliveries. + description: A paginated list of attempts. content: application/json: schema: - $ref: "#/components/schemas/DeliveryPaginatedResult" + $ref: "#/components/schemas/AttemptPaginatedResult" examples: - DeliveriesListExample: + AttemptsListExample: value: models: - - id: "del_123" + - id: "atm_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" attempt: 1 event: "evt_123" destination: "des_456" - - id: "del_124" + - id: "att_124" status: "failed" delivered_at: "2024-01-02T10:00:01Z" code: "503" @@ -3447,11 +3447,11 @@ paths: limit: 100 next: "MTcwNDA2NzIwMA==" prev: null - DeliveriesWithIncludeExample: + AttemptsWithIncludeExample: summary: Response with include=event value: models: - - id: "del_123" + - id: "atm_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" @@ -3478,7 +3478,7 @@ paths: schema: $ref: "#/components/schemas/APIErrorResponse" - /tenants/{tenant_id}/deliveries/{delivery_id}: + /tenants/{tenant_id}/attempts/{attempt_id}: parameters: - name: tenant_id in: path @@ -3486,17 +3486,17 @@ paths: schema: type: string description: The ID of the tenant. Required when using AdminApiKey authentication. - - name: delivery_id + - name: attempt_id in: path required: true schema: type: string - description: The ID of the delivery. + description: The ID of the attempt. get: - tags: [Deliveries] - summary: Get Delivery - description: Retrieves details for a specific delivery. - operationId: getTenantDelivery + tags: [Attempts] + summary: Get Attempt + description: Retrieves details for a specific attempt. + operationId: getTenantAttempt parameters: - name: include in: query @@ -3514,25 +3514,25 @@ paths: - `response_data`: Include response body and headers responses: "200": - description: Delivery details. + description: Attempt details. 
content: application/json: schema: - $ref: "#/components/schemas/Delivery" + $ref: "#/components/schemas/Attempt" examples: - DeliveryExample: + AttemptExample: value: - id: "del_123" + id: "atm_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" attempt: 1 event: "evt_123" destination: "des_456" - DeliveryWithIncludeExample: + AttemptWithIncludeExample: summary: Response with include=event.data,response_data value: - id: "del_123" + id: "atm_123" status: "success" delivered_at: "2024-01-01T00:00:05Z" code: "200" @@ -3550,9 +3550,9 @@ paths: data: { "user_id": "userid", "status": "active" } destination: "des_456" "404": - description: Tenant or Delivery not found. + description: Tenant or Attempt not found. - /tenants/{tenant_id}/deliveries/{delivery_id}/retry: + /tenants/{tenant_id}/attempts/{attempt_id}/retry: parameters: - name: tenant_id in: path @@ -3560,28 +3560,28 @@ paths: schema: type: string description: The ID of the tenant. Required when using AdminApiKey authentication. - - name: delivery_id + - name: attempt_id in: path required: true schema: type: string - description: The ID of the delivery to retry. + description: The ID of the attempt to retry. post: - tags: [Deliveries] - summary: Retry Delivery + tags: [Attempts] + summary: Retry Attempt description: | - Triggers a retry for a delivery. Only the latest delivery for an event+destination pair can be retried. + Triggers a retry for an attempt. Only the latest attempt for an event+destination pair can be retried. The destination must exist and be enabled. - operationId: retryTenantDelivery + operationId: retryTenantAttempt responses: "202": description: Retry accepted for processing. "404": - description: Tenant or Delivery not found. + description: Tenant or Attempt not found. "409": description: | - Delivery not eligible for retry. This can happen when: - - The delivery is not the latest for this event+destination pair + Attempt not eligible for retry. This can happen when: + - The attempt is not the latest for this event+destination pair - The destination is disabled or deleted # Events (Tenant Specific - Admin or JWT) @@ -3746,7 +3746,7 @@ paths: "404": description: Tenant or Event not found. - /tenants/{tenant_id}/events/{event_id}/deliveries: + /tenants/{tenant_id}/events/{event_id}/attempts: parameters: - name: tenant_id in: path @@ -3762,12 +3762,12 @@ paths: description: The ID of the event. get: tags: [Events] - summary: List Event Delivery Attempts - description: Retrieves a list of delivery attempts for a specific event, including response details. - operationId: listTenantEventDeliveries + summary: List Event Attempts + description: Retrieves a list of attempts for a specific event, including response details. + operationId: listTenantEventAttempts responses: "200": - description: A list of delivery attempts. + description: A list of attempts. content: application/json: schema: @@ -3775,7 +3775,7 @@ paths: items: $ref: "#/components/schemas/DeliveryAttempt" examples: - DeliveriesListExample: + AttemptsListExample: value: - delivered_at: "2024-01-01T00:00:05Z" status: "success" diff --git a/docs/pages/features/event-delivery.mdx b/docs/pages/features/event-delivery.mdx index a0ebff20..e18c1059 100644 --- a/docs/pages/features/event-delivery.mdx +++ b/docs/pages/features/event-delivery.mdx @@ -12,7 +12,7 @@ The retry interval uses an exponential backoff algorithm with a base of `2`. 
## Manual Retries -Manual retries can be triggered for any given event via the [Event API](/docs/api/events#retry-event-delivery) or user portal. +Manual retries can be triggered for any given attempt via the [Attempts API](/docs/api/attempts#retry-attempt) or user portal. ## Disabled destinations diff --git a/docs/pages/guides/building-your-own-ui.mdx b/docs/pages/guides/building-your-own-ui.mdx index 0ab3076a..2edbcc6a 100644 --- a/docs/pages/guides/building-your-own-ui.mdx +++ b/docs/pages/guides/building-your-own-ui.mdx @@ -372,6 +372,6 @@ return ( ); ``` -For each event, you can retrieve all its associated delivery attempts using the [List Event Deliveries Attempts API](/docs/api/event-deliveries-attempts#list-event-deliveries-attempts). +For each event, you can retrieve all its associated delivery attempts using the [List Event Attempts API](/docs/api/events#list-event-attempts). You can find the source code of the `Events.tsx` component of the User Portal here: [Events.tsx](https://github.com/hookdeck/outpost/blob/main/internal/portal/src/scenes/Destination/Events/Events.tsx) diff --git a/docs/pages/guides/migrate-to-outpost.mdx b/docs/pages/guides/migrate-to-outpost.mdx index ad966ea2..cdd2b4a8 100644 --- a/docs/pages/guides/migrate-to-outpost.mdx +++ b/docs/pages/guides/migrate-to-outpost.mdx @@ -129,7 +129,7 @@ To migrate your historical data to Outpost, you need to map your existing data s The Outpost schema contains two tables related to events: 1. **events** - The events that Outpost has received to publish. -2. **deliveries** - The delivery attempts of events to destinations. +2. **attempts** - The delivery attempts of events to destinations. The following diagram shows the Outpost schema. You can connect to the database instance within your Outpost installation to inspect the schema further. diff --git a/docs/pages/references/configuration.mdx b/docs/pages/references/configuration.mdx index d081cf40..528fa31a 100644 --- a/docs/pages/references/configuration.mdx +++ b/docs/pages/references/configuration.mdx @@ -74,7 +74,7 @@ Global configurations are provided through env variables or a YAML file. ConfigM | `GCP_PUBSUB_SERVICE_ACCOUNT_CREDENTIALS` | JSON string or path to a file containing GCP service account credentials for Pub/Sub. Required if GCP Pub/Sub is the chosen MQ provider and not running in an environment with implicit credentials (e.g., GCE, GKE). | `nil` | Conditional | | `GIN_MODE` | Sets the Gin framework mode (e.g., 'debug', 'release', 'test'). See Gin documentation for details. | `release` | No | | `HTTP_USER_AGENT` | Custom HTTP User-Agent string for outgoing webhook deliveries. If unset, a default (OrganizationName/Version) is used. | `nil` | No | -| `IDGEN_DELIVERY_PREFIX` | Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). Default: empty (no prefix) | `nil` | No | +| `IDGEN_ATTEMPT_PREFIX` | Prefix for attempt IDs, prepended with underscore (e.g., 'atm_123'). Default: empty (no prefix) | `nil` | No | | `IDGEN_DESTINATION_PREFIX` | Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). Default: empty (no prefix) | `nil` | No | | `IDGEN_EVENT_PREFIX` | Prefix for event IDs, prepended with underscore (e.g., 'evt_123'). Default: empty (no prefix) | `nil` | No | | `IDGEN_TYPE` | ID generation type for all entities: uuidv4, uuidv7, nanoid. Default: uuidv4 | `uuidv4` | No | @@ -133,6 +133,7 @@ Global configurations are provided through env variables or a YAML file. 
ConfigM | `RETRY_INTERVAL_SECONDS` | Interval in seconds for exponential backoff retry strategy (base 2). Ignored if retry_schedule is provided. | `30` | No | | `RETRY_POLL_BACKOFF_MS` | Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. Default: 100 | `100` | No | | `RETRY_SCHEDULE` | Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h. | `[]` | No | +| `RETRY_VISIBILITY_TIMEOUT_SECONDS` | Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30 | `30` | No | | `SERVICE` | Specifies the service type to run. Valid values: 'api', 'log', 'delivery', or empty/all for singular mode (runs all services). | `nil` | No | | `TELEMETRY_BATCH_INTERVAL` | Maximum time in seconds to wait before sending a batch of telemetry events if batch size is not reached. | `5` | No | | `TELEMETRY_BATCH_SIZE` | Maximum number of telemetry events to batch before sending. | `100` | No | @@ -269,8 +270,8 @@ gin_mode: "release" http_user_agent: "" idgen: - # Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). Default: empty (no prefix) - delivery_prefix: "" + # Prefix for attempt IDs, prepended with underscore (e.g., 'atm_123'). Default: empty (no prefix) + attempt_prefix: "" # Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). Default: empty (no prefix) destination_prefix: "" @@ -607,6 +608,9 @@ retry_poll_backoff_ms: 100 # Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h. retry_schedule: [] +# Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30 +retry_visibility_timeout_seconds: 30 + # Specifies the service type to run. Valid values: 'api', 'log', 'delivery', or empty/all for singular mode (runs all services). 
service: "" diff --git a/go.mod b/go.mod index c2de85c1..48572390 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,6 @@ require ( github.com/testcontainers/testcontainers-go/modules/localstack v0.36.0 github.com/testcontainers/testcontainers-go/modules/postgres v0.36.0 github.com/testcontainers/testcontainers-go/modules/rabbitmq v0.36.0 - github.com/testcontainers/testcontainers-go/modules/redis v0.36.0 github.com/uptrace/opentelemetry-go-extra/otelzap v0.3.1 github.com/urfave/cli/v3 v3.4.1 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.53.0 diff --git a/go.sum b/go.sum index 6c635117..af9e1f8a 100644 --- a/go.sum +++ b/go.sum @@ -906,8 +906,6 @@ github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4 github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ -1288,8 +1286,6 @@ github.com/testcontainers/testcontainers-go/modules/postgres v0.36.0 h1:xTGNNsOD github.com/testcontainers/testcontainers-go/modules/postgres v0.36.0/go.mod h1:WKS3MGq1lzbVibIRnL08TOaf5bKWPxJe5frzyQfV4oY= github.com/testcontainers/testcontainers-go/modules/rabbitmq v0.36.0 h1:gobSVNvTsiJTcGTlVJMpeUfAcz85HAMMwo8xEVQZItE= github.com/testcontainers/testcontainers-go/modules/rabbitmq v0.36.0/go.mod h1:rLtFlrLEWcU/Ud52FiGk57QvUqoAHvR380hZo+tkBaI= -github.com/testcontainers/testcontainers-go/modules/redis v0.36.0 h1:Z+6APQ0DjQP8Kj5Fu+lkAlH2v7f5QkAQyyjnf1Kq8sw= -github.com/testcontainers/testcontainers-go/modules/redis v0.36.0/go.mod h1:LV66RJhSMikZrxJRc6O0nKcRqykmjQSyX82S93haE2w= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= diff --git a/internal/alert/evaluator.go b/internal/alert/evaluator.go index 60562b5b..92386e96 100644 --- a/internal/alert/evaluator.go +++ b/internal/alert/evaluator.go @@ -39,7 +39,6 @@ func NewAlertEvaluator(thresholds []int, autoDisableFailureCount int) AlertEvalu }) } - // Sort by failure count sort.Slice(finalThresholds, func(i, j int) bool { return finalThresholds[i].failures < finalThresholds[j].failures }) // Check if we need to add 100 diff --git a/internal/alert/monitor.go b/internal/alert/monitor.go index 8e4b5934..39ff54f4 100644 --- a/internal/alert/monitor.go +++ b/internal/alert/monitor.go @@ -148,7 +148,6 @@ func (m *alertMonitor) HandleAttempt(ctx context.Context, attempt DeliveryAttemp return fmt.Errorf("failed to get alert state: %w", err) } - // Check if we should send an alert level, shouldAlert := m.evaluator.ShouldAlert(count) if !shouldAlert { return nil diff --git a/internal/alert/notifier.go b/internal/alert/notifier.go index c0c8775f..ef9589bc 100644 --- a/internal/alert/notifier.go +++ 
b/internal/alert/notifier.go @@ -110,32 +110,27 @@ func NewHTTPAlertNotifier(callbackURL string, opts ...NotifierOption) AlertNotif } func (n *httpAlertNotifier) Notify(ctx context.Context, alert Alert) error { - // Marshal alert to JSON body, err := alert.MarshalJSON() if err != nil { return fmt.Errorf("failed to marshal alert: %w", err) } - // Create request req, err := http.NewRequestWithContext(ctx, http.MethodPost, n.callbackURL, bytes.NewReader(body)) if err != nil { return fmt.Errorf("failed to create request: %w", err) } - // Set headers req.Header.Set("Content-Type", "application/json") if n.bearerToken != "" { req.Header.Set("Authorization", "Bearer "+n.bearerToken) } - // Send request resp, err := n.client.Do(req) if err != nil { return fmt.Errorf("failed to send alert: %w", err) } defer resp.Body.Close() - // Check response status if resp.StatusCode >= 400 { return fmt.Errorf("alert callback failed with status %d", resp.StatusCode) } diff --git a/internal/alert/store.go b/internal/alert/store.go index 9aaf2424..3bc3a0a4 100644 --- a/internal/alert/store.go +++ b/internal/alert/store.go @@ -42,7 +42,6 @@ func (s *redisAlertStore) IncrementConsecutiveFailureCount(ctx context.Context, incrCmd := pipe.Incr(ctx, key) pipe.Expire(ctx, key, 24*time.Hour) - // Execute the transaction _, err := pipe.Exec(ctx) if err != nil { return 0, fmt.Errorf("failed to execute consecutive failure count transaction: %w", err) diff --git a/internal/apirouter/log_handlers.go b/internal/apirouter/log_handlers.go index 478a4460..a50a16fc 100644 --- a/internal/apirouter/log_handlers.go +++ b/internal/apirouter/log_handlers.go @@ -86,8 +86,8 @@ func parseIncludeOptions(c *gin.Context) IncludeOptions { // API Response types -// APIDelivery is the API response for a delivery -type APIDelivery struct { +// APIAttempt is the API response for an attempt +type APIAttempt struct { ID string `json:"id"` Status string `json:"status"` DeliveredAt time.Time `json:"delivered_at"` @@ -130,9 +130,9 @@ type APIEvent struct { Data map[string]interface{} `json:"data,omitempty"` } -// DeliveryPaginatedResult is the paginated response for listing deliveries. -type DeliveryPaginatedResult struct { - Models []APIDelivery `json:"models"` +// AttemptPaginatedResult is the paginated response for listing attempts. 
+type AttemptPaginatedResult struct {
+	Models     []APIAttempt   `json:"models"`
 	Pagination SeekPagination `json:"pagination"`
 }
 
@@ -142,47 +142,47 @@ type EventPaginatedResult struct {
 	Pagination SeekPagination `json:"pagination"`
 }
 
-// toAPIDelivery converts a DeliveryRecord to APIDelivery with expand options
-func toAPIDelivery(dr *logstore.DeliveryRecord, opts IncludeOptions) APIDelivery {
-	api := APIDelivery{
-		Attempt:     dr.Delivery.Attempt,
-		Manual:      dr.Delivery.Manual,
-		Destination: dr.Delivery.DestinationID,
+// toAPIAttempt converts an AttemptRecord to APIAttempt with expand options
+func toAPIAttempt(ar *logstore.AttemptRecord, opts IncludeOptions) APIAttempt {
+	api := APIAttempt{
+		Attempt:     ar.Attempt.AttemptNumber,
+		Manual:      ar.Attempt.Manual,
+		Destination: ar.Attempt.DestinationID,
 	}
 
-	if dr.Delivery != nil {
-		api.ID = dr.Delivery.ID
-		api.Status = dr.Delivery.Status
-		api.DeliveredAt = dr.Delivery.Time
-		api.Code = dr.Delivery.Code
+	if ar.Attempt != nil {
+		api.ID = ar.Attempt.ID
+		api.Status = ar.Attempt.Status
+		api.DeliveredAt = ar.Attempt.Time
+		api.Code = ar.Attempt.Code
 		if opts.ResponseData {
-			api.ResponseData = dr.Delivery.ResponseData
+			api.ResponseData = ar.Attempt.ResponseData
 		}
 	}
 
-	if dr.Event != nil {
+	if ar.Event != nil {
 		if opts.EventData {
 			api.Event = APIEventFull{
-				ID:               dr.Event.ID,
-				Topic:            dr.Event.Topic,
-				Time:             dr.Event.Time,
-				EligibleForRetry: dr.Event.EligibleForRetry,
-				Metadata:         dr.Event.Metadata,
-				Data:             dr.Event.Data,
+				ID:               ar.Event.ID,
+				Topic:            ar.Event.Topic,
+				Time:             ar.Event.Time,
+				EligibleForRetry: ar.Event.EligibleForRetry,
+				Metadata:         ar.Event.Metadata,
+				Data:             ar.Event.Data,
 			}
 		} else if opts.Event {
 			api.Event = APIEventSummary{
-				ID:               dr.Event.ID,
-				Topic:            dr.Event.Topic,
-				Time:             dr.Event.Time,
-				EligibleForRetry: dr.Event.EligibleForRetry,
-				Metadata:         dr.Event.Metadata,
+				ID:               ar.Event.ID,
+				Topic:            ar.Event.Topic,
+				Time:             ar.Event.Time,
+				EligibleForRetry: ar.Event.EligibleForRetry,
+				Metadata:         ar.Event.Metadata,
 			}
 		} else {
-			api.Event = dr.Event.ID
+			api.Event = ar.Event.ID
 		}
 	} else {
-		api.Event = dr.Delivery.EventID
+		api.Event = ar.Attempt.EventID
 	}
 
 	// TODO: Handle destination expansion
@@ -193,17 +193,17 @@ func toAPIDelivery(dr *logstore.DeliveryRecord, opts IncludeOptions) APIDelivery
 	return api
 }
 
-// ListDeliveries handles GET /:tenantID/deliveries
+// ListAttempts handles GET /:tenantID/attempts
 // Query params: event_id, destination_id, status, topic[], start, end, limit, next, prev, expand[], sort_order
-func (h *LogHandlers) ListDeliveries(c *gin.Context) {
+func (h *LogHandlers) ListAttempts(c *gin.Context) {
 	tenant := mustTenantFromContext(c)
 	if tenant == nil {
 		return
 	}
-	h.listDeliveriesInternal(c, tenant.ID)
+	h.listAttemptsInternal(c, tenant.ID)
 }
 
-func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) {
+func (h *LogHandlers) listAttemptsInternal(c *gin.Context, tenantID string) {
 	// Parse and validate cursors (next/prev are mutually exclusive)
 	cursors, errResp := ParseCursors(c)
 	if errResp != nil {
@@ -234,7 +234,7 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) {
 	_ = orderBy
 
 	// Parse time date filters
-	deliveryTimeFilter, errResp := ParseDateFilter(c, "time")
+	attemptTimeFilter, errResp := ParseDateFilter(c, "time")
 	if errResp != nil {
 		AbortWithError(c, errResp.Code, *errResp)
 		return
 	}
@@ -247,17 +247,17 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) {
 		destinationIDs = []string{destID}
 	}
 
-	req := logstore.ListDeliveryRequest{
+	req := logstore.ListAttemptRequest{
 		TenantID:       tenantID,
 		EventID:        c.Query("event_id"),
 		DestinationIDs: destinationIDs,
 		Status:         c.Query("status"),
 		Topics:         parseQueryArray(c, "topic"),
 		TimeFilter: logstore.TimeFilter{
-			GTE: deliveryTimeFilter.GTE,
-			LTE: deliveryTimeFilter.LTE,
-			GT:  deliveryTimeFilter.GT,
-			LT:  deliveryTimeFilter.LT,
+			GTE: attemptTimeFilter.GTE,
+			LTE: attemptTimeFilter.LTE,
+			GT:  attemptTimeFilter.GT,
+			LT:  attemptTimeFilter.LT,
 		},
 		Limit: limit,
 		Next:  cursors.Next,
@@ -265,7 +265,7 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) {
 		SortOrder: dir,
 	}
 
-	response, err := h.logStore.ListDelivery(c.Request.Context(), req)
+	response, err := h.logStore.ListAttempt(c.Request.Context(), req)
 	if err != nil {
 		if errors.Is(err, cursor.ErrInvalidCursor) || errors.Is(err, cursor.ErrVersionMismatch) {
 			AbortWithError(c, http.StatusBadRequest, NewErrBadRequest(err))
@@ -277,13 +277,13 @@ func (h *LogHandlers) listDeliveriesInternal(c *gin.Context, tenantID string) {
 	includeOpts := parseIncludeOptions(c)
 
-	apiDeliveries := make([]APIDelivery, len(response.Data))
-	for i, de := range response.Data {
-		apiDeliveries[i] = toAPIDelivery(de, includeOpts)
+	apiAttempts := make([]APIAttempt, len(response.Data))
+	for i, ar := range response.Data {
+		apiAttempts[i] = toAPIAttempt(ar, includeOpts)
 	}
 
-	c.JSON(http.StatusOK, DeliveryPaginatedResult{
-		Models: apiDeliveries,
+	c.JSON(http.StatusOK, AttemptPaginatedResult{
+		Models: apiAttempts,
 		Pagination: SeekPagination{
 			OrderBy: orderBy,
 			Dir:     dir,
@@ -323,30 +323,30 @@ func (h *LogHandlers) RetrieveEvent(c *gin.Context) {
 	})
 }
 
-// RetrieveDelivery handles GET /:tenantID/deliveries/:deliveryID
-func (h *LogHandlers) RetrieveDelivery(c *gin.Context) {
+// RetrieveAttempt handles GET /:tenantID/attempts/:attemptID
+func (h *LogHandlers) RetrieveAttempt(c *gin.Context) {
 	tenant := mustTenantFromContext(c)
 	if tenant == nil {
 		return
 	}
-	deliveryID := c.Param("deliveryID")
+	attemptID := c.Param("attemptID")
 
-	deliveryRecord, err := h.logStore.RetrieveDelivery(c.Request.Context(), logstore.RetrieveDeliveryRequest{
-		TenantID:   tenant.ID,
-		DeliveryID: deliveryID,
+	attemptRecord, err := h.logStore.RetrieveAttempt(c.Request.Context(), logstore.RetrieveAttemptRequest{
+		TenantID:  tenant.ID,
+		AttemptID: attemptID,
 	})
 	if err != nil {
 		AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err))
 		return
 	}
-	if deliveryRecord == nil {
-		AbortWithError(c, http.StatusNotFound, NewErrNotFound("delivery"))
+	if attemptRecord == nil {
+		AbortWithError(c, http.StatusNotFound, NewErrNotFound("attempt"))
 		return
 	}
 
 	includeOpts := parseIncludeOptions(c)
 
-	c.JSON(http.StatusOK, toAPIDelivery(deliveryRecord, includeOpts))
+	c.JSON(http.StatusOK, toAPIAttempt(attemptRecord, includeOpts))
 }
 
 // AdminListEvents handles GET /events (admin-only, cross-tenant)
@@ -355,10 +355,10 @@ func (h *LogHandlers) AdminListEvents(c *gin.Context) {
 	h.listEventsInternal(c, c.Query("tenant_id"))
 }
 
-// AdminListDeliveries handles GET /deliveries (admin-only, cross-tenant)
+// AdminListAttempts handles GET /attempts (admin-only, cross-tenant)
 // Query params: tenant_id (optional), event_id, destination_id, status, topic[], start, end, limit, next, prev, expand[], sort_order
-func (h *LogHandlers) AdminListDeliveries(c *gin.Context) {
-	h.listDeliveriesInternal(c, c.Query("tenant_id"))
+func (h *LogHandlers) AdminListAttempts(c *gin.Context) {
+	h.listAttemptsInternal(c, c.Query("tenant_id"))
 }
 
 // ListEvents handles GET /:tenantID/events
diff --git 
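// For reference while reading the handlers above, a small sketch (not part of the
// patch, written as if it lived in this package) of how the include options map to
// the three shapes toAPIAttempt can return; the record values are invented.
func exampleIncludeShapes() {
	rec := &logstore.AttemptRecord{
		Attempt: &models.Attempt{ID: "atm_123", EventID: "evt_123", DestinationID: "des_123"},
		Event:   &models.Event{ID: "evt_123", Topic: "user.created"},
	}
	_ = toAPIAttempt(rec, IncludeOptions{})                   // Event is the ID string "evt_123"
	_ = toAPIAttempt(rec, IncludeOptions{Event: true})        // Event is an APIEventSummary (metadata, no data)
	_ = toAPIAttempt(rec, IncludeOptions{EventData: true})    // Event is an APIEventFull (includes data)
	_ = toAPIAttempt(rec, IncludeOptions{ResponseData: true}) // ResponseData is copied onto the APIAttempt
}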
a/internal/apirouter/log_handlers_test.go b/internal/apirouter/log_handlers_test.go index 693d04e2..431e9fdb 100644 --- a/internal/apirouter/log_handlers_test.go +++ b/internal/apirouter/log_handlers_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestListDeliveries(t *testing.T) { +func TestListAttempts(t *testing.T) { t.Parallel() result := setupTestRouterFull(t, "", "") @@ -35,9 +35,9 @@ func TestListDeliveries(t *testing.T) { CreatedAt: time.Now(), })) - t.Run("should return empty list when no deliveries", func(t *testing.T) { + t.Run("should return empty list when no attempts", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -49,12 +49,12 @@ func TestListDeliveries(t *testing.T) { assert.Len(t, data, 0) }) - t.Run("should list deliveries", func(t *testing.T) { - // Seed delivery events + t.Run("should list attempts", func(t *testing.T) { + // Seed attempt events eventID := idgen.Event() - deliveryID := idgen.Delivery() + attemptID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(eventID), @@ -64,18 +64,18 @@ func TestListDeliveries(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(attemptID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(attemptTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -86,16 +86,16 @@ func TestListDeliveries(t *testing.T) { data := response["models"].([]interface{}) assert.Len(t, data, 1) - firstDelivery := data[0].(map[string]interface{}) - assert.Equal(t, deliveryID, firstDelivery["id"]) - assert.Equal(t, "success", firstDelivery["status"]) - assert.Equal(t, eventID, firstDelivery["event"]) // Not included - assert.Equal(t, destinationID, firstDelivery["destination"]) + firstAttempt := data[0].(map[string]interface{}) + assert.Equal(t, attemptID, firstAttempt["id"]) + assert.Equal(t, "success", firstAttempt["status"]) + assert.Equal(t, eventID, firstAttempt["event"]) // Not included + assert.Equal(t, destinationID, firstAttempt["destination"]) }) t.Run("should include event when include=event", func(t *testing.T) { w := httptest.NewRecorder() - 
req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=event", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=event", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -106,8 +106,8 @@ func TestListDeliveries(t *testing.T) { data := response["models"].([]interface{}) require.Len(t, data, 1) - firstDelivery := data[0].(map[string]interface{}) - event := firstDelivery["event"].(map[string]interface{}) + firstAttempt := data[0].(map[string]interface{}) + event := firstAttempt["event"].(map[string]interface{}) assert.NotNil(t, event["id"]) assert.Equal(t, "user.created", event["topic"]) // data should not be present without include=event.data @@ -116,7 +116,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should include event.data when include=event.data", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=event.data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=event.data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -127,15 +127,15 @@ func TestListDeliveries(t *testing.T) { data := response["models"].([]interface{}) require.Len(t, data, 1) - firstDelivery := data[0].(map[string]interface{}) - event := firstDelivery["event"].(map[string]interface{}) + firstAttempt := data[0].(map[string]interface{}) + event := firstAttempt["event"].(map[string]interface{}) assert.NotNil(t, event["id"]) assert.NotNil(t, event["data"]) // data should be present }) t.Run("should filter by destination_id", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?destination_id="+destinationID, nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?destination_id="+destinationID, nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -149,7 +149,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should filter by non-existent destination_id", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?destination_id=nonexistent", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?destination_id=nonexistent", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -163,7 +163,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should return 404 for non-existent tenant", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -171,7 +171,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should exclude response_data by default", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -182,16 +182,16 @@ func TestListDeliveries(t *testing.T) { data := response["models"].([]interface{}) require.Len(t, data, 1) - firstDelivery := data[0].(map[string]interface{}) - assert.Nil(t, firstDelivery["response_data"]) + 
firstAttempt := data[0].(map[string]interface{}) + assert.Nil(t, firstAttempt["response_data"]) }) t.Run("should include response_data with include=response_data", func(t *testing.T) { - // Seed a delivery with response_data + // Seed an attempt with response_data eventID := idgen.Event() - deliveryID := idgen.Delivery() + attemptID := idgen.Attempt() eventTime := time.Now().Add(-30 * time.Minute).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(eventID), @@ -201,22 +201,22 @@ func TestListDeliveries(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(attemptID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(attemptTime), ) - delivery.ResponseData = map[string]interface{}{ + attempt.ResponseData = map[string]interface{}{ "body": "OK", "status": float64(200), } - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=response_data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=response_data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -225,25 +225,25 @@ func TestListDeliveries(t *testing.T) { require.NoError(t, json.Unmarshal(w.Body.Bytes(), &response)) data := response["models"].([]interface{}) - // Find the delivery we just created - var foundDelivery map[string]interface{} + // Find the attempt we just created + var foundAttempt map[string]interface{} for _, d := range data { - del := d.(map[string]interface{}) - if del["id"] == deliveryID { - foundDelivery = del + atm := d.(map[string]interface{}) + if atm["id"] == attemptID { + foundAttempt = atm break } } - require.NotNil(t, foundDelivery, "delivery not found in response") - require.NotNil(t, foundDelivery["response_data"], "response_data should be included") - respData := foundDelivery["response_data"].(map[string]interface{}) + require.NotNil(t, foundAttempt, "attempt not found in response") + require.NotNil(t, foundAttempt["response_data"], "response_data should be included") + respData := foundAttempt["response_data"].(map[string]interface{}) assert.Equal(t, "OK", respData["body"]) assert.Equal(t, float64(200), respData["status"]) }) t.Run("should support comma-separated include param", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?include=event,response_data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?include=event,response_data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -254,16 +254,16 
@@ func TestListDeliveries(t *testing.T) { data := response["models"].([]interface{}) require.GreaterOrEqual(t, len(data), 1) - firstDelivery := data[0].(map[string]interface{}) + firstAttempt := data[0].(map[string]interface{}) // event should be included (object, not string) - event := firstDelivery["event"].(map[string]interface{}) + event := firstAttempt["event"].(map[string]interface{}) assert.NotNil(t, event["id"]) assert.NotNil(t, event["topic"]) }) t.Run("should return validation error for invalid dir", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?dir=invalid", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?dir=invalid", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnprocessableEntity, w.Code) @@ -271,7 +271,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should accept valid dir param", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?dir=asc", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?dir=asc", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -279,7 +279,7 @@ func TestListDeliveries(t *testing.T) { t.Run("should cap limit at 1000", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries?limit=5000", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts?limit=5000", nil) result.router.ServeHTTP(w, req) // Should succeed, limit is silently capped @@ -287,7 +287,7 @@ func TestListDeliveries(t *testing.T) { }) } -func TestRetrieveDelivery(t *testing.T) { +func TestRetrieveAttempt(t *testing.T) { t.Parallel() result := setupTestRouterFull(t, "", "") @@ -307,11 +307,11 @@ func TestRetrieveDelivery(t *testing.T) { CreatedAt: time.Now(), })) - // Seed a delivery event + // Seed an attempt event eventID := idgen.Event() - deliveryID := idgen.Delivery() + attemptID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(eventID), @@ -321,19 +321,19 @@ func TestRetrieveDelivery(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(attemptID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(attemptTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) - t.Run("should retrieve delivery by ID", func(t *testing.T) { + t.Run("should retrieve attempt by ID", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", 
baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID, nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+attemptID, nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -341,7 +341,7 @@ func TestRetrieveDelivery(t *testing.T) { var response map[string]interface{} require.NoError(t, json.Unmarshal(w.Body.Bytes(), &response)) - assert.Equal(t, deliveryID, response["id"]) + assert.Equal(t, attemptID, response["id"]) assert.Equal(t, "failed", response["status"]) assert.Equal(t, eventID, response["event"]) // Not included assert.Equal(t, destinationID, response["destination"]) @@ -349,7 +349,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should include event when include=event", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"?include=event", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+attemptID+"?include=event", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -366,7 +366,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should include event.data when include=event.data", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"?include=event.data", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+attemptID+"?include=event.data", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) @@ -379,9 +379,9 @@ func TestRetrieveDelivery(t *testing.T) { assert.NotNil(t, event["data"]) // data should be present }) - t.Run("should return 404 for non-existent delivery", func(t *testing.T) { + t.Run("should return 404 for non-existent attempt", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/deliveries/nonexistent", nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/attempts/nonexistent", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -389,7 +389,7 @@ func TestRetrieveDelivery(t *testing.T) { t.Run("should return 404 for non-existent tenant", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/deliveries/"+deliveryID, nil) + req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/nonexistent/attempts/"+attemptID, nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -416,11 +416,11 @@ func TestRetrieveEvent(t *testing.T) { CreatedAt: time.Now(), })) - // Seed a delivery event + // Seed an attempt event eventID := idgen.Event() - deliveryID := idgen.Delivery() + attemptID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(eventID), @@ -436,15 +436,15 @@ func TestRetrieveEvent(t *testing.T) { }), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + 
testutil.AttemptFactory.WithID(attemptID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(attemptTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) t.Run("should retrieve event by ID", func(t *testing.T) { w := httptest.NewRecorder() @@ -516,11 +516,11 @@ func TestListEvents(t *testing.T) { }) t.Run("should list events", func(t *testing.T) { - // Seed delivery events + // Seed attempt events eventID := idgen.Event() - deliveryID := idgen.Delivery() + attemptID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(eventID), @@ -533,15 +533,15 @@ func TestListEvents(t *testing.T) { }), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(attemptID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(attemptTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) w := httptest.NewRecorder() req, _ := http.NewRequest("GET", baseAPIPath+"/tenants/"+tenantID+"/events", nil) diff --git a/internal/apirouter/logger_middleware_integration_test.go b/internal/apirouter/logger_middleware_integration_test.go index 2d44ea86..392b1f94 100644 --- a/internal/apirouter/logger_middleware_integration_test.go +++ b/internal/apirouter/logger_middleware_integration_test.go @@ -65,7 +65,7 @@ func (r *mockRegistry) CreatePublisher(ctx context.Context, destination *models. 
 	return nil, fmt.Errorf("not implemented")
 }
 
-func (r *mockRegistry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) {
+func (r *mockRegistry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) {
 	return nil, fmt.Errorf("not implemented")
 }
 
diff --git a/internal/apirouter/retry_handlers.go b/internal/apirouter/retry_handlers.go
index ec2844c6..f52f99d3 100644
--- a/internal/apirouter/retry_handlers.go
+++ b/internal/apirouter/retry_handlers.go
@@ -32,33 +32,33 @@ func NewRetryHandlers(
 	}
 }
 
-// RetryDelivery handles POST /:tenantID/deliveries/:deliveryID/retry
+// RetryAttempt handles POST /:tenantID/attempts/:attemptID/retry
 // Constraints:
-// - Only the latest delivery for an event+destination pair can be retried
+// - Only the latest attempt for an event+destination pair can be retried
 // - Destination must exist and be enabled
-func (h *RetryHandlers) RetryDelivery(c *gin.Context) {
+func (h *RetryHandlers) RetryAttempt(c *gin.Context) {
 	tenant := mustTenantFromContext(c)
 	if tenant == nil {
 		return
 	}
-	deliveryID := c.Param("deliveryID")
+	attemptID := c.Param("attemptID")
 
-	// 1. Look up delivery by ID
-	deliveryRecord, err := h.logStore.RetrieveDelivery(c.Request.Context(), logstore.RetrieveDeliveryRequest{
-		TenantID:   tenant.ID,
-		DeliveryID: deliveryID,
+	// 1. Look up attempt by ID
+	attemptRecord, err := h.logStore.RetrieveAttempt(c.Request.Context(), logstore.RetrieveAttemptRequest{
+		TenantID:  tenant.ID,
+		AttemptID: attemptID,
 	})
 	if err != nil {
 		AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err))
 		return
 	}
-	if deliveryRecord == nil {
-		AbortWithError(c, http.StatusNotFound, NewErrNotFound("delivery"))
+	if attemptRecord == nil {
+		AbortWithError(c, http.StatusNotFound, NewErrNotFound("attempt"))
 		return
 	}
 
 	// 2. Check destination exists and is enabled
-	destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, deliveryRecord.Delivery.DestinationID)
+	destination, err := h.entityStore.RetrieveDestination(c.Request.Context(), tenant.ID, attemptRecord.Attempt.DestinationID)
 	if err != nil {
 		AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err))
 		return
@@ -79,7 +79,7 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) {
 	}
 
 	// 3. 
Create and publish manual delivery task - task := models.NewManualDeliveryTask(*deliveryRecord.Event, deliveryRecord.Delivery.DestinationID) + task := models.NewManualDeliveryTask(*attemptRecord.Event, attemptRecord.Attempt.DestinationID) if err := h.deliveryMQ.Publish(c.Request.Context(), task); err != nil { AbortWithError(c, http.StatusInternalServerError, NewErrInternalServer(err)) @@ -87,10 +87,10 @@ func (h *RetryHandlers) RetryDelivery(c *gin.Context) { } h.logger.Ctx(c.Request.Context()).Audit("manual retry initiated", - zap.String("delivery_id", deliveryID), - zap.String("event_id", deliveryRecord.Event.ID), + zap.String("attempt_id", attemptID), + zap.String("event_id", attemptRecord.Event.ID), zap.String("tenant_id", tenant.ID), - zap.String("destination_id", deliveryRecord.Delivery.DestinationID), + zap.String("destination_id", attemptRecord.Attempt.DestinationID), zap.String("destination_type", destination.Type)) c.JSON(http.StatusAccepted, gin.H{ diff --git a/internal/apirouter/retry_handlers_test.go b/internal/apirouter/retry_handlers_test.go index 170b147c..74c7c65e 100644 --- a/internal/apirouter/retry_handlers_test.go +++ b/internal/apirouter/retry_handlers_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestRetryDelivery(t *testing.T) { +func TestRetryAttempt(t *testing.T) { t.Parallel() result := setupTestRouterFull(t, "", "") @@ -35,11 +35,11 @@ func TestRetryDelivery(t *testing.T) { CreatedAt: time.Now(), })) - // Seed a delivery event + // Seed an attempt event eventID := idgen.Event() - deliveryID := idgen.Delivery() + attemptID := idgen.Attempt() eventTime := time.Now().Add(-1 * time.Hour).Truncate(time.Millisecond) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(eventID), @@ -49,17 +49,17 @@ func TestRetryDelivery(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(deliveryID), - testutil.DeliveryFactory.WithEventID(eventID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(attemptID), + testutil.AttemptFactory.WithEventID(eventID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(attemptTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: event, Attempt: attempt}})) - t.Run("should retry delivery successfully with full event data", func(t *testing.T) { + t.Run("should retry attempt successfully with full event data", func(t *testing.T) { // Subscribe to deliveryMQ to capture published task ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -69,7 +69,7 @@ func TestRetryDelivery(t *testing.T) { // Trigger manual retry w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+deliveryID+"/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+attemptID+"/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, 
http.StatusAccepted, w.Code) @@ -97,9 +97,9 @@ func TestRetryDelivery(t *testing.T) { msg.Ack() }) - t.Run("should return 404 for non-existent delivery", func(t *testing.T) { + t.Run("should return 404 for non-existent attempt", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/nonexistent/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/attempts/nonexistent/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -107,7 +107,7 @@ func TestRetryDelivery(t *testing.T) { t.Run("should return 404 for non-existent tenant", func(t *testing.T) { w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/nonexistent/deliveries/"+deliveryID+"/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/nonexistent/attempts/"+attemptID+"/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -126,9 +126,9 @@ func TestRetryDelivery(t *testing.T) { DisabledAt: &disabledAt, })) - // Create a delivery for the disabled destination + // Create an attempt for the disabled destination disabledEventID := idgen.Event() - disabledDeliveryID := idgen.Delivery() + disabledAttemptID := idgen.Attempt() disabledEvent := testutil.EventFactory.AnyPointer( testutil.EventFactory.WithID(disabledEventID), @@ -138,18 +138,18 @@ func TestRetryDelivery(t *testing.T) { testutil.EventFactory.WithTime(eventTime), ) - disabledDelivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(disabledDeliveryID), - testutil.DeliveryFactory.WithEventID(disabledEventID), - testutil.DeliveryFactory.WithDestinationID(disabledDestinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(deliveryTime), + disabledAttempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(disabledAttemptID), + testutil.AttemptFactory.WithEventID(disabledEventID), + testutil.AttemptFactory.WithDestinationID(disabledDestinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(attemptTime), ) - require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: disabledEvent, Delivery: disabledDelivery}})) + require.NoError(t, result.logStore.InsertMany(context.Background(), []*models.LogEntry{{Event: disabledEvent, Attempt: disabledAttempt}})) w := httptest.NewRecorder() - req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/deliveries/"+disabledDeliveryID+"/retry", nil) + req, _ := http.NewRequest("POST", baseAPIPath+"/tenants/"+tenantID+"/attempts/"+disabledAttemptID+"/retry", nil) result.router.ServeHTTP(w, req) assert.Equal(t, http.StatusBadRequest, w.Code) diff --git a/internal/apirouter/router.go b/internal/apirouter/router.go index 8d42539c..a3fad430 100644 --- a/internal/apirouter/router.go +++ b/internal/apirouter/router.go @@ -170,8 +170,8 @@ func NewRouter( }, { Method: http.MethodGet, - Path: "/deliveries", - Handler: logHandlers.AdminListDeliveries, + Path: "/attempts", + Handler: logHandlers.AdminListAttempts, AuthScope: AuthScopeAdmin, Mode: RouteModeAlways, }, @@ -353,11 +353,11 @@ func NewRouter( }, }, - // Delivery routes + // Attempt routes { Method: http.MethodGet, - Path: "/:tenantID/deliveries", - Handler: logHandlers.ListDeliveries, + Path: "/:tenantID/attempts", + Handler: logHandlers.ListAttempts, AuthScope: AuthScopeAdminOrTenant, Mode: 
RouteModeAlways, Middlewares: []gin.HandlerFunc{ @@ -366,8 +366,8 @@ func NewRouter( }, { Method: http.MethodGet, - Path: "/:tenantID/deliveries/:deliveryID", - Handler: logHandlers.RetrieveDelivery, + Path: "/:tenantID/attempts/:attemptID", + Handler: logHandlers.RetrieveAttempt, AuthScope: AuthScopeAdminOrTenant, Mode: RouteModeAlways, Middlewares: []gin.HandlerFunc{ @@ -376,8 +376,8 @@ func NewRouter( }, { Method: http.MethodPost, - Path: "/:tenantID/deliveries/:deliveryID/retry", - Handler: retryHandlers.RetryDelivery, + Path: "/:tenantID/attempts/:attemptID/retry", + Handler: retryHandlers.RetryAttempt, AuthScope: AuthScopeAdminOrTenant, Mode: RouteModeAlways, Middlewares: []gin.HandlerFunc{ diff --git a/internal/app/app.go b/internal/app/app.go index 3dd86e61..8263ba92 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -121,21 +121,17 @@ func (a *App) PostRun(ctx context.Context) { } func (a *App) run(ctx context.Context) error { - // Set up cancellation context ctx, cancel := context.WithCancel(ctx) defer cancel() - // Handle sigterm and await termChan signal termChan := make(chan os.Signal, 1) signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM) - // Run workers in goroutine errChan := make(chan error, 1) go func() { errChan <- a.supervisor.Run(ctx) }() - // Wait for either termination signal or worker failure var exitErr error select { case <-termChan: @@ -175,13 +171,13 @@ func (a *App) configureIDGenerators() error { zap.String("type", a.config.IDGen.Type), zap.String("event_prefix", a.config.IDGen.EventPrefix), zap.String("destination_prefix", a.config.IDGen.DestinationPrefix), - zap.String("delivery_prefix", a.config.IDGen.DeliveryPrefix)) + zap.String("attempt_prefix", a.config.IDGen.AttemptPrefix)) if err := idgen.Configure(idgen.IDGenConfig{ Type: a.config.IDGen.Type, EventPrefix: a.config.IDGen.EventPrefix, DestinationPrefix: a.config.IDGen.DestinationPrefix, - DeliveryPrefix: a.config.IDGen.DeliveryPrefix, + AttemptPrefix: a.config.IDGen.AttemptPrefix, }); err != nil { a.logger.Error("failed to configure ID generators", zap.Error(err)) return err @@ -202,7 +198,6 @@ func (a *App) initializeRedis(ctx context.Context) error { } a.redisClient = redisClient - // Run Redis schema migrations if err := runRedisMigrations(ctx, redisClient, a.logger, a.config.DeploymentID); err != nil { a.logger.Error("Redis migration failed", zap.Error(err)) return err diff --git a/internal/app/installation.go b/internal/app/installation.go index 9019e7ea..3c43a097 100644 --- a/internal/app/installation.go +++ b/internal/app/installation.go @@ -21,12 +21,10 @@ func getInstallation(ctx context.Context, redisClient redis.Cmdable, telemetryCo // First attempt: try to get existing installation ID installationID, err := redisClient.HGet(ctx, outpostrcKey, installationKey).Result() if err == nil { - // Installation ID already exists return installationID, nil } if err != redis.Nil { - // Unexpected error return "", err } @@ -41,7 +39,6 @@ func getInstallation(ctx context.Context, redisClient redis.Cmdable, telemetryCo } if wasSet { - // We successfully set the installation ID return newInstallationID, nil } diff --git a/internal/app/migration.go b/internal/app/migration.go index 9938b659..eaa7b543 100644 --- a/internal/app/migration.go +++ b/internal/app/migration.go @@ -51,7 +51,6 @@ func runMigration(ctx context.Context, cfg *config.Config, logger *logging.Logge } if err == nil { - // Migration succeeded if versionJumped > 0 { logger.Info("migrations applied", zap.Int("version", 
version), @@ -88,7 +87,6 @@ func runMigration(ctx context.Context, cfg *config.Config, logger *logging.Logge case <-ctx.Done(): return ctx.Err() case <-time.After(retryDelay): - // Continue to next attempt } } else { // Exhausted all retries diff --git a/internal/app/redis_migration.go b/internal/app/redis_migration.go index 97aa7ddd..70d52782 100644 --- a/internal/app/redis_migration.go +++ b/internal/app/redis_migration.go @@ -35,7 +35,6 @@ func runRedisMigrations(ctx context.Context, redisClient redis.Cmdable, logger * return nil } - // Check if this is a lock-related error isLockError := isRedisLockError(err) lastErr = err @@ -56,7 +55,6 @@ func runRedisMigrations(ctx context.Context, redisClient redis.Cmdable, logger * case <-ctx.Done(): return ctx.Err() case <-time.After(retryDelay): - // Continue to next attempt } } else { logger.Error("redis migration failed after retries", @@ -70,7 +68,6 @@ func runRedisMigrations(ctx context.Context, redisClient redis.Cmdable, logger * // executeRedisMigrations creates the runner and executes migrations func executeRedisMigrations(ctx context.Context, redisClient redis.Cmdable, logger *logging.Logger, deploymentID string) error { - // Create runner client, ok := redisClient.(redis.Client) if !ok { // Wrap Cmdable to implement Client interface diff --git a/internal/config/config.go b/internal/config/config.go index 035c20c4..35fed1d5 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -76,11 +76,11 @@ type Config struct { LogMaxConcurrency int `yaml:"log_max_concurrency" env:"LOG_MAX_CONCURRENCY" desc:"Maximum number of log writing operations to process concurrently." required:"N"` // Delivery Retry - RetrySchedule []int `yaml:"retry_schedule" env:"RETRY_SCHEDULE" envSeparator:"," desc:"Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h." required:"N"` - RetryIntervalSeconds int `yaml:"retry_interval_seconds" env:"RETRY_INTERVAL_SECONDS" desc:"Interval in seconds for exponential backoff retry strategy (base 2). Ignored if retry_schedule is provided." required:"N"` - RetryMaxLimit int `yaml:"retry_max_limit" env:"MAX_RETRY_LIMIT" desc:"Maximum number of retry attempts for a single event delivery before giving up. Ignored if retry_schedule is provided." required:"N"` - RetryPollBackoffMs int `yaml:"retry_poll_backoff_ms" env:"RETRY_POLL_BACKOFF_MS" desc:"Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. Default: 100" required:"N"` - RetryVisibilityTimeoutSeconds int `yaml:"retry_visibility_timeout_seconds" env:"RETRY_VISIBILITY_TIMEOUT_SECONDS" desc:"Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30" required:"N"` + RetrySchedule []int `yaml:"retry_schedule" env:"RETRY_SCHEDULE" envSeparator:"," desc:"Comma-separated list of retry delays in seconds. If provided, overrides retry_interval_seconds and retry_max_limit. Schedule length defines the max number of retries. 
Example: '5,60,600,3600,7200' for 5 retries at 5s, 1m, 10m, 1h, 2h." required:"N"` + RetryIntervalSeconds int `yaml:"retry_interval_seconds" env:"RETRY_INTERVAL_SECONDS" desc:"Interval in seconds for exponential backoff retry strategy (base 2). Ignored if retry_schedule is provided." required:"N"` + RetryMaxLimit int `yaml:"retry_max_limit" env:"MAX_RETRY_LIMIT" desc:"Maximum number of retry attempts for a single event delivery before giving up. Ignored if retry_schedule is provided." required:"N"` + RetryPollBackoffMs int `yaml:"retry_poll_backoff_ms" env:"RETRY_POLL_BACKOFF_MS" desc:"Backoff time in milliseconds when the retry monitor finds no messages to process. When a retry message is found, the monitor immediately polls for the next message without delay. Lower values provide faster retry processing but increase Redis load. For serverless Redis providers (Upstash, ElastiCache Serverless), consider increasing to 5000-10000ms to reduce costs. Default: 100" required:"N"` + RetryVisibilityTimeoutSeconds int `yaml:"retry_visibility_timeout_seconds" env:"RETRY_VISIBILITY_TIMEOUT_SECONDS" desc:"Time in seconds a retry message is hidden after being received before becoming visible again for reprocessing. This applies when event data is temporarily unavailable (e.g., race condition with log persistence). Default: 30" required:"N"` // Event Delivery MaxDestinationsPerTenant int `yaml:"max_destinations_per_tenant" env:"MAX_DESTINATIONS_PER_TENANT" desc:"Maximum number of destinations allowed per tenant/organization." required:"N"` diff --git a/internal/config/id_gen.go b/internal/config/id_gen.go index ec556b75..b5a470d3 100644 --- a/internal/config/id_gen.go +++ b/internal/config/id_gen.go @@ -5,5 +5,5 @@ type IDGenConfig struct { Type string `yaml:"type" env:"IDGEN_TYPE" desc:"ID generation type for all entities: uuidv4, uuidv7, nanoid. Default: uuidv4" required:"N"` EventPrefix string `yaml:"event_prefix" env:"IDGEN_EVENT_PREFIX" desc:"Prefix for event IDs, prepended with underscore (e.g., 'evt_123'). Default: empty (no prefix)" required:"N"` DestinationPrefix string `yaml:"destination_prefix" env:"IDGEN_DESTINATION_PREFIX" desc:"Prefix for destination IDs, prepended with underscore (e.g., 'dst_123'). Default: empty (no prefix)" required:"N"` - DeliveryPrefix string `yaml:"delivery_prefix" env:"IDGEN_DELIVERY_PREFIX" desc:"Prefix for delivery IDs, prepended with underscore (e.g., 'dlv_123'). Default: empty (no prefix)" required:"N"` + AttemptPrefix string `yaml:"attempt_prefix" env:"IDGEN_ATTEMPT_PREFIX" desc:"Prefix for attempt IDs, prepended with underscore (e.g., 'atm_123'). 
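// A quick sketch of what the renamed prefix yields in practice (illustrative only,
// not part of the patch; the chosen type and prefix values are examples), mirroring
// how app.go wires idgen.Configure earlier in this diff:
//
//	if err := idgen.Configure(idgen.IDGenConfig{
//		Type:          "nanoid", // one of: uuidv4, uuidv7, nanoid
//		AttemptPrefix: "atm",
//	}); err != nil {
//		// handle configuration error
//	}
//	id := idgen.Attempt() // e.g. "atm_<random>", the underscore is added by the generator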
Default: empty (no prefix)" required:"N"`
 }
diff --git a/internal/deliverymq/messagehandler.go b/internal/deliverymq/messagehandler.go
index f005e047..cf91c06d 100644
--- a/internal/deliverymq/messagehandler.go
+++ b/internal/deliverymq/messagehandler.go
@@ -40,15 +40,15 @@ func (e *PreDeliveryError) Unwrap() error {
 	return e.err
 }
 
-type DeliveryError struct {
+type AttemptError struct {
 	err error
 }
 
-func (e *DeliveryError) Error() string {
-	return fmt.Sprintf("delivery error: %v", e.err)
+func (e *AttemptError) Error() string {
+	return fmt.Sprintf("attempt error: %v", e.err)
 }
 
-func (e *DeliveryError) Unwrap() error {
+func (e *AttemptError) Unwrap() error {
 	return e.err
 }
 
@@ -78,7 +78,7 @@ type messageHandler struct {
 }
 
 type Publisher interface {
-	PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error)
+	PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error)
 }
 
 type LogPublisher interface {
@@ -131,7 +131,6 @@ func NewMessageHandler(
 func (h *messageHandler) Handle(ctx context.Context, msg *mqs.Message) error {
 	task := models.DeliveryTask{}
 
-	// Parse message
 	if err := task.FromMessage(msg); err != nil {
 		return h.handleError(msg, &PreDeliveryError{err: err})
 	}
@@ -142,13 +141,11 @@ func (h *messageHandler) Handle(ctx context.Context, msg *mqs.Message) error {
 		zap.String("destination_id", task.DestinationID),
 		zap.Int("attempt", task.Attempt))
 
-	// Get destination
 	destination, err := h.ensurePublishableDestination(ctx, task)
 	if err != nil {
 		return h.handleError(msg, &PreDeliveryError{err: err})
 	}
 
-	// Handle delivery
 	err = h.idempotence.Exec(ctx, idempotencyKeyFromDeliveryTask(task), func(ctx context.Context) error {
 		return h.doHandle(ctx, task, destination)
 	})
@@ -177,29 +174,29 @@ func (h *messageHandler) doHandle(ctx context.Context, task models.DeliveryTask,
 	_, span := h.eventTracer.Deliver(ctx, &task, destination)
 	defer span.End()
 
-	delivery, err := h.publisher.PublishEvent(ctx, destination, &task.Event)
+	attempt, err := h.publisher.PublishEvent(ctx, destination, &task.Event)
 	if err != nil {
-		// If delivery is nil, it means no delivery was made.
+		// If attempt is nil, it means no attempt was made.
 		// This is an unexpected error and considered a pre-delivery error. 
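// For orientation while reading this hunk, a minimal sketch (not part of the patch)
// of how code sitting next to these types could tell the three wrapper errors apart
// with the standard errors package; the helper name classifyHandlerError is invented.
func classifyHandlerError(err error) string {
	var preErr *PreDeliveryError
	var atmErr *AttemptError
	var postErr *PostDeliveryError
	switch {
	case errors.As(err, &preErr):
		return "pre-delivery" // failed before any publish attempt was made
	case errors.As(err, &atmErr):
		return "attempt" // a publish attempt was made and failed
	case errors.As(err, &postErr):
		return "post-delivery" // the attempt finished but logging/bookkeeping failed
	default:
		return "none"
	}
}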
- if delivery == nil { + if attempt == nil { return &PreDeliveryError{err: err} } h.logger.Ctx(ctx).Error("failed to publish event", zap.Error(err), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type)) - deliveryErr := &DeliveryError{err: err} + attemptErr := &AttemptError{err: err} if h.shouldScheduleRetry(task, err) { if retryErr := h.scheduleRetry(ctx, task); retryErr != nil { - return h.logDeliveryResult(ctx, &task, destination, delivery, errors.Join(err, retryErr)) + return h.logDeliveryResult(ctx, &task, destination, attempt, errors.Join(err, retryErr)) } } - return h.logDeliveryResult(ctx, &task, destination, delivery, deliveryErr) + return h.logDeliveryResult(ctx, &task, destination, attempt, attemptErr) } // Handle successful delivery @@ -208,52 +205,50 @@ func (h *messageHandler) doHandle(ctx context.Context, task models.DeliveryTask, if err := h.retryScheduler.Cancel(ctx, models.RetryID(task.Event.ID, task.DestinationID)); err != nil { h.logger.Ctx(ctx).Error("failed to cancel scheduled retry", zap.Error(err), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), zap.String("retry_id", models.RetryID(task.Event.ID, task.DestinationID))) - return h.logDeliveryResult(ctx, &task, destination, delivery, err) + return h.logDeliveryResult(ctx, &task, destination, attempt, err) } logger.Audit("scheduled retry canceled", - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), zap.String("retry_id", models.RetryID(task.Event.ID, task.DestinationID))) } - return h.logDeliveryResult(ctx, &task, destination, delivery, nil) + return h.logDeliveryResult(ctx, &task, destination, attempt, nil) } -func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, delivery *models.Delivery, err error) error { +func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, attempt *models.Attempt, err error) error { logger := h.logger.Ctx(ctx) - // Set delivery fields from task - delivery.TenantID = task.Event.TenantID - delivery.Attempt = task.Attempt - delivery.Manual = task.Manual + attempt.TenantID = task.Event.TenantID + attempt.AttemptNumber = task.Attempt + attempt.Manual = task.Manual logger.Audit("event delivered", - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), zap.String("destination_type", destination.Type), - zap.String("delivery_status", delivery.Status), + zap.String("attempt_status", attempt.Status), zap.Int("attempt", task.Attempt), zap.Bool("manual", task.Manual)) - // Publish delivery log logEntry := models.LogEntry{ - Event: &task.Event, - Delivery: delivery, + Event: &task.Event, + Attempt: attempt, } if logErr := h.logMQ.Publish(ctx, logEntry); logErr != nil { - logger.Error("failed 
to publish delivery log", + logger.Error("failed to publish attempt log", zap.Error(logErr), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attempt.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", task.Event.TenantID), zap.String("destination_id", destination.ID), @@ -264,12 +259,11 @@ func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.Del return &PostDeliveryError{err: logErr} } - // Call alert monitor in goroutine - go h.handleAlertAttempt(ctx, task, destination, delivery, err) + go h.handleAlertAttempt(ctx, task, destination, attempt, err) - // If we have a DeliveryError, return it as is - var delErr *DeliveryError - if errors.As(err, &delErr) { + // If we have an AttemptError, return it as is + var atmErr *AttemptError + if errors.As(err, &atmErr) { return err } @@ -287,9 +281,9 @@ func (h *messageHandler) logDeliveryResult(ctx context.Context, task *models.Del return nil } -func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, delivery *models.Delivery, err error) { - attempt := alert.DeliveryAttempt{ - Success: delivery.Status == models.DeliveryStatusSuccess, +func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.DeliveryTask, destination *models.Destination, attemptResult *models.Attempt, err error) { + alertAttempt := alert.DeliveryAttempt{ + Success: attemptResult.Status == models.AttemptStatusSuccess, DeliveryTask: task, Destination: &alert.AlertDestination{ ID: destination.ID, @@ -300,33 +294,33 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.De CreatedAt: destination.CreatedAt, DisabledAt: destination.DisabledAt, }, - Timestamp: delivery.Time, + Timestamp: attemptResult.Time, } - if !attempt.Success && err != nil { + if !alertAttempt.Success && err != nil { // Extract attempt data if available - var delErr *DeliveryError - if errors.As(err, &delErr) { + var atmErr *AttemptError + if errors.As(err, &atmErr) { var pubErr *destregistry.ErrDestinationPublishAttempt - if errors.As(delErr.err, &pubErr) { - attempt.DeliveryResponse = pubErr.Data + if errors.As(atmErr.err, &pubErr) { + alertAttempt.DeliveryResponse = pubErr.Data } else { - attempt.DeliveryResponse = map[string]interface{}{ - "error": delErr.err.Error(), + alertAttempt.DeliveryResponse = map[string]interface{}{ + "error": atmErr.err.Error(), } } } else { - attempt.DeliveryResponse = map[string]interface{}{ + alertAttempt.DeliveryResponse = map[string]interface{}{ "error": "unexpected", "message": err.Error(), } } } - if monitorErr := h.alertMonitor.HandleAttempt(ctx, attempt); monitorErr != nil { + if monitorErr := h.alertMonitor.HandleAttempt(ctx, alertAttempt); monitorErr != nil { h.logger.Ctx(ctx).Error("failed to handle alert attempt", zap.Error(monitorErr), - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attemptResult.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", destination.TenantID), zap.String("destination_id", destination.ID), @@ -335,7 +329,7 @@ func (h *messageHandler) handleAlertAttempt(ctx context.Context, task *models.De } h.logger.Ctx(ctx).Info("alert attempt handled", - zap.String("delivery_id", delivery.ID), + zap.String("attempt_id", attemptResult.ID), zap.String("event_id", task.Event.ID), zap.String("tenant_id", destination.TenantID), zap.String("destination_id", destination.ID), @@ -372,18 +366,18 @@ func (h *messageHandler) shouldNackError(err error) bool { } // 
Handle delivery errors - var delErr *DeliveryError - if errors.As(err, &delErr) { - return h.shouldNackDeliveryError(delErr.err) + var atmErr *AttemptError + if errors.As(err, &atmErr) { + return h.shouldNackDeliveryError(atmErr.err) } // Handle post-delivery errors var postErr *PostDeliveryError if errors.As(err, &postErr) { // Check if this wraps a delivery error - var delErr *DeliveryError - if errors.As(postErr.err, &delErr) { - return h.shouldNackDeliveryError(delErr.err) + var atmErr2 *AttemptError + if errors.As(postErr.err, &atmErr2) { + return h.shouldNackDeliveryError(atmErr2.err) } return true // Nack other post-delivery errors } diff --git a/internal/deliverymq/messagehandler_test.go b/internal/deliverymq/messagehandler_test.go index 8ccf8b1f..0a3a5d75 100644 --- a/internal/deliverymq/messagehandler_test.go +++ b/internal/deliverymq/messagehandler_test.go @@ -256,7 +256,7 @@ func TestMessageHandler_PublishError_EligibleForRetry(t *testing.T) { assert.Equal(t, models.RetryID(task.Event.ID, task.DestinationID), retryScheduler.taskIDs[0], "should use GetRetryID for task ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") + assert.Equal(t, models.AttemptStatusFailed, logPublisher.entries[0].Attempt.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -324,7 +324,7 @@ func TestMessageHandler_PublishError_NotEligible(t *testing.T) { assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "should only attempt once") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") + assert.Equal(t, models.AttemptStatusFailed, logPublisher.entries[0].Attempt.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -384,7 +384,7 @@ func TestMessageHandler_RetryFlow(t *testing.T) { assert.Empty(t, retryScheduler.schedules, "no retry should be scheduled") assert.Equal(t, 1, publisher.current, "publish should succeed once") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") + assert.Equal(t, models.AttemptStatusSuccess, logPublisher.entries[0].Attempt.Status, "delivery status should be OK") } func TestMessageHandler_Idempotency(t *testing.T) { @@ -831,7 +831,7 @@ func TestManualDelivery_PublishError(t *testing.T) { assert.Equal(t, 1, publisher.current, "should attempt publish once") assert.Empty(t, retryScheduler.schedules, "should not schedule retry for manual delivery") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusFailed, logPublisher.entries[0].Delivery.Status, "delivery status should be Failed") + assert.Equal(t, models.AttemptStatusFailed, logPublisher.entries[0].Attempt.Status, "delivery status should be Failed") assertAlertMonitor(t, alertMonitor, false, &destination, publishErr.Data) } @@ -894,7 +894,7 @@ func TestManualDelivery_CancelError(t *testing.T) { assert.Len(t, retryScheduler.canceled, 1, "should attempt to cancel retry") assert.Equal(t, models.RetryID(task.Event.ID, task.DestinationID), retryScheduler.canceled[0], "should 
cancel with correct retry ID") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK despite cancel error") + assert.Equal(t, models.AttemptStatusSuccess, logPublisher.entries[0].Attempt.Status, "delivery status should be OK despite cancel error") assertAlertMonitor(t, alertMonitor, true, &destination, nil) } @@ -1077,7 +1077,7 @@ func TestMessageHandler_AlertMonitorError(t *testing.T) { assert.False(t, mockMsg.nacked, "message should not be nacked despite alert monitor error") assert.Equal(t, 1, publisher.current, "should publish once") require.Len(t, logPublisher.entries, 1, "should have one delivery") - assert.Equal(t, models.DeliveryStatusSuccess, logPublisher.entries[0].Delivery.Status, "delivery status should be OK") + assert.Equal(t, models.AttemptStatusSuccess, logPublisher.entries[0].Attempt.Status, "delivery status should be OK") // Verify alert monitor was called but error was ignored // Wait for the HandleAttempt call to be made diff --git a/internal/deliverymq/mock_test.go b/internal/deliverymq/mock_test.go index b96e4eb3..d90f6ea8 100644 --- a/internal/deliverymq/mock_test.go +++ b/internal/deliverymq/mock_test.go @@ -25,17 +25,17 @@ func newMockPublisher(responses []error) *mockPublisher { return &mockPublisher{responses: responses} } -func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) { +func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) { m.mu.Lock() defer m.mu.Unlock() if m.current >= len(m.responses) { m.current++ - return &models.Delivery{ - ID: idgen.Delivery(), + return &models.Attempt{ + ID: idgen.Attempt(), EventID: event.ID, DestinationID: destination.ID, - Status: models.DeliveryStatusSuccess, + Status: models.AttemptStatusSuccess, Code: "OK", ResponseData: map[string]interface{}{}, Time: time.Now(), @@ -45,21 +45,21 @@ func (m *mockPublisher) PublishEvent(ctx context.Context, destination *models.De resp := m.responses[m.current] m.current++ if resp == nil { - return &models.Delivery{ - ID: idgen.Delivery(), + return &models.Attempt{ + ID: idgen.Attempt(), EventID: event.ID, DestinationID: destination.ID, - Status: models.DeliveryStatusSuccess, + Status: models.AttemptStatusSuccess, Code: "OK", ResponseData: map[string]interface{}{}, Time: time.Now(), }, nil } - return &models.Delivery{ - ID: idgen.Delivery(), + return &models.Attempt{ + ID: idgen.Attempt(), EventID: event.ID, DestinationID: destination.ID, - Status: models.DeliveryStatusFailed, + Status: models.AttemptStatusFailed, Code: "ERR", ResponseData: map[string]interface{}{}, Time: time.Now(), diff --git a/internal/deliverymq/retry.go b/internal/deliverymq/retry.go index cfb3985c..48b69a30 100644 --- a/internal/deliverymq/retry.go +++ b/internal/deliverymq/retry.go @@ -38,19 +38,17 @@ func WithRetryVisibilityTimeout(vt uint) RetrySchedulerOption { } func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, deploymentID string, pollBackoff time.Duration, logger *logging.Logger, eventGetter RetryEventGetter, opts ...RetrySchedulerOption) (scheduler.Scheduler, error) { - // Apply options cfg := &retrySchedulerConfig{} for _, opt := range opts { opt(cfg) } - // Create Redis client for RSMQ + ctx := context.Background() redisClient, err := redis.New(ctx, redisConfig) if err != nil { return 
nil, fmt.Errorf("failed to create Redis client for retry scheduler: %w", err) } - // Create RSMQ adapter adapter := rsmq.NewRedisAdapter(redisClient) // Construct RSMQ namespace with deployment prefix if provided @@ -61,7 +59,6 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d namespace = fmt.Sprintf("%s:rsmq", deploymentID) } - // Create RSMQ client with deployment-aware namespace var rsmqClient *rsmq.RedisSMQ if logger != nil { rsmqClient = rsmq.NewRedisSMQ(adapter, namespace, logger) @@ -69,7 +66,6 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d rsmqClient = rsmq.NewRedisSMQ(adapter, namespace) } - // Define execution function exec := func(ctx context.Context, msg string) error { retryTask := RetryTask{} if err := retryTask.FromString(msg); err != nil { @@ -111,7 +107,6 @@ func NewRetryScheduler(deliverymq *DeliveryMQ, redisConfig *redis.RedisConfig, d return nil } - // Build scheduler options - pass visibility timeout if configured if cfg.visibilityTimeout > 0 { return scheduler.New("deliverymq-retry", rsmqClient, exec, scheduler.WithPollBackoff(pollBackoff), diff --git a/internal/destinationmockserver/model.go b/internal/destinationmockserver/model.go index d4281fd9..dc638874 100644 --- a/internal/destinationmockserver/model.go +++ b/internal/destinationmockserver/model.go @@ -195,7 +195,6 @@ func verifySignature(secret string, payload []byte, signature string, algorithm return false } - // Create a new signature manager with the secret secrets := []destwebhook.WebhookSecret{ { Key: secret, @@ -209,7 +208,6 @@ func verifySignature(secret string, payload []byte, signature string, algorithm destwebhook.WithAlgorithm(destwebhook.GetAlgorithm(algorithm)), ) - // Try each signature for _, sig := range signatures { if sm.VerifySignature(sig, secret, destwebhook.SignaturePayload{ Body: string(payload), diff --git a/internal/destinationmockserver/server.go b/internal/destinationmockserver/server.go index 3b2b6664..9821a83a 100644 --- a/internal/destinationmockserver/server.go +++ b/internal/destinationmockserver/server.go @@ -23,7 +23,6 @@ type DestinationMockServer struct { func (s *DestinationMockServer) Run(ctx context.Context) error { go func() { - // service connections if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { s.logger.Fatal("listen: %s\n", zap.Error(err)) } diff --git a/internal/destregistry/registry.go b/internal/destregistry/registry.go index 8b670da0..b11763f4 100644 --- a/internal/destregistry/registry.go +++ b/internal/destregistry/registry.go @@ -25,7 +25,7 @@ type PreprocessDestinationOpts struct { type Registry interface { // Operations ValidateDestination(ctx context.Context, destination *models.Destination) error - PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) + PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) DisplayDestination(destination *models.Destination) (*DestinationDisplay, error) PreprocessDestination(newDestination *models.Destination, originalDestination *models.Destination, opts *PreprocessDestinationOpts) error @@ -135,14 +135,14 @@ func (r *registry) ValidateDestination(ctx context.Context, destination *models. 
return nil } -func (r *registry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Delivery, error) { +func (r *registry) PublishEvent(ctx context.Context, destination *models.Destination, event *models.Event) (*models.Attempt, error) { publisher, err := r.ResolvePublisher(ctx, destination) if err != nil { return nil, err } - delivery := &models.Delivery{ - ID: idgen.Delivery(), + attempt := &models.Attempt{ + ID: idgen.Attempt(), DestinationID: destination.ID, EventID: event.ID, } @@ -153,7 +153,7 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina deliveryData, err := publisher.Publish(timeoutCtx, event) if err != nil { - // Context canceled = system shutdown, return nil delivery to trigger nack → requeue. + // Context canceled = system shutdown, return nil attempt to trigger nack → requeue. // This is handled centrally so individual publishers don't need to check for it. // See: https://github.com/hookdeck/outpost/issues/571 if errors.Is(err, context.Canceled) { @@ -161,18 +161,18 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina } if deliveryData != nil { - delivery.Time = time.Now() - delivery.Status = deliveryData.Status - delivery.Code = deliveryData.Code - delivery.ResponseData = deliveryData.Response + attempt.Time = time.Now() + attempt.Status = deliveryData.Status + attempt.Code = deliveryData.Code + attempt.ResponseData = deliveryData.Response } else { - delivery = nil + attempt = nil } var publishErr *ErrDestinationPublishAttempt if errors.As(err, &publishErr) { // Check if the wrapped error is a timeout if errors.Is(publishErr.Err, context.DeadlineExceeded) { - return delivery, &ErrDestinationPublishAttempt{ + return attempt, &ErrDestinationPublishAttempt{ Err: publishErr.Err, Provider: destination.Type, Data: map[string]interface{}{ @@ -181,11 +181,11 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina }, } } - return delivery, publishErr + return attempt, publishErr } if errors.Is(err, context.DeadlineExceeded) { - return delivery, &ErrDestinationPublishAttempt{ + return attempt, &ErrDestinationPublishAttempt{ Err: err, Provider: destination.Type, Data: map[string]interface{}{ @@ -195,7 +195,7 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina } } - return delivery, &ErrDestinationPublishAttempt{ + return attempt, &ErrDestinationPublishAttempt{ Err: err, Provider: destination.Type, Data: map[string]interface{}{ @@ -217,12 +217,12 @@ func (r *registry) PublishEvent(ctx context.Context, destination *models.Destina } } - delivery.Time = time.Now() - delivery.Status = deliveryData.Status - delivery.Code = deliveryData.Code - delivery.ResponseData = deliveryData.Response + attempt.Time = time.Now() + attempt.Status = deliveryData.Status + attempt.Code = deliveryData.Code + attempt.ResponseData = deliveryData.Response - return delivery, nil + return attempt, nil } func (r *registry) RegisterProvider(destinationType string, provider Provider) error { diff --git a/internal/emetrics/emetrics.go b/internal/emetrics/emetrics.go index 26c0e3cb..5b2f3223 100644 --- a/internal/emetrics/emetrics.go +++ b/internal/emetrics/emetrics.go @@ -102,9 +102,9 @@ func (e *emetricsImpl) DeliveryLatency(ctx context.Context, latency time.Duratio func (e *emetricsImpl) EventDelivered(ctx context.Context, ok bool, destinationType string) { var status string if ok { - status = models.DeliveryStatusSuccess + status = 
models.AttemptStatusSuccess } else { - status = models.DeliveryStatusFailed + status = models.AttemptStatusFailed } e.eventDeliveredCounter.Add(ctx, 1, metric.WithAttributes( attribute.String("type", destinationType), diff --git a/internal/idgen/idgen.go b/internal/idgen/idgen.go index 1fff29d0..b1cf5841 100644 --- a/internal/idgen/idgen.go +++ b/internal/idgen/idgen.go @@ -17,7 +17,7 @@ func init() { generator: &uuidv4Generator{}, eventPrefix: "", destinationPrefix: "", - deliveryPrefix: "", + attemptPrefix: "", } } @@ -29,7 +29,7 @@ type IDGenerator struct { generator idGenerator eventPrefix string destinationPrefix string - deliveryPrefix string + attemptPrefix string } func (g *IDGenerator) Event() string { @@ -40,8 +40,8 @@ func (g *IDGenerator) Destination() string { return g.generate(g.destinationPrefix) } -func (g *IDGenerator) Delivery() string { - return g.generate(g.deliveryPrefix) +func (g *IDGenerator) Attempt() string { + return g.generate(g.attemptPrefix) } func (g *IDGenerator) Installation() string { @@ -62,7 +62,6 @@ func newIDGenerator(idType string) (idGenerator, error) { idType = "uuidv4" } - // Select the appropriate generator implementation switch idType { case "uuidv4": return &uuidv4Generator{}, nil @@ -110,7 +109,7 @@ type IDGenConfig struct { Type string EventPrefix string DestinationPrefix string - DeliveryPrefix string + AttemptPrefix string } func Configure(cfg IDGenConfig) error { @@ -123,7 +122,7 @@ func Configure(cfg IDGenConfig) error { generator: gen, eventPrefix: cfg.EventPrefix, destinationPrefix: cfg.DestinationPrefix, - deliveryPrefix: cfg.DeliveryPrefix, + attemptPrefix: cfg.AttemptPrefix, } return nil @@ -137,8 +136,8 @@ func Destination() string { return globalGenerator.Destination() } -func Delivery() string { - return globalGenerator.Delivery() +func Attempt() string { + return globalGenerator.Attempt() } func Installation() string { diff --git a/internal/logmq/batchprocessor.go b/internal/logmq/batchprocessor.go index 472931a8..cfefecaf 100644 --- a/internal/logmq/batchprocessor.go +++ b/internal/logmq/batchprocessor.go @@ -13,7 +13,7 @@ import ( ) // ErrInvalidLogEntry is returned when a LogEntry is missing required fields. -var ErrInvalidLogEntry = errors.New("invalid log entry: both event and delivery are required") +var ErrInvalidLogEntry = errors.New("invalid log entry: both event and attempt are required") // LogStore defines the interface for persisting log entries. // This is a consumer-defined interface containing only what logmq needs. @@ -87,12 +87,12 @@ func (bp *BatchProcessor) processBatch(_ string, msgs []*mqs.Message) { continue } - // Validate that both Event and Delivery are present. + // Validate that both Event and Attempt are present. // The logstore requires both for data consistency. 
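(Aside, for orientation: a producer-side sketch of an entry that satisfies this check. models.LogEntry and ErrInvalidLogEntry come from this diff; the helper function itself is hypothetical.)

// Sketch only: both pointers must be non-nil, otherwise the consumer logs and nacks the message.
func newLogEntry(event *models.Event, attempt *models.Attempt) (models.LogEntry, error) {
	if event == nil || attempt == nil {
		return models.LogEntry{}, ErrInvalidLogEntry
	}
	return models.LogEntry{Event: event, Attempt: attempt}, nil
}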
- if entry.Event == nil || entry.Delivery == nil { - logger.Error("invalid log entry: both event and delivery are required", + if entry.Event == nil || entry.Attempt == nil { + logger.Error("invalid log entry: both event and attempt are required", zap.Bool("has_event", entry.Event != nil), - zap.Bool("has_delivery", entry.Delivery != nil), + zap.Bool("has_attempt", entry.Attempt != nil), zap.String("message_id", msg.LoggableID)) msg.Nack() continue diff --git a/internal/logmq/batchprocessor_test.go b/internal/logmq/batchprocessor_test.go index 12dc5d63..74e8473d 100644 --- a/internal/logmq/batchprocessor_test.go +++ b/internal/logmq/batchprocessor_test.go @@ -31,14 +31,14 @@ func (m *mockLogStore) InsertMany(ctx context.Context, entries []*models.LogEntr return nil } -func (m *mockLogStore) getInserted() (events []*models.Event, deliveries []*models.Delivery) { +func (m *mockLogStore) getInserted() (events []*models.Event, attempts []*models.Attempt) { m.mu.Lock() defer m.mu.Unlock() for _, entry := range m.entries { events = append(events, entry.Event) - deliveries = append(deliveries, entry.Delivery) + attempts = append(attempts, entry.Attempt) } - return events, deliveries + return events, attempts } // mockQueueMessage implements mqs.QueueMessage for testing. @@ -84,10 +84,10 @@ func TestBatchProcessor_ValidEntry(t *testing.T) { defer bp.Shutdown() event := testutil.EventFactory.Any() - delivery := testutil.DeliveryFactory.Any() + attempt := testutil.AttemptFactory.Any() entry := models.LogEntry{ - Event: &event, - Delivery: &delivery, + Event: &event, + Attempt: &attempt, } mock, msg := newMockMessage(entry) @@ -100,9 +100,9 @@ func TestBatchProcessor_ValidEntry(t *testing.T) { assert.True(t, mock.acked, "valid message should be acked") assert.False(t, mock.nacked, "valid message should not be nacked") - events, deliveries := logStore.getInserted() + events, attempts := logStore.getInserted() assert.Len(t, events, 1) - assert.Len(t, deliveries, 1) + assert.Len(t, attempts, 1) } func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { @@ -117,10 +117,10 @@ func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { require.NoError(t, err) defer bp.Shutdown() - delivery := testutil.DeliveryFactory.Any() + attempt := testutil.AttemptFactory.Any() entry := models.LogEntry{ - Event: nil, // Missing event - Delivery: &delivery, + Event: nil, // Missing event + Attempt: &attempt, } mock, msg := newMockMessage(entry) @@ -133,12 +133,12 @@ func TestBatchProcessor_InvalidEntry_MissingEvent(t *testing.T) { assert.False(t, mock.acked, "invalid message should not be acked") assert.True(t, mock.nacked, "invalid message should be nacked") - events, deliveries := logStore.getInserted() + events, attempts := logStore.getInserted() assert.Empty(t, events, "no events should be inserted for invalid entry") - assert.Empty(t, deliveries, "no deliveries should be inserted for invalid entry") + assert.Empty(t, attempts, "no attempts should be inserted for invalid entry") } -func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { +func TestBatchProcessor_InvalidEntry_MissingAttempt(t *testing.T) { ctx := context.Background() logger := testutil.CreateTestLogger(t) logStore := &mockLogStore{} @@ -152,8 +152,8 @@ func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { event := testutil.EventFactory.Any() entry := models.LogEntry{ - Event: &event, - Delivery: nil, // Missing delivery + Event: &event, + Attempt: nil, // Missing attempt } mock, msg := newMockMessage(entry) 
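(Aside, referring back to the idgen rename earlier in this diff: a minimal sketch of wiring the new prefix. Configure, IDGenConfig, and Attempt come from this diff; the import path and the prefix value are assumptions.)

package main

import (
	"fmt"

	"github.com/hookdeck/outpost/internal/idgen" // import path assumed
)

func main() {
	// Sketch only: the prefix value is illustrative, not taken from this change.
	if err := idgen.Configure(idgen.IDGenConfig{
		Type:          "uuidv4",
		AttemptPrefix: "att",
	}); err != nil {
		panic(err)
	}
	fmt.Println(idgen.Attempt()) // replaces the old idgen.Delivery()
}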
@@ -166,9 +166,9 @@ func TestBatchProcessor_InvalidEntry_MissingDelivery(t *testing.T) { assert.False(t, mock.acked, "invalid message should not be acked") assert.True(t, mock.nacked, "invalid message should be nacked") - events, deliveries := logStore.getInserted() + events, attempts := logStore.getInserted() assert.Empty(t, events, "no events should be inserted for invalid entry") - assert.Empty(t, deliveries, "no deliveries should be inserted for invalid entry") + assert.Empty(t, attempts, "no attempts should be inserted for invalid entry") } func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { @@ -185,19 +185,19 @@ func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { // Create valid entry 1 event1 := testutil.EventFactory.Any() - delivery1 := testutil.DeliveryFactory.Any() - validEntry1 := models.LogEntry{Event: &event1, Delivery: &delivery1} + attempt1 := testutil.AttemptFactory.Any() + validEntry1 := models.LogEntry{Event: &event1, Attempt: &attempt1} mock1, msg1 := newMockMessage(validEntry1) // Create invalid entry (missing event) - delivery2 := testutil.DeliveryFactory.Any() - invalidEntry := models.LogEntry{Event: nil, Delivery: &delivery2} + attempt2 := testutil.AttemptFactory.Any() + invalidEntry := models.LogEntry{Event: nil, Attempt: &attempt2} mock2, msg2 := newMockMessage(invalidEntry) // Create valid entry 2 event3 := testutil.EventFactory.Any() - delivery3 := testutil.DeliveryFactory.Any() - validEntry2 := models.LogEntry{Event: &event3, Delivery: &delivery3} + attempt3 := testutil.AttemptFactory.Any() + validEntry2 := models.LogEntry{Event: &event3, Attempt: &attempt3} mock3, msg3 := newMockMessage(validEntry2) // Add all messages @@ -221,9 +221,9 @@ func TestBatchProcessor_InvalidEntry_DoesNotBlockBatch(t *testing.T) { assert.False(t, mock3.nacked, "valid message 2 should not be nacked") // Only valid entries should be inserted - events, deliveries := logStore.getInserted() + events, attempts := logStore.getInserted() assert.Len(t, events, 2, "only 2 valid events should be inserted") - assert.Len(t, deliveries, 2, "only 2 valid deliveries should be inserted") + assert.Len(t, attempts, 2, "only 2 valid attempts should be inserted") } func TestBatchProcessor_MalformedJSON(t *testing.T) { @@ -248,7 +248,7 @@ func TestBatchProcessor_MalformedJSON(t *testing.T) { assert.False(t, mock.acked, "malformed message should not be acked") assert.True(t, mock.nacked, "malformed message should be nacked") - events, deliveries := logStore.getInserted() + events, attempts := logStore.getInserted() assert.Empty(t, events) - assert.Empty(t, deliveries) + assert.Empty(t, attempts) } diff --git a/internal/logstore/chlogstore/chlogstore.go b/internal/logstore/chlogstore/chlogstore.go index 0f42117f..eadbbd4f 100644 --- a/internal/logstore/chlogstore/chlogstore.go +++ b/internal/logstore/chlogstore/chlogstore.go @@ -16,15 +16,15 @@ import ( ) const ( - cursorResourceEvent = "evt" - cursorResourceDelivery = "dlv" - cursorVersion = 1 + cursorResourceEvent = "evt" + cursorResourceAttempt = "att" + cursorVersion = 1 ) type logStoreImpl struct { - chDB clickhouse.DB - eventsTable string - deliveriesTable string + chDB clickhouse.DB + eventsTable string + attemptsTable string } var _ driver.LogStore = (*logStoreImpl)(nil) @@ -35,9 +35,9 @@ func NewLogStore(chDB clickhouse.DB, deploymentID string) driver.LogStore { prefix = deploymentID + "_" } return &logStoreImpl{ - chDB: chDB, - eventsTable: prefix + "events", - deliveriesTable: prefix + "deliveries", + chDB: 
chDB, + eventsTable: prefix + "events", + attemptsTable: prefix + "attempts", } } @@ -255,13 +255,13 @@ func buildEventCursorCondition(compare, position string) (string, []any) { return condition, []any{eventTimeMs, eventTimeMs, eventID} } -// deliveryRecordWithPosition wraps a delivery record with its cursor position data. -type deliveryRecordWithPosition struct { - *driver.DeliveryRecord - deliveryTime time.Time +// attemptRecordWithPosition wraps an attempt record with its cursor position data. +type attemptRecordWithPosition struct { + *driver.AttemptRecord + attemptTime time.Time } -func (s *logStoreImpl) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { +func (s *logStoreImpl) ListAttempt(ctx context.Context, req driver.ListAttemptRequest) (driver.ListAttemptResponse, error) { sortOrder := req.SortOrder if sortOrder != "asc" && sortOrder != "desc" { sortOrder = "desc" @@ -272,48 +272,48 @@ func (s *logStoreImpl) ListDelivery(ctx context.Context, req driver.ListDelivery limit = 100 } - res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithPosition]{ + res, err := pagination.Run(ctx, pagination.Config[attemptRecordWithPosition]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryRecordWithPosition, error) { - query, args := buildDeliveryQuery(s.deliveriesTable, req, q) + Fetch: func(ctx context.Context, q pagination.QueryInput) ([]attemptRecordWithPosition, error) { + query, args := buildAttemptQuery(s.attemptsTable, req, q) rows, err := s.chDB.Query(ctx, query, args...) if err != nil { return nil, fmt.Errorf("query failed: %w", err) } defer rows.Close() - return scanDeliveryRecords(rows) + return scanAttemptRecords(rows) }, - Cursor: pagination.Cursor[deliveryRecordWithPosition]{ - Encode: func(dr deliveryRecordWithPosition) string { - position := fmt.Sprintf("%d::%s", dr.deliveryTime.UnixMilli(), dr.Delivery.ID) - return cursor.Encode(cursorResourceDelivery, cursorVersion, position) + Cursor: pagination.Cursor[attemptRecordWithPosition]{ + Encode: func(ar attemptRecordWithPosition) string { + position := fmt.Sprintf("%d::%s", ar.attemptTime.UnixMilli(), ar.Attempt.ID) + return cursor.Encode(cursorResourceAttempt, cursorVersion, position) }, Decode: func(c string) (string, error) { - return cursor.Decode(c, cursorResourceDelivery, cursorVersion) + return cursor.Decode(c, cursorResourceAttempt, cursorVersion) }, }, }) if err != nil { - return driver.ListDeliveryResponse{}, err + return driver.ListAttemptResponse{}, err } - // Extract delivery records from results - data := make([]*driver.DeliveryRecord, len(res.Items)) + // Extract attempt records from results + data := make([]*driver.AttemptRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.DeliveryRecord + data[i] = item.AttemptRecord } - return driver.ListDeliveryResponse{ + return driver.ListAttemptResponse{ Data: data, Next: res.Next, Prev: res.Prev, }, nil } -func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q pagination.QueryInput) (string, []any) { +func buildAttemptQuery(table string, req driver.ListAttemptRequest, q pagination.QueryInput) (string, []any) { var conditions []string var args []any @@ -343,24 +343,24 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati } if req.TimeFilter.GTE != nil { - conditions = append(conditions, "delivery_time >= ?") + conditions = append(conditions, "attempt_time >= 
?") args = append(args, *req.TimeFilter.GTE) } if req.TimeFilter.LTE != nil { - conditions = append(conditions, "delivery_time <= ?") + conditions = append(conditions, "attempt_time <= ?") args = append(args, *req.TimeFilter.LTE) } if req.TimeFilter.GT != nil { - conditions = append(conditions, "delivery_time > ?") + conditions = append(conditions, "attempt_time > ?") args = append(args, *req.TimeFilter.GT) } if req.TimeFilter.LT != nil { - conditions = append(conditions, "delivery_time < ?") + conditions = append(conditions, "attempt_time < ?") args = append(args, *req.TimeFilter.LT) } if q.CursorPos != "" { - cursorCond, cursorArgs := buildDeliveryCursorCondition(q.Compare, q.CursorPos) + cursorCond, cursorArgs := buildAttemptCursorCondition(q.Compare, q.CursorPos) conditions = append(conditions, cursorCond) args = append(args, cursorArgs...) } @@ -370,7 +370,7 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati whereClause = "1=1" } - orderByClause := fmt.Sprintf("ORDER BY delivery_time %s, delivery_id %s", + orderByClause := fmt.Sprintf("ORDER BY attempt_time %s, attempt_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) query := fmt.Sprintf(` @@ -383,13 +383,13 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati event_time, metadata, data, - delivery_id, + attempt_id, status, - delivery_time, + attempt_time, code, response_data, manual, - attempt + attempt_number FROM %s WHERE %s %s @@ -399,8 +399,8 @@ func buildDeliveryQuery(table string, req driver.ListDeliveryRequest, q paginati return query, args } -func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, error) { - var results []deliveryRecordWithPosition +func scanAttemptRecords(rows clickhouse.Rows) ([]attemptRecordWithPosition, error) { + var results []attemptRecordWithPosition for rows.Next() { var ( eventID string @@ -411,13 +411,13 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er eventTime time.Time metadataStr string dataStr string - deliveryID string + attemptID string status string - deliveryTime time.Time + attemptTime time.Time code string responseDataStr string manual bool - attempt uint32 + attemptNumber uint32 ) err := rows.Scan( @@ -429,13 +429,13 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er &eventTime, &metadataStr, &dataStr, - &deliveryID, + &attemptID, &status, - &deliveryTime, + &attemptTime, &code, &responseDataStr, &manual, - &attempt, + &attemptNumber, ) if err != nil { return nil, fmt.Errorf("scan failed: %w", err) @@ -461,17 +461,17 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er } } - results = append(results, deliveryRecordWithPosition{ - DeliveryRecord: &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + results = append(results, attemptRecordWithPosition{ + AttemptRecord: &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: int(attempt), + AttemptNumber: int(attemptNumber), Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -486,7 +486,7 @@ func scanDeliveryRecords(rows clickhouse.Rows) ([]deliveryRecordWithPosition, er Metadata: metadata, }, }, - deliveryTime: deliveryTime, + attemptTime: attemptTime, }) } @@ -528,7 +528,7 @@ func (s *logStoreImpl) RetrieveEvent(ctx context.Context, req driver.RetrieveEve data FROM 
%s WHERE %s - LIMIT 1`, s.deliveriesTable, whereClause) + LIMIT 1`, s.attemptsTable, whereClause) rows, err := s.chDB.Query(ctx, query, args...) if err != nil { @@ -570,7 +570,7 @@ func (s *logStoreImpl) RetrieveEvent(ctx context.Context, req driver.RetrieveEve return event, nil } -func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { +func (s *logStoreImpl) RetrieveAttempt(ctx context.Context, req driver.RetrieveAttemptRequest) (*driver.AttemptRecord, error) { var conditions []string var args []any @@ -579,8 +579,8 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve args = append(args, req.TenantID) } - conditions = append(conditions, "delivery_id = ?") - args = append(args, req.DeliveryID) + conditions = append(conditions, "attempt_id = ?") + args = append(args, req.AttemptID) whereClause := strings.Join(conditions, " AND ") @@ -594,16 +594,16 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve event_time, metadata, data, - delivery_id, + attempt_id, status, - delivery_time, + attempt_time, code, response_data, manual, - attempt + attempt_number FROM %s WHERE %s - LIMIT 1`, s.deliveriesTable, whereClause) + LIMIT 1`, s.attemptsTable, whereClause) rows, err := s.chDB.Query(ctx, query, args...) if err != nil { @@ -624,13 +624,13 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve eventTime time.Time metadataStr string dataStr string - deliveryID string + attemptID string status string - deliveryTime time.Time + attemptTime time.Time code string responseDataStr string manual bool - attempt uint32 + attemptNumber uint32 ) err = rows.Scan( @@ -642,13 +642,13 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve &eventTime, &metadataStr, &dataStr, - &deliveryID, + &attemptID, &status, - &deliveryTime, + &attemptTime, &code, &responseDataStr, &manual, - &attempt, + &attemptNumber, ) if err != nil { return nil, fmt.Errorf("scan failed: %w", err) @@ -674,16 +674,16 @@ func (s *logStoreImpl) RetrieveDelivery(ctx context.Context, req driver.Retrieve } } - return &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + return &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: int(attempt), + AttemptNumber: int(attemptNumber), Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -750,20 +750,20 @@ func (s *logStoreImpl) InsertMany(ctx context.Context, entries []*models.LogEntr } } - // Insert deliveries with their paired event data - deliveryBatch, err := s.chDB.PrepareBatch(ctx, + // Insert attempts with their paired event data + attemptBatch, err := s.chDB.PrepareBatch(ctx, fmt.Sprintf(`INSERT INTO %s ( event_id, tenant_id, destination_id, topic, eligible_for_retry, event_time, metadata, data, - delivery_id, status, delivery_time, code, response_data, manual, attempt - )`, s.deliveriesTable), + attempt_id, status, attempt_time, code, response_data, manual, attempt_number + )`, s.attemptsTable), ) if err != nil { - return fmt.Errorf("prepare deliveries batch failed: %w", err) + return fmt.Errorf("prepare attempts batch failed: %w", err) } for _, entry := range entries { event := entry.Event - d := entry.Delivery + a := entry.Attempt metadataJSON, err := json.Marshal(event.Metadata) if err != nil { @@ -773,34 +773,34 @@ 
func (s *logStoreImpl) InsertMany(ctx context.Context, entries []*models.LogEntr if err != nil { return fmt.Errorf("failed to marshal data: %w", err) } - responseDataJSON, err := json.Marshal(d.ResponseData) + responseDataJSON, err := json.Marshal(a.ResponseData) if err != nil { return fmt.Errorf("failed to marshal response_data: %w", err) } - if err := deliveryBatch.Append( - d.EventID, + if err := attemptBatch.Append( + a.EventID, event.TenantID, - d.DestinationID, + a.DestinationID, event.Topic, event.EligibleForRetry, event.Time, string(metadataJSON), string(dataJSON), - d.ID, - d.Status, - d.Time, - d.Code, + a.ID, + a.Status, + a.Time, + a.Code, string(responseDataJSON), - d.Manual, - uint32(d.Attempt), + a.Manual, + uint32(a.AttemptNumber), ); err != nil { - return fmt.Errorf("deliveries batch append failed: %w", err) + return fmt.Errorf("attempts batch append failed: %w", err) } } - if err := deliveryBatch.Send(); err != nil { - return fmt.Errorf("deliveries batch send failed: %w", err) + if err := attemptBatch.Send(); err != nil { + return fmt.Errorf("attempts batch send failed: %w", err) } return nil @@ -810,21 +810,21 @@ func parseTimestampMs(s string) (int64, error) { return strconv.ParseInt(s, 10, 64) } -func buildDeliveryCursorCondition(compare, position string) (string, []any) { +func buildAttemptCursorCondition(compare, position string) (string, []any) { parts := strings.SplitN(position, "::", 2) if len(parts) != 2 { return "1=1", nil } - deliveryTimeMs, err := parseTimestampMs(parts[0]) + attemptTimeMs, err := parseTimestampMs(parts[0]) if err != nil { return "1=1", nil // invalid timestamp, return always true } - deliveryID := parts[1] + attemptID := parts[1] condition := fmt.Sprintf(`( - delivery_time %s fromUnixTimestamp64Milli(?) - OR (delivery_time = fromUnixTimestamp64Milli(?) AND delivery_id %s ?) + attempt_time %s fromUnixTimestamp64Milli(?) + OR (attempt_time = fromUnixTimestamp64Milli(?) AND attempt_id %s ?) 
)`, compare, compare) - return condition, []any{deliveryTimeMs, deliveryTimeMs, deliveryID} + return condition, []any{attemptTimeMs, attemptTimeMs, attemptID} } diff --git a/internal/logstore/chlogstore/chlogstore_test.go b/internal/logstore/chlogstore/chlogstore_test.go index 4eaad64d..827c35c1 100644 --- a/internal/logstore/chlogstore/chlogstore_test.go +++ b/internal/logstore/chlogstore/chlogstore_test.go @@ -77,15 +77,15 @@ func (h *harness) Close() { func (h *harness) FlushWrites(ctx context.Context) error { // Force ClickHouse to merge parts and deduplicate rows on both tables eventsTable := "events" - deliveriesTable := "deliveries" + attemptsTable := "attempts" if h.deploymentID != "" { eventsTable = h.deploymentID + "_events" - deliveriesTable = h.deploymentID + "_deliveries" + attemptsTable = h.deploymentID + "_attempts" } if err := h.chDB.Exec(ctx, "OPTIMIZE TABLE "+eventsTable+" FINAL"); err != nil { return err } - return h.chDB.Exec(ctx, "OPTIMIZE TABLE "+deliveriesTable+" FINAL") + return h.chDB.Exec(ctx, "OPTIMIZE TABLE "+attemptsTable+" FINAL") } func (h *harness) MakeDriver(ctx context.Context) (driver.LogStore, error) { diff --git a/internal/logstore/driver/driver.go b/internal/logstore/driver/driver.go index b2ae46c4..ab86fa16 100644 --- a/internal/logstore/driver/driver.go +++ b/internal/logstore/driver/driver.go @@ -18,9 +18,9 @@ type TimeFilter struct { type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) - ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) + ListAttempt(context.Context, ListAttemptRequest) (ListAttemptResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) - RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) + RetrieveAttempt(ctx context.Context, request RetrieveAttemptRequest) (*AttemptRecord, error) InsertMany(context.Context, []*models.LogEntry) error } @@ -41,11 +41,11 @@ type ListEventResponse struct { Prev string } -type ListDeliveryRequest struct { +type ListAttemptRequest struct { Next string Prev string Limit int - TimeFilter TimeFilter // optional - filter deliveries by time + TimeFilter TimeFilter // optional - filter attempts by time TenantID string // optional - filter by tenant (if empty, returns all tenants) EventID string // optional - filter for specific event DestinationIDs []string // optional @@ -54,8 +54,8 @@ type ListDeliveryRequest struct { SortOrder string // optional: "asc", "desc" (default: "desc") } -type ListDeliveryResponse struct { - Data []*DeliveryRecord +type ListAttemptResponse struct { + Data []*AttemptRecord Next string Prev string } @@ -66,13 +66,13 @@ type RetrieveEventRequest struct { DestinationID string // optional - if provided, scopes to that destination } -type RetrieveDeliveryRequest struct { - TenantID string // optional - filter by tenant (if empty, searches all tenants) - DeliveryID string // required +type RetrieveAttemptRequest struct { + TenantID string // optional - filter by tenant (if empty, searches all tenants) + AttemptID string // required } -// DeliveryRecord represents a delivery query result with optional Event population. -type DeliveryRecord struct { - Delivery *models.Delivery - Event *models.Event // optionally populated for query results +// AttemptRecord represents an attempt query result with optional Event population. 
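(Aside, for orientation: a sketch of consuming the renamed read API. LogStore.ListAttempt, ListAttemptRequest, and AttemptRecord come from this diff; the wrapper function, package name, and import path are assumptions.)

package example

import (
	"context"

	"github.com/hookdeck/outpost/internal/logstore/driver" // import path assumed
)

// latestAttemptStatuses lists a tenant's most recent attempts and collects their
// statuses. An empty or invalid SortOrder falls back to "desc".
func latestAttemptStatuses(ctx context.Context, store driver.LogStore, tenantID string) ([]string, error) {
	res, err := store.ListAttempt(ctx, driver.ListAttemptRequest{
		TenantID:  tenantID,
		Limit:     10,
		SortOrder: "desc",
	})
	if err != nil {
		return nil, err
	}
	statuses := make([]string, 0, len(res.Data))
	for _, rec := range res.Data {
		// rec.Attempt carries the attempt; rec.Event may also be populated.
		statuses = append(statuses, rec.Attempt.Status)
	}
	return statuses, nil
}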
+type AttemptRecord struct { + Attempt *models.Attempt + Event *models.Event // optionally populated for query results } diff --git a/internal/logstore/drivertest/crud.go b/internal/logstore/drivertest/crud.go index 09c3e063..675b42aa 100644 --- a/internal/logstore/drivertest/crud.go +++ b/internal/logstore/drivertest/crud.go @@ -33,10 +33,10 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { startTime := baseTime.Add(-48 * time.Hour) // We'll populate these as we insert - var allDeliveries []*models.Delivery + var allDeliveries []*models.Attempt destinationEvents := map[string][]*models.Event{} topicEvents := map[string][]*models.Event{} - statusDeliveries := map[string][]*models.Delivery{} + statusDeliveries := map[string][]*models.Attempt{} t.Run("insert and verify", func(t *testing.T) { t.Run("single delivery", func(t *testing.T) { @@ -49,16 +49,16 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { testutil.EventFactory.WithTopic(topic), testutil.EventFactory.WithTime(baseTime.Add(-30*time.Minute)), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID("single_del"), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(baseTime.Add(-30*time.Minute)), + delivery := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID("single_del"), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(baseTime.Add(-30*time.Minute)), ) - err := logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}}) + err := logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: delivery}}) require.NoError(t, err) require.NoError(t, h.FlushWrites(ctx)) @@ -68,7 +68,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { statusDeliveries["success"] = append(statusDeliveries["success"], delivery) // Verify via List - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, EventID: event.ID, Limit: 10, @@ -77,7 +77,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { require.NoError(t, err) require.Len(t, response.Data, 1) assert.Equal(t, event.ID, response.Data[0].Event.ID) - assert.Equal(t, "success", response.Data[0].Delivery.Status) + assert.Equal(t, "success", response.Data[0].Attempt.Status) // Verify via Retrieve retrieved, err := logStore.RetrieveEvent(ctx, driver.RetrieveEventRequest{ @@ -109,16 +109,16 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { testutil.EventFactory.WithTopic(topic), testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("batch_del_%02d", i)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destID), - testutil.DeliveryFactory.WithStatus(status), - testutil.DeliveryFactory.WithTime(eventTime.Add(time.Millisecond)), + delivery := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("batch_del_%02d", i)), + testutil.AttemptFactory.WithTenantID(tenantID), + 
testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destID), + testutil.AttemptFactory.WithStatus(status), + testutil.AttemptFactory.WithTime(eventTime.Add(time.Millisecond)), ) - entries = append(entries, &models.LogEntry{Event: event, Delivery: delivery}) + entries = append(entries, &models.LogEntry{Event: event, Attempt: delivery}) allDeliveries = append(allDeliveries, delivery) destinationEvents[destID] = append(destinationEvents[destID], event) topicEvents[topic] = append(topicEvents[topic], event) @@ -130,7 +130,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { require.NoError(t, h.FlushWrites(ctx)) // Verify all inserted - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -203,9 +203,9 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { } }) - t.Run("ListDelivery by destination", func(t *testing.T) { + t.Run("ListAttempt by destination", func(t *testing.T) { destID := destinationIDs[0] - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: []string{destID}, Limit: 100, @@ -213,12 +213,12 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) for _, dr := range response.Data { - assert.Equal(t, destID, dr.Delivery.DestinationID) + assert.Equal(t, destID, dr.Attempt.DestinationID) } }) - t.Run("ListDelivery by status", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("ListAttempt by status", func(t *testing.T) { + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Status: "success", Limit: 100, @@ -226,13 +226,13 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { }) require.NoError(t, err) for _, dr := range response.Data { - assert.Equal(t, "success", dr.Delivery.Status) + assert.Equal(t, "success", dr.Attempt.Status) } }) - t.Run("ListDelivery by topic", func(t *testing.T) { + t.Run("ListAttempt by topic", func(t *testing.T) { topic := testutil.TestTopics[0] - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Topics: []string{topic}, Limit: 100, @@ -244,9 +244,9 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { } }) - t.Run("ListDelivery by event ID", func(t *testing.T) { + t.Run("ListAttempt by event ID", func(t *testing.T) { eventID := "batch_evt_00" - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, EventID: eventID, Limit: 100, @@ -261,7 +261,7 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { t.Run("retrieve", func(t *testing.T) { // Use one of our batch events for retrieve tests knownEventID := "batch_evt_00" - knownDeliveryID := "batch_del_00" + knownAttemptID := "batch_del_00" t.Run("RetrieveEvent existing", func(t *testing.T) { retrieved, err := logStore.RetrieveEvent(ctx, driver.RetrieveEventRequest{ @@ -303,29 +303,29 @@ func testCRUD(t *testing.T, newHarness HarnessMaker) { assert.Nil(t, retrieved) }) - t.Run("RetrieveDelivery existing", func(t *testing.T) { - retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: 
tenantID, - DeliveryID: knownDeliveryID, + t.Run("RetrieveAttempt existing", func(t *testing.T) { + retrieved, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: tenantID, + AttemptID: knownAttemptID, }) require.NoError(t, err) require.NotNil(t, retrieved) - assert.Equal(t, knownDeliveryID, retrieved.Delivery.ID) + assert.Equal(t, knownAttemptID, retrieved.Attempt.ID) }) - t.Run("RetrieveDelivery non-existent returns nil", func(t *testing.T) { - retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: tenantID, - DeliveryID: "non-existent-delivery", + t.Run("RetrieveAttempt non-existent returns nil", func(t *testing.T) { + retrieved, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: tenantID, + AttemptID: "non-existent-delivery", }) require.NoError(t, err) assert.Nil(t, retrieved) }) - t.Run("RetrieveDelivery wrong tenant returns nil", func(t *testing.T) { - retrieved, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: "wrong-tenant", - DeliveryID: knownDeliveryID, + t.Run("RetrieveAttempt wrong tenant returns nil", func(t *testing.T) { + retrieved, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: "wrong-tenant", + AttemptID: knownAttemptID, }) require.NoError(t, err) assert.Nil(t, retrieved) diff --git a/internal/logstore/drivertest/misc.go b/internal/logstore/drivertest/misc.go index 022b481e..b9dc9cbe 100644 --- a/internal/logstore/drivertest/misc.go +++ b/internal/logstore/drivertest/misc.go @@ -54,12 +54,12 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTopic("test.topic"), testutil.EventFactory.WithTime(baseTime.Add(-10*time.Minute)), ) - delivery1 := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID("tenant1-delivery"), - testutil.DeliveryFactory.WithEventID(event1.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(baseTime.Add(-10*time.Minute)), + attempt1 := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID("tenant1-delivery"), + testutil.AttemptFactory.WithEventID(event1.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(baseTime.Add(-10*time.Minute)), ) event2 := testutil.EventFactory.AnyPointer( @@ -69,23 +69,23 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTopic("test.topic"), testutil.EventFactory.WithTime(baseTime.Add(-5*time.Minute)), ) - delivery2 := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID("tenant2-delivery"), - testutil.DeliveryFactory.WithEventID(event2.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("failed"), - testutil.DeliveryFactory.WithTime(baseTime.Add(-5*time.Minute)), + attempt2 := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID("tenant2-delivery"), + testutil.AttemptFactory.WithEventID(event2.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("failed"), + testutil.AttemptFactory.WithTime(baseTime.Add(-5*time.Minute)), ) require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{ - {Event: event1, Delivery: delivery1}, - {Event: event2, Delivery: delivery2}, + {Event: event1, Attempt: 
attempt1}, + {Event: event2, Attempt: attempt2}, })) require.NoError(t, h.FlushWrites(ctx)) t.Run("TenantIsolation", func(t *testing.T) { - t.Run("ListDelivery isolates by tenant", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("ListAttempt isolates by tenant", func(t *testing.T) { + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenant1ID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -94,7 +94,7 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, require.Len(t, response.Data, 1) assert.Equal(t, "tenant1-event", response.Data[0].Event.ID) - response, err = logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err = logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenant2ID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -140,8 +140,8 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.True(t, tenantsSeen[tenant2ID]) }) - t.Run("ListDelivery returns all tenants when TenantID empty", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("ListAttempt returns all tenants when TenantID empty", func(t *testing.T) { + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: "", DestinationIDs: []string{destinationID}, Limit: 100, @@ -169,18 +169,18 @@ func testIsolation(t *testing.T, ctx context.Context, logStore driver.LogStore, assert.Equal(t, tenant2ID, retrieved2.TenantID) }) - t.Run("RetrieveDelivery finds delivery across tenants when TenantID empty", func(t *testing.T) { - retrieved1, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: "", - DeliveryID: "tenant1-delivery", + t.Run("RetrieveAttempt finds attempt across tenants when TenantID empty", func(t *testing.T) { + retrieved1, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: "", + AttemptID: "tenant1-delivery", }) require.NoError(t, err) require.NotNil(t, retrieved1) assert.Equal(t, tenant1ID, retrieved1.Event.TenantID) - retrieved2, err := logStore.RetrieveDelivery(ctx, driver.RetrieveDeliveryRequest{ - TenantID: "", - DeliveryID: "tenant2-delivery", + retrieved2, err := logStore.RetrieveAttempt(ctx, driver.RetrieveAttemptRequest{ + TenantID: "", + AttemptID: "tenant2-delivery", }) require.NoError(t, err) require.NotNil(t, retrieved2) @@ -203,21 +203,21 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("sort_del_%d", i)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("sort_del_%d", i)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithTime(baseTime.Add(-time.Duration(i)*time.Hour)), ) - entries = append(entries, &models.LogEntry{Event: event, Delivery: delivery}) + entries = append(entries, 
&models.LogEntry{Event: event, Attempt: attempt}) } require.NoError(t, logStore.InsertMany(ctx, entries)) startTime := baseTime.Add(-48 * time.Hour) t.Run("invalid SortOrder uses default (desc)", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "sideways", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -225,8 +225,8 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, }) require.NoError(t, err) require.Len(t, response.Data, 3) - assert.Equal(t, "sort_del_0", response.Data[0].Delivery.ID) - assert.Equal(t, "sort_del_2", response.Data[2].Delivery.ID) + assert.Equal(t, "sort_del_0", response.Data[0].Attempt.ID) + assert.Equal(t, "sort_del_2", response.Data[2].Attempt.ID) }) }) @@ -240,15 +240,15 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTopic("test.topic"), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: attempt}})) t.Run("nil DestinationIDs equals empty DestinationIDs", func(t *testing.T) { - responseNil, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + responseNil, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: nil, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -256,7 +256,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, }) require.NoError(t, err) - responseEmpty, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + responseEmpty, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: []string{}, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -296,18 +296,18 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, ) for _, evt := range []*models.Event{eventBefore, eventAt, eventAfter} { - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("del_%s", evt.ID)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(evt.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithTime(evt.Time), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("del_%s", evt.ID)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(evt.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithTime(evt.Time), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: evt, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: evt, Attempt: attempt}})) } t.Run("GTE is inclusive (>=)", func(t *testing.T) { - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + 
response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, TimeFilter: driver.TimeFilter{GTE: &boundaryTime}, Limit: 10, @@ -318,7 +318,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, t.Run("LTE is inclusive (<=)", func(t *testing.T) { farPast := boundaryTime.Add(-1 * time.Hour) - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, TimeFilter: driver.TimeFilter{GTE: &farPast, LTE: &boundaryTime}, Limit: 10, @@ -337,15 +337,15 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithTenantID(tenantID), testutil.EventFactory.WithDestinationID(destinationID), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: attempt}})) - t.Run("modifying ListDelivery result doesn't affect subsequent queries", func(t *testing.T) { - response1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("modifying ListAttempt result doesn't affect subsequent queries", func(t *testing.T) { + response1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 10, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -356,7 +356,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, originalID := response1.Data[0].Event.ID response1.Data[0].Event.ID = "MODIFIED" - response2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 10, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -371,7 +371,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, tenantID := idgen.String() destinationID := idgen.Destination() eventTime := time.Now().Add(-30 * time.Minute).Truncate(time.Second) - deliveryTime := eventTime.Add(1 * time.Second) + attemptTime := eventTime.Add(1 * time.Second) startTime := eventTime.Add(-1 * time.Hour) event := testutil.EventFactory.AnyPointer( @@ -379,14 +379,14 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTime(eventTime), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithStatus("success"), - testutil.DeliveryFactory.WithTime(deliveryTime), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithStatus("success"), + testutil.AttemptFactory.WithTime(attemptTime), ) - entries := []*models.LogEntry{{Event: event, Delivery: 
delivery}} + entries := []*models.LogEntry{{Event: event, Attempt: attempt}} // Race N goroutines all inserting the same record const numGoroutines = 10 @@ -402,7 +402,7 @@ func testEdgeCases(t *testing.T, ctx context.Context, logStore driver.LogStore, require.NoError(t, h.FlushWrites(ctx)) // Assert: still exactly 1 record - response, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + response, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -427,7 +427,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + _, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "desc", Next: tc.cursor, @@ -453,19 +453,19 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log testutil.EventFactory.WithDestinationID(destinationID), testutil.EventFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), ) - delivery := testutil.DeliveryFactory.AnyPointer( - testutil.DeliveryFactory.WithID(fmt.Sprintf("cursor_del_%d", i)), - testutil.DeliveryFactory.WithTenantID(tenantID), - testutil.DeliveryFactory.WithEventID(event.ID), - testutil.DeliveryFactory.WithDestinationID(destinationID), - testutil.DeliveryFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), + attempt := testutil.AttemptFactory.AnyPointer( + testutil.AttemptFactory.WithID(fmt.Sprintf("cursor_del_%d", i)), + testutil.AttemptFactory.WithTenantID(tenantID), + testutil.AttemptFactory.WithEventID(event.ID), + testutil.AttemptFactory.WithDestinationID(destinationID), + testutil.AttemptFactory.WithTime(baseTime.Add(time.Duration(i)*time.Second)), ) - require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Delivery: delivery}})) + require.NoError(t, logStore.InsertMany(ctx, []*models.LogEntry{{Event: event, Attempt: attempt}})) } require.NoError(t, h.FlushWrites(ctx)) t.Run("delivery_time desc", func(t *testing.T) { - page1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "desc", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -474,7 +474,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log require.NoError(t, err) require.NotEmpty(t, page1.Next) - page2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "desc", Next: page1.Next, @@ -486,7 +486,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log }) t.Run("delivery_time asc", func(t *testing.T) { - page1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "asc", TimeFilter: driver.TimeFilter{GTE: &startTime}, @@ -495,7 +495,7 @@ func testCursorValidation(t *testing.T, ctx context.Context, logStore driver.Log require.NoError(t, err) require.NotEmpty(t, page1.Next) - page2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + page2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, SortOrder: "asc", Next: page1.Next, diff --git a/internal/logstore/drivertest/pagination.go 
b/internal/logstore/drivertest/pagination.go index d1bc7f2e..67de5176 100644 --- a/internal/logstore/drivertest/pagination.go +++ b/internal/logstore/drivertest/pagination.go @@ -28,11 +28,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { baseTime := time.Now().Truncate(time.Second) farPast := baseTime.Add(-48 * time.Hour) - t.Run("ListDelivery", func(t *testing.T) { + t.Run("ListAttempt", func(t *testing.T) { var tenantID, destinationID, idPrefix string - suite := paginationtest.Suite[*driver.DeliveryRecord]{ - Name: "ListDelivery", + suite := paginationtest.Suite[*driver.AttemptRecord]{ + Name: "ListAttempt", Cleanup: func(ctx context.Context) error { tenantID = idgen.String() @@ -41,9 +41,9 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { return nil }, - NewItem: func(i int) *driver.DeliveryRecord { + NewItem: func(i int) *driver.AttemptRecord { eventTime := baseTime.Add(time.Duration(i) * time.Second) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) event := &models.Event{ ID: fmt.Sprintf("%s_evt_%03d", idPrefix, i), @@ -56,32 +56,32 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Data: map[string]any{}, } - delivery := &models.Delivery{ + attempt := &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: tenantID, EventID: event.ID, DestinationID: destinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", } - return &driver.DeliveryRecord{ - Event: event, - Delivery: delivery, + return &driver.AttemptRecord{ + Event: event, + Attempt: attempt, } }, - InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { + InsertMany: func(ctx context.Context, items []*driver.AttemptRecord) error { entries := make([]*models.LogEntry, len(items)) for i, dr := range items { - entries[i] = &models.LogEntry{Event: dr.Event, Delivery: dr.Delivery} + entries[i] = &models.LogEntry{Event: dr.Event, Attempt: dr.Attempt} } return logStore.InsertMany(ctx, entries) }, - List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.AttemptRecord], error) { + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: opts.Limit, SortOrder: opts.Order, @@ -90,17 +90,17 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { TimeFilter: driver.TimeFilter{GTE: &farPast}, }) if err != nil { - return paginationtest.ListResult[*driver.DeliveryRecord]{}, err + return paginationtest.ListResult[*driver.AttemptRecord]{}, err } - return paginationtest.ListResult[*driver.DeliveryRecord]{ + return paginationtest.ListResult[*driver.AttemptRecord]{ Items: res.Data, Next: res.Next, Prev: res.Prev, }, nil }, - GetID: func(dr *driver.DeliveryRecord) string { - return dr.Delivery.ID + GetID: func(dr *driver.AttemptRecord) string { + return dr.Attempt.ID }, AfterInsert: func(ctx context.Context) error { @@ -111,11 +111,11 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { suite.Run(t) }) - t.Run("ListDelivery_WithDestinationFilter", func(t *testing.T) { + t.Run("ListAttempt_WithDestinationFilter", func(t *testing.T) { var tenantID, targetDestID, otherDestID, idPrefix string - suite := paginationtest.Suite[*driver.DeliveryRecord]{ - Name: "ListDelivery_WithDestinationFilter", + 
suite := paginationtest.Suite[*driver.AttemptRecord]{ + Name: "ListAttempt_WithDestinationFilter", Cleanup: func(ctx context.Context) error { tenantID = idgen.String() @@ -125,9 +125,9 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { return nil }, - NewItem: func(i int) *driver.DeliveryRecord { + NewItem: func(i int) *driver.AttemptRecord { eventTime := baseTime.Add(time.Duration(i) * time.Second) - deliveryTime := eventTime.Add(100 * time.Millisecond) + attemptTime := eventTime.Add(100 * time.Millisecond) destID := targetDestID if i%2 == 1 { @@ -145,32 +145,32 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Data: map[string]any{}, } - delivery := &models.Delivery{ + attempt := &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: tenantID, EventID: event.ID, DestinationID: destID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", } - return &driver.DeliveryRecord{ - Event: event, - Delivery: delivery, + return &driver.AttemptRecord{ + Event: event, + Attempt: attempt, } }, - InsertMany: func(ctx context.Context, items []*driver.DeliveryRecord) error { + InsertMany: func(ctx context.Context, items []*driver.AttemptRecord) error { entries := make([]*models.LogEntry, len(items)) for i, dr := range items { - entries[i] = &models.LogEntry{Event: dr.Event, Delivery: dr.Delivery} + entries[i] = &models.LogEntry{Event: dr.Event, Attempt: dr.Attempt} } return logStore.InsertMany(ctx, entries) }, - List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.DeliveryRecord], error) { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + List: func(ctx context.Context, opts paginationtest.ListOpts) (paginationtest.ListResult[*driver.AttemptRecord], error) { + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, DestinationIDs: []string{targetDestID}, Limit: opts.Limit, @@ -180,21 +180,21 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { TimeFilter: driver.TimeFilter{GTE: &farPast}, }) if err != nil { - return paginationtest.ListResult[*driver.DeliveryRecord]{}, err + return paginationtest.ListResult[*driver.AttemptRecord]{}, err } - return paginationtest.ListResult[*driver.DeliveryRecord]{ + return paginationtest.ListResult[*driver.AttemptRecord]{ Items: res.Data, Next: res.Next, Prev: res.Prev, }, nil }, - GetID: func(dr *driver.DeliveryRecord) string { - return dr.Delivery.ID + GetID: func(dr *driver.AttemptRecord) string { + return dr.Attempt.ID }, - Matches: func(dr *driver.DeliveryRecord) bool { - return dr.Delivery.DestinationID == targetDestID + Matches: func(dr *driver.AttemptRecord) bool { + return dr.Attempt.DestinationID == targetDestID }, AfterInsert: func(ctx context.Context) error { @@ -236,16 +236,16 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { InsertMany: func(ctx context.Context, items []*models.Event) error { entries := make([]*models.LogEntry, len(items)) for i, evt := range items { - deliveryTime := evt.Time.Add(100 * time.Millisecond) + attemptTime := evt.Time.Add(100 * time.Millisecond) entries[i] = &models.LogEntry{ Event: evt, - Delivery: &models.Delivery{ + Attempt: &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: evt.TenantID, EventID: evt.ID, DestinationID: evt.DestinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", }, } @@ -321,16 +321,16 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { InsertMany: func(ctx 
context.Context, items []*models.Event) error { entries := make([]*models.LogEntry, len(items)) for i, evt := range items { - deliveryTime := evt.Time.Add(100 * time.Millisecond) + attemptTime := evt.Time.Add(100 * time.Millisecond) entries[i] = &models.LogEntry{ Event: evt, - Delivery: &models.Delivery{ + Attempt: &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: evt.TenantID, EventID: evt.ID, DestinationID: evt.DestinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", }, } @@ -379,8 +379,8 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // time-based filters (GTE, LTE, GT, LT), which is critical for // "paginate within a time window" use cases. // - // IMPORTANT: ListDelivery filters by DELIVERY time, ListEvent filters by EVENT time. - // In this test, delivery_time = event_time + 100ms. + // IMPORTANT: ListAttempt filters by ATTEMPT time, ListEvent filters by EVENT time. + // In this test, attempt_time = event_time + 100ms. t.Run("TimeFilterWithCursor", func(t *testing.T) { tenantID := idgen.String() destinationID := idgen.Destination() @@ -392,17 +392,17 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // - Events 15-19: far future (should be excluded by LTE filter) // // Event times are spaced 2 minutes apart within the window. - // Delivery times are 1 second after event times (not sub-second) + // Attempt times are 1 second after event times (not sub-second) // to ensure GT/LT tests work consistently across databases. eventWindowStart := baseTime.Add(-10 * time.Minute) eventWindowEnd := baseTime.Add(10 * time.Minute) - // Delivery window accounts for the 1 second offset - deliveryWindowStart := eventWindowStart.Add(time.Second) - deliveryWindowEnd := eventWindowEnd.Add(time.Second) + // Attempt window accounts for the 1 second offset + attemptWindowStart := eventWindowStart.Add(time.Second) + attemptWindowEnd := eventWindowEnd.Add(time.Second) - var allRecords []*driver.DeliveryRecord + var allRecords []*driver.AttemptRecord var allEvents []*models.Event - var allDeliveries []*models.Delivery + var allAttempts []*models.Attempt for i := range 20 { var eventTime time.Time switch { @@ -418,7 +418,7 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { eventTime = eventWindowEnd.Add(time.Duration(i-14) * time.Hour) } - deliveryTime := eventTime.Add(time.Second) + attemptTime := eventTime.Add(time.Second) event := &models.Event{ ID: fmt.Sprintf("%s_evt_%03d", idPrefix, i), @@ -430,45 +430,45 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { Metadata: map[string]string{}, Data: map[string]any{}, } - delivery := &models.Delivery{ + attempt := &models.Attempt{ ID: fmt.Sprintf("%s_del_%03d", idPrefix, i), TenantID: tenantID, EventID: event.ID, DestinationID: destinationID, Status: "success", - Time: deliveryTime, + Time: attemptTime, Code: "200", } - allRecords = append(allRecords, &driver.DeliveryRecord{ - Event: event, - Delivery: delivery, + allRecords = append(allRecords, &driver.AttemptRecord{ + Event: event, + Attempt: attempt, }) allEvents = append(allEvents, event) - allDeliveries = append(allDeliveries, delivery) + allAttempts = append(allAttempts, attempt) } entries := make([]*models.LogEntry, len(allEvents)) for i := range allEvents { - entries[i] = &models.LogEntry{Event: allEvents[i], Delivery: allDeliveries[i]} + entries[i] = &models.LogEntry{Event: allEvents[i], Attempt: allAttempts[i]} } require.NoError(t, logStore.InsertMany(ctx, entries)) require.NoError(t, 
h.FlushWrites(ctx)) t.Run("paginate within time-bounded window", func(t *testing.T) { - // Paginate through deliveries within the window with limit=3 - // ListDelivery filters by DELIVERY time, not event time. - // Should only see deliveries 5-14 (10 total), not 0-4 or 15-19 + // Paginate through attempts within the window with limit=3 + // ListAttempt filters by ATTEMPT time, not event time. + // Should only see attempts 5-14 (10 total), not 0-4 or 15-19 var collectedIDs []string var nextCursor string pageCount := 0 for { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", Next: nextCursor, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) @@ -488,18 +488,18 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { } } - // Should have collected exactly deliveries 5-14 - require.Len(t, collectedIDs, 10, "should have 10 deliveries in window") + // Should have collected exactly attempts 5-14 + require.Len(t, collectedIDs, 10, "should have 10 attempts in window") for i, id := range collectedIDs { expectedID := fmt.Sprintf("%s_evt_%03d", idPrefix, i+5) - require.Equal(t, expectedID, id, "delivery %d mismatch", i) + require.Equal(t, expectedID, id, "attempt %d mismatch", i) } require.Equal(t, 4, pageCount, "should take 4 pages (3+3+3+1)") }) - t.Run("cursor excludes deliveries outside time filter", func(t *testing.T) { - // First page with no time filter gets all deliveries - resAll, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + t.Run("cursor excludes attempts outside time filter", func(t *testing.T) { + // First page with no time filter gets all attempts + resAll, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 5, SortOrder: "asc", @@ -509,35 +509,35 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { require.Len(t, resAll.Data, 5) // Use the cursor but add a time filter that excludes some results - // The cursor points to position after delivery 4 (far past deliveries) - // But with deliveryWindowStart filter, we should start from delivery 5 - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // The cursor points to position after attempt 4 (far past attempts) + // But with attemptWindowStart filter, we should start from attempt 5 + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 5, SortOrder: "asc", Next: resAll.Next, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) - // Results should respect the time filter (on delivery time) + // Results should respect the time filter (on attempt time) for _, dr := range res.Data { - require.True(t, !dr.Delivery.Time.Before(deliveryWindowStart), "delivery time should be >= deliveryWindowStart") - require.True(t, !dr.Delivery.Time.After(deliveryWindowEnd), "delivery time should be <= deliveryWindowEnd") + require.True(t, !dr.Attempt.Time.Before(attemptWindowStart), "attempt time should be >= attemptWindowStart") + require.True(t, !dr.Attempt.Time.After(attemptWindowEnd), "attempt time should be <= attemptWindowEnd") } }) - t.Run("delivery time filter with GT/LT operators", func(t *testing.T) { - // Test 
exclusive bounds (GT/LT instead of GTE/LTE) on delivery time - // Use delivery times slightly after delivery 5 and slightly before delivery 14 - gtTime := allRecords[5].Delivery.Time.Add(time.Second) // After delivery 5, before delivery 6 - ltTime := allRecords[14].Delivery.Time.Add(-time.Second) // Before delivery 14, after delivery 13 + t.Run("attempt time filter with GT/LT operators", func(t *testing.T) { + // Test exclusive bounds (GT/LT instead of GTE/LTE) on attempt time + // Use attempt times slightly after attempt 5 and slightly before attempt 14 + gtTime := allRecords[5].Attempt.Time.Add(time.Second) // After attempt 5, before attempt 6 + ltTime := allRecords[14].Attempt.Time.Add(-time.Second) // Before attempt 14, after attempt 13 var collectedIDs []string var nextCursor string for { - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", @@ -572,10 +572,10 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { // comparison across databases with different timestamp precision // (PostgreSQL microseconds, ClickHouse DateTime64, etc.). // - // Important: ListDelivery filters by DELIVERY time, not event time. + // Important: ListAttempt filters by ATTEMPT time, not event time. - // First, retrieve all deliveries to find delivery 10's time - res, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // First, retrieve all attempts to find attempt 10's time + res, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", @@ -584,90 +584,90 @@ func testPagination(t *testing.T, newHarness HarnessMaker) { }, }) require.NoError(t, err) - require.GreaterOrEqual(t, len(res.Data), 11, "need at least 11 deliveries") + require.GreaterOrEqual(t, len(res.Data), 11, "need at least 11 attempts") - // Find delivery 10's stored delivery time, truncated to seconds - var storedDelivery10Time time.Time + // Find attempt 10's stored attempt time, truncated to seconds + var storedAttempt10Time time.Time for _, dr := range res.Data { if dr.Event.ID == allRecords[10].Event.ID { - storedDelivery10Time = dr.Delivery.Time.Truncate(time.Second) + storedAttempt10Time = dr.Attempt.Time.Truncate(time.Second) break } } - require.False(t, storedDelivery10Time.IsZero(), "should find delivery 10") + require.False(t, storedAttempt10Time.IsZero(), "should find attempt 10") - // GT with exact time should exclude delivery 10 - resGT, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // GT with exact time should exclude attempt 10 + resGT, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", - TimeFilter: driver.TimeFilter{GT: &storedDelivery10Time}, + TimeFilter: driver.TimeFilter{GT: &storedAttempt10Time}, }) require.NoError(t, err) for _, dr := range resGT.Data { - drTimeTrunc := dr.Delivery.Time.Truncate(time.Second) - require.True(t, drTimeTrunc.After(storedDelivery10Time), - "GT filter should exclude delivery with exact timestamp, got delivery %s with time %v (filter time: %v)", - dr.Delivery.ID, drTimeTrunc, storedDelivery10Time) + drTimeTrunc := dr.Attempt.Time.Truncate(time.Second) + require.True(t, drTimeTrunc.After(storedAttempt10Time), + "GT filter should exclude attempt with exact timestamp, got attempt %s with time %v (filter time: %v)", + dr.Attempt.ID, drTimeTrunc, storedAttempt10Time) } - // LT with exact time should exclude delivery 10 - 
resLT, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // LT with exact time should exclude attempt 10 + resLT, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", - TimeFilter: driver.TimeFilter{LT: &storedDelivery10Time}, + TimeFilter: driver.TimeFilter{LT: &storedAttempt10Time}, }) require.NoError(t, err) for _, dr := range resLT.Data { - drTimeTrunc := dr.Delivery.Time.Truncate(time.Second) - require.True(t, drTimeTrunc.Before(storedDelivery10Time), - "LT filter should exclude delivery with exact timestamp, got delivery %s with time %v (filter time: %v)", - dr.Delivery.ID, drTimeTrunc, storedDelivery10Time) + drTimeTrunc := dr.Attempt.Time.Truncate(time.Second) + require.True(t, drTimeTrunc.Before(storedAttempt10Time), + "LT filter should exclude attempt with exact timestamp, got attempt %s with time %v (filter time: %v)", + dr.Attempt.ID, drTimeTrunc, storedAttempt10Time) } - // Verify delivery 10 is included with GTE/LTE (inclusive bounds) - resGTE, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // Verify attempt 10 is included with GTE/LTE (inclusive bounds) + resGTE, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 100, SortOrder: "asc", - TimeFilter: driver.TimeFilter{GTE: &storedDelivery10Time, LTE: &storedDelivery10Time}, + TimeFilter: driver.TimeFilter{GTE: &storedAttempt10Time, LTE: &storedAttempt10Time}, }) require.NoError(t, err) - require.GreaterOrEqual(t, len(resGTE.Data), 1, "GTE/LTE with same time should include delivery at that second") + require.GreaterOrEqual(t, len(resGTE.Data), 1, "GTE/LTE with same time should include attempt at that second") }) t.Run("prev cursor respects time filter", func(t *testing.T) { - // Get first page (ListDelivery filters by delivery time) - res1, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + // Get first page (ListAttempt filters by attempt time) + res1, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) require.NotEmpty(t, res1.Next) // Get second page - res2, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + res2, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", Next: res1.Next, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) require.NotEmpty(t, res2.Prev) // Go back to first page using prev cursor - resPrev, err := logStore.ListDelivery(ctx, driver.ListDeliveryRequest{ + resPrev, err := logStore.ListAttempt(ctx, driver.ListAttemptRequest{ TenantID: tenantID, Limit: 3, SortOrder: "asc", Prev: res2.Prev, - TimeFilter: driver.TimeFilter{GTE: &deliveryWindowStart, LTE: &deliveryWindowEnd}, + TimeFilter: driver.TimeFilter{GTE: &attemptWindowStart, LTE: &attemptWindowEnd}, }) require.NoError(t, err) diff --git a/internal/logstore/logstore.go b/internal/logstore/logstore.go index e88f6e22..b0557792 100644 --- a/internal/logstore/logstore.go +++ b/internal/logstore/logstore.go @@ -16,18 +16,18 @@ import ( type TimeFilter = driver.TimeFilter type ListEventRequest = driver.ListEventRequest type ListEventResponse = 
driver.ListEventResponse -type ListDeliveryRequest = driver.ListDeliveryRequest -type ListDeliveryResponse = driver.ListDeliveryResponse +type ListAttemptRequest = driver.ListAttemptRequest +type ListAttemptResponse = driver.ListAttemptResponse type RetrieveEventRequest = driver.RetrieveEventRequest -type RetrieveDeliveryRequest = driver.RetrieveDeliveryRequest -type DeliveryRecord = driver.DeliveryRecord +type RetrieveAttemptRequest = driver.RetrieveAttemptRequest +type AttemptRecord = driver.AttemptRecord type LogEntry = models.LogEntry type LogStore interface { ListEvent(context.Context, ListEventRequest) (ListEventResponse, error) - ListDelivery(context.Context, ListDeliveryRequest) (ListDeliveryResponse, error) + ListAttempt(context.Context, ListAttemptRequest) (ListAttemptResponse, error) RetrieveEvent(ctx context.Context, request RetrieveEventRequest) (*models.Event, error) - RetrieveDelivery(ctx context.Context, request RetrieveDeliveryRequest) (*DeliveryRecord, error) + RetrieveAttempt(ctx context.Context, request RetrieveAttemptRequest) (*AttemptRecord, error) InsertMany(context.Context, []*models.LogEntry) error } diff --git a/internal/logstore/memlogstore/memlogstore.go b/internal/logstore/memlogstore/memlogstore.go index 99d1a5d0..1f0fb0aa 100644 --- a/internal/logstore/memlogstore/memlogstore.go +++ b/internal/logstore/memlogstore/memlogstore.go @@ -14,25 +14,25 @@ import ( ) const ( - cursorResourceEvent = "evt" - cursorResourceDelivery = "dlv" - cursorVersion = 1 + cursorResourceEvent = "evt" + cursorResourceAttempt = "att" + cursorVersion = 1 ) // memLogStore is an in-memory implementation of driver.LogStore. // It serves as a reference implementation and is useful for testing. type memLogStore struct { - mu sync.RWMutex - events map[string]*models.Event // keyed by event ID - deliveries []*models.Delivery // list of all deliveries + mu sync.RWMutex + events map[string]*models.Event // keyed by event ID + attempts []*models.Attempt // list of all attempts } var _ driver.LogStore = (*memLogStore)(nil) func NewLogStore() driver.LogStore { return &memLogStore{ - events: make(map[string]*models.Event), - deliveries: make([]*models.Delivery, 0), + events: make(map[string]*models.Event), + attempts: make([]*models.Attempt, 0), } } @@ -194,27 +194,27 @@ func (s *memLogStore) InsertMany(ctx context.Context, entries []*models.LogEntry // Insert event (dedupe by ID) s.events[entry.Event.ID] = copyEvent(entry.Event) - // Insert delivery (idempotent upsert: match on event_id + delivery_id) - d := entry.Delivery - copied := copyDelivery(d) + // Insert attempt (idempotent upsert: match on event_id + attempt_id) + a := entry.Attempt + copied := copyAttempt(a) found := false - for i, existing := range s.deliveries { - if existing.EventID == d.EventID && existing.ID == d.ID { - s.deliveries[i] = copied + for i, existing := range s.attempts { + if existing.EventID == a.EventID && existing.ID == a.ID { + s.attempts[i] = copied found = true break } } if !found { - s.deliveries = append(s.deliveries, copied) + s.attempts = append(s.attempts, copied) } } return nil } -func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { +func (s *memLogStore) ListAttempt(ctx context.Context, req driver.ListAttemptRequest) (driver.ListAttemptResponse, error) { s.mu.RLock() defer s.mu.RUnlock() @@ -228,43 +228,43 @@ func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryR limit = 100 } - // Filter deliveries and 
build records with events - var allRecords []*driver.DeliveryRecord - for _, d := range s.deliveries { - event := s.events[d.EventID] + // Filter attempts and build records with events + var allRecords []*driver.AttemptRecord + for _, a := range s.attempts { + event := s.events[a.EventID] if event == nil { - continue // skip orphan deliveries + continue // skip orphan attempts } - if !s.matchesDeliveryFilter(d, event, req) { + if !s.matchesAttemptFilter(a, event, req) { continue } - allRecords = append(allRecords, &driver.DeliveryRecord{ - Delivery: copyDelivery(d), - Event: copyEvent(event), + allRecords = append(allRecords, &driver.AttemptRecord{ + Attempt: copyAttempt(a), + Event: copyEvent(event), }) } - // deliveryRecordWithTimeID pairs a delivery record with its sortable time ID. - type deliveryRecordWithTimeID struct { - record *driver.DeliveryRecord + // attemptRecordWithTimeID pairs an attempt record with its sortable time ID. + type attemptRecordWithTimeID struct { + record *driver.AttemptRecord timeID string } - // Build list with time IDs (using delivery time) - recordsWithTimeID := make([]deliveryRecordWithTimeID, len(allRecords)) + // Build list with time IDs (using attempt time) + recordsWithTimeID := make([]attemptRecordWithTimeID, len(allRecords)) for i, r := range allRecords { - recordsWithTimeID[i] = deliveryRecordWithTimeID{ + recordsWithTimeID[i] = attemptRecordWithTimeID{ record: r, - timeID: makeTimeID(r.Delivery.Time, r.Delivery.ID), + timeID: makeTimeID(r.Attempt.Time, r.Attempt.ID), } } - res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithTimeID]{ + res, err := pagination.Run(ctx, pagination.Config[attemptRecordWithTimeID]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(_ context.Context, q pagination.QueryInput) ([]deliveryRecordWithTimeID, error) { + Fetch: func(_ context.Context, q pagination.QueryInput) ([]attemptRecordWithTimeID, error) { // Sort based on query direction isDesc := q.SortDir == "desc" sort.Slice(recordsWithTimeID, func(i, j int) bool { @@ -275,7 +275,7 @@ func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryR }) // Filter using q.Compare (like SQL WHERE clause) - var filtered []deliveryRecordWithTimeID + var filtered []attemptRecordWithTimeID for _, r := range recordsWithTimeID { // If no cursor, include all items // If cursor exists, filter using Compare operator @@ -289,38 +289,38 @@ func (s *memLogStore) ListDelivery(ctx context.Context, req driver.ListDeliveryR filtered = filtered[:q.Limit] } - result := make([]deliveryRecordWithTimeID, len(filtered)) + result := make([]attemptRecordWithTimeID, len(filtered)) for i, r := range filtered { - result[i] = deliveryRecordWithTimeID{ - record: &driver.DeliveryRecord{ - Delivery: copyDelivery(r.record.Delivery), - Event: copyEvent(r.record.Event), + result[i] = attemptRecordWithTimeID{ + record: &driver.AttemptRecord{ + Attempt: copyAttempt(r.record.Attempt), + Event: copyEvent(r.record.Event), }, timeID: r.timeID, } } return result, nil }, - Cursor: pagination.Cursor[deliveryRecordWithTimeID]{ - Encode: func(r deliveryRecordWithTimeID) string { - return cursor.Encode(cursorResourceDelivery, cursorVersion, r.timeID) + Cursor: pagination.Cursor[attemptRecordWithTimeID]{ + Encode: func(r attemptRecordWithTimeID) string { + return cursor.Encode(cursorResourceAttempt, cursorVersion, r.timeID) }, Decode: func(c string) (string, error) { - return cursor.Decode(c, cursorResourceDelivery, cursorVersion) + return cursor.Decode(c, 
cursorResourceAttempt, cursorVersion) }, }, }) if err != nil { - return driver.ListDeliveryResponse{}, err + return driver.ListAttemptResponse{}, err } // Extract records from results - data := make([]*driver.DeliveryRecord, len(res.Items)) + data := make([]*driver.AttemptRecord, len(res.Items)) for i, item := range res.Items { data[i] = item.record } - return driver.ListDeliveryResponse{ + return driver.ListAttemptResponse{ Data: data, Next: res.Next, Prev: res.Prev, @@ -345,42 +345,42 @@ func (s *memLogStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEven return copyEvent(event), nil } -func (s *memLogStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { +func (s *memLogStore) RetrieveAttempt(ctx context.Context, req driver.RetrieveAttemptRequest) (*driver.AttemptRecord, error) { s.mu.RLock() defer s.mu.RUnlock() - for _, d := range s.deliveries { - if d.ID == req.DeliveryID { - event := s.events[d.EventID] + for _, a := range s.attempts { + if a.ID == req.AttemptID { + event := s.events[a.EventID] if event == nil { continue } if req.TenantID != "" && event.TenantID != req.TenantID { continue } - return &driver.DeliveryRecord{ - Delivery: copyDelivery(d), - Event: copyEvent(event), + return &driver.AttemptRecord{ + Attempt: copyAttempt(a), + Event: copyEvent(event), }, nil } } return nil, nil } -func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Event, req driver.ListDeliveryRequest) bool { - // Filter by event's tenant ID since deliveries don't have tenant_id in the database +func (s *memLogStore) matchesAttemptFilter(a *models.Attempt, event *models.Event, req driver.ListAttemptRequest) bool { + // Filter by event's tenant ID since attempts don't have tenant_id in the database if req.TenantID != "" && event.TenantID != req.TenantID { return false } - if req.EventID != "" && d.EventID != req.EventID { + if req.EventID != "" && a.EventID != req.EventID { return false } if len(req.DestinationIDs) > 0 { found := false for _, destID := range req.DestinationIDs { - if d.DestinationID == destID { + if a.DestinationID == destID { found = true break } @@ -390,7 +390,7 @@ func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Ev } } - if req.Status != "" && d.Status != req.Status { + if req.Status != "" && a.Status != req.Status { return false } @@ -407,16 +407,16 @@ func (s *memLogStore) matchesDeliveryFilter(d *models.Delivery, event *models.Ev } } - if req.TimeFilter.GTE != nil && d.Time.Before(*req.TimeFilter.GTE) { + if req.TimeFilter.GTE != nil && a.Time.Before(*req.TimeFilter.GTE) { return false } - if req.TimeFilter.LTE != nil && d.Time.After(*req.TimeFilter.LTE) { + if req.TimeFilter.LTE != nil && a.Time.After(*req.TimeFilter.LTE) { return false } - if req.TimeFilter.GT != nil && !d.Time.After(*req.TimeFilter.GT) { + if req.TimeFilter.GT != nil && !a.Time.After(*req.TimeFilter.GT) { return false } - if req.TimeFilter.LT != nil && !d.Time.Before(*req.TimeFilter.LT) { + if req.TimeFilter.LT != nil && !a.Time.Before(*req.TimeFilter.LT) { return false } @@ -449,25 +449,25 @@ func copyEvent(e *models.Event) *models.Event { return copied } -func copyDelivery(d *models.Delivery) *models.Delivery { - if d == nil { +func copyAttempt(a *models.Attempt) *models.Attempt { + if a == nil { return nil } - copied := &models.Delivery{ - ID: d.ID, - TenantID: d.TenantID, - EventID: d.EventID, - DestinationID: d.DestinationID, - Attempt: d.Attempt, - Manual: d.Manual, - Status: 
d.Status, - Time: d.Time, - Code: d.Code, - } - - if d.ResponseData != nil { - copied.ResponseData = make(map[string]any, len(d.ResponseData)) - for k, v := range d.ResponseData { + copied := &models.Attempt{ + ID: a.ID, + TenantID: a.TenantID, + EventID: a.EventID, + DestinationID: a.DestinationID, + AttemptNumber: a.AttemptNumber, + Manual: a.Manual, + Status: a.Status, + Time: a.Time, + Code: a.Code, + } + + if a.ResponseData != nil { + copied.ResponseData = make(map[string]any, len(a.ResponseData)) + for k, v := range a.ResponseData { copied.ResponseData[k] = v } } diff --git a/internal/logstore/pglogstore/pglogstore.go b/internal/logstore/pglogstore/pglogstore.go index 4c6fb28f..35feeb08 100644 --- a/internal/logstore/pglogstore/pglogstore.go +++ b/internal/logstore/pglogstore/pglogstore.go @@ -15,9 +15,9 @@ import ( ) const ( - cursorResourceEvent = "evt" - cursorResourceDelivery = "dlv" - cursorVersion = 1 + cursorResourceEvent = "evt" + cursorResourceAttempt = "att" + cursorVersion = 1 ) type logStore struct { @@ -181,13 +181,13 @@ func scanEvents(rows pgx.Rows) ([]eventWithTimeID, error) { return results, nil } -// deliveryRecordWithTimeID wraps a delivery record with its time_delivery_id for cursor encoding. -type deliveryRecordWithTimeID struct { - *driver.DeliveryRecord - TimeDeliveryID string +// attemptRecordWithTimeID wraps an attempt record with its time_attempt_id for cursor encoding. +type attemptRecordWithTimeID struct { + *driver.AttemptRecord + TimeAttemptID string } -func (s *logStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequest) (driver.ListDeliveryResponse, error) { +func (s *logStore) ListAttempt(ctx context.Context, req driver.ListAttemptRequest) (driver.ListAttemptResponse, error) { sortOrder := req.SortOrder if sortOrder != "asc" && sortOrder != "desc" { sortOrder = "desc" @@ -198,80 +198,80 @@ func (s *logStore) ListDelivery(ctx context.Context, req driver.ListDeliveryRequ limit = 100 } - res, err := pagination.Run(ctx, pagination.Config[deliveryRecordWithTimeID]{ + res, err := pagination.Run(ctx, pagination.Config[attemptRecordWithTimeID]{ Limit: limit, Order: sortOrder, Next: req.Next, Prev: req.Prev, - Fetch: func(ctx context.Context, q pagination.QueryInput) ([]deliveryRecordWithTimeID, error) { - query, args := buildDeliveryQuery(req, q) + Fetch: func(ctx context.Context, q pagination.QueryInput) ([]attemptRecordWithTimeID, error) { + query, args := buildAttemptQuery(req, q) rows, err := s.db.Query(ctx, query, args...) 
if err != nil { return nil, fmt.Errorf("query failed: %w", err) } defer rows.Close() - return scanDeliveryRecords(rows) + return scanAttemptRecords(rows) }, - Cursor: pagination.Cursor[deliveryRecordWithTimeID]{ - Encode: func(dr deliveryRecordWithTimeID) string { - return cursor.Encode(cursorResourceDelivery, cursorVersion, dr.TimeDeliveryID) + Cursor: pagination.Cursor[attemptRecordWithTimeID]{ + Encode: func(ar attemptRecordWithTimeID) string { + return cursor.Encode(cursorResourceAttempt, cursorVersion, ar.TimeAttemptID) }, Decode: func(c string) (string, error) { - return cursor.Decode(c, cursorResourceDelivery, cursorVersion) + return cursor.Decode(c, cursorResourceAttempt, cursorVersion) }, }, }) if err != nil { - return driver.ListDeliveryResponse{}, err + return driver.ListAttemptResponse{}, err } - // Extract delivery records from results - data := make([]*driver.DeliveryRecord, len(res.Items)) + // Extract attempt records from results + data := make([]*driver.AttemptRecord, len(res.Items)) for i, item := range res.Items { - data[i] = item.DeliveryRecord + data[i] = item.AttemptRecord } - return driver.ListDeliveryResponse{ + return driver.ListAttemptResponse{ Data: data, Next: res.Next, Prev: res.Prev, }, nil } -func buildDeliveryQuery(req driver.ListDeliveryRequest, q pagination.QueryInput) (string, []any) { - cursorCondition := fmt.Sprintf("AND ($10::text = '' OR idx.time_delivery_id %s $10::text)", q.Compare) - orderByClause := fmt.Sprintf("idx.delivery_time %s, idx.delivery_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) +func buildAttemptQuery(req driver.ListAttemptRequest, q pagination.QueryInput) (string, []any) { + cursorCondition := fmt.Sprintf("AND ($10::text = '' OR idx.time_attempt_id %s $10::text)", q.Compare) + orderByClause := fmt.Sprintf("idx.attempt_time %s, idx.attempt_id %s", strings.ToUpper(q.SortDir), strings.ToUpper(q.SortDir)) query := fmt.Sprintf(` SELECT idx.event_id, - idx.delivery_id, + idx.attempt_id, idx.destination_id, idx.event_time, - idx.delivery_time, + idx.attempt_time, idx.topic, idx.status, - idx.time_delivery_id, + idx.time_attempt_id, e.tenant_id, e.eligible_for_retry, e.data, e.metadata, - d.code, - d.response_data, + a.code, + a.response_data, idx.manual, - idx.attempt - FROM event_delivery_index idx + idx.attempt_number + FROM event_attempt_index idx JOIN events e ON e.id = idx.event_id AND e.time = idx.event_time - JOIN deliveries d ON d.id = idx.delivery_id AND d.time = idx.delivery_time + JOIN attempts a ON a.id = idx.attempt_id AND a.time = idx.attempt_time WHERE ($1::text = '' OR idx.tenant_id = $1) AND ($2::text = '' OR idx.event_id = $2) AND (array_length($3::text[], 1) IS NULL OR idx.destination_id = ANY($3)) AND ($4::text = '' OR idx.status = $4) AND (array_length($5::text[], 1) IS NULL OR idx.topic = ANY($5)) - AND ($6::timestamptz IS NULL OR idx.delivery_time >= $6) - AND ($7::timestamptz IS NULL OR idx.delivery_time <= $7) - AND ($8::timestamptz IS NULL OR idx.delivery_time > $8) - AND ($9::timestamptz IS NULL OR idx.delivery_time < $9) + AND ($6::timestamptz IS NULL OR idx.attempt_time >= $6) + AND ($7::timestamptz IS NULL OR idx.attempt_time <= $7) + AND ($8::timestamptz IS NULL OR idx.attempt_time > $8) + AND ($9::timestamptz IS NULL OR idx.attempt_time < $9) %s ORDER BY %s LIMIT $11 @@ -294,18 +294,18 @@ func buildDeliveryQuery(req driver.ListDeliveryRequest, q pagination.QueryInput) return query, args } -func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { - var results 
[]deliveryRecordWithTimeID +func scanAttemptRecords(rows pgx.Rows) ([]attemptRecordWithTimeID, error) { + var results []attemptRecordWithTimeID for rows.Next() { var ( eventID string - deliveryID string + attemptID string destinationID string eventTime time.Time - deliveryTime time.Time + attemptTime time.Time topic string status string - timeDeliveryID string + timeAttemptID string tenantID string eligibleForRetry bool data map[string]any @@ -313,18 +313,18 @@ func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { code string responseData map[string]any manual bool - attempt int + attemptNumber int ) if err := rows.Scan( &eventID, - &deliveryID, + &attemptID, &destinationID, &eventTime, - &deliveryTime, + &attemptTime, &topic, &status, - &timeDeliveryID, + &timeAttemptID, &tenantID, &eligibleForRetry, &data, @@ -332,22 +332,22 @@ func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { &code, &responseData, &manual, - &attempt, + &attemptNumber, ); err != nil { return nil, fmt.Errorf("scan failed: %w", err) } - results = append(results, deliveryRecordWithTimeID{ - DeliveryRecord: &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + results = append(results, attemptRecordWithTimeID{ + AttemptRecord: &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: attempt, + AttemptNumber: attemptNumber, Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -362,7 +362,7 @@ func scanDeliveryRecords(rows pgx.Rows) ([]deliveryRecordWithTimeID, error) { Metadata: metadata, }, }, - TimeDeliveryID: timeDeliveryID, + TimeAttemptID: timeAttemptID, }) } @@ -391,7 +391,7 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRe FROM events e WHERE ($1::text = '' OR e.tenant_id = $1) AND e.id = $2 AND EXISTS ( - SELECT 1 FROM event_delivery_index idx + SELECT 1 FROM event_attempt_index idx WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.event_id = $2 AND idx.destination_id = $3 )` args = []any{req.TenantID, req.EventID, req.DestinationID} @@ -434,38 +434,38 @@ func (s *logStore) RetrieveEvent(ctx context.Context, req driver.RetrieveEventRe return event, nil } -func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeliveryRequest) (*driver.DeliveryRecord, error) { +func (s *logStore) RetrieveAttempt(ctx context.Context, req driver.RetrieveAttemptRequest) (*driver.AttemptRecord, error) { query := ` SELECT idx.event_id, - idx.delivery_id, + idx.attempt_id, idx.destination_id, idx.event_time, - idx.delivery_time, + idx.attempt_time, idx.topic, idx.status, e.tenant_id, e.eligible_for_retry, e.data, e.metadata, - d.code, - d.response_data, + a.code, + a.response_data, idx.manual, - idx.attempt - FROM event_delivery_index idx + idx.attempt_number + FROM event_attempt_index idx JOIN events e ON e.id = idx.event_id AND e.time = idx.event_time - JOIN deliveries d ON d.id = idx.delivery_id AND d.time = idx.delivery_time - WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.delivery_id = $2 + JOIN attempts a ON a.id = idx.attempt_id AND a.time = idx.attempt_time + WHERE ($1::text = '' OR idx.tenant_id = $1) AND idx.attempt_id = $2 LIMIT 1` - row := s.db.QueryRow(ctx, query, req.TenantID, req.DeliveryID) + row := s.db.QueryRow(ctx, query, req.TenantID, req.AttemptID) var ( eventID string - deliveryID string + attemptID string destinationID string 
eventTime time.Time - deliveryTime time.Time + attemptTime time.Time topic string status string tenantID string @@ -475,15 +475,15 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli code string responseData map[string]any manual bool - attempt int + attemptNumber int ) err := row.Scan( &eventID, - &deliveryID, + &attemptID, &destinationID, &eventTime, - &deliveryTime, + &attemptTime, &topic, &status, &tenantID, @@ -493,7 +493,7 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli &code, &responseData, &manual, - &attempt, + &attemptNumber, ) if err == pgx.ErrNoRows { return nil, nil @@ -502,16 +502,16 @@ func (s *logStore) RetrieveDelivery(ctx context.Context, req driver.RetrieveDeli return nil, fmt.Errorf("scan failed: %w", err) } - return &driver.DeliveryRecord{ - Delivery: &models.Delivery{ - ID: deliveryID, + return &driver.AttemptRecord{ + Attempt: &models.Attempt{ + ID: attemptID, TenantID: tenantID, EventID: eventID, DestinationID: destinationID, - Attempt: attempt, + AttemptNumber: attemptNumber, Manual: manual, Status: status, - Time: deliveryTime, + Time: attemptTime, Code: code, ResponseData: responseData, }, @@ -543,10 +543,10 @@ func (s *logStore) InsertMany(ctx context.Context, entries []*models.LogEntry) e events = append(events, e) } - // Extract deliveries - deliveries := make([]*models.Delivery, 0, len(entries)) + // Extract attempts + attempts := make([]*models.Attempt, 0, len(entries)) for _, entry := range entries { - deliveries = append(deliveries, entry.Delivery) + attempts = append(attempts, entry.Attempt) } tx, err := s.db.Begin(ctx) @@ -566,41 +566,41 @@ func (s *logStore) InsertMany(ctx context.Context, entries []*models.LogEntry) e } } - if len(deliveries) > 0 { + if len(attempts) > 0 { _, err = tx.Exec(ctx, ` - INSERT INTO deliveries (id, event_id, destination_id, status, time, code, response_data, manual, attempt) + INSERT INTO attempts (id, event_id, destination_id, status, time, code, response_data, manual, attempt_number) SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) ON CONFLICT (time, id) DO UPDATE SET status = EXCLUDED.status, code = EXCLUDED.code, response_data = EXCLUDED.response_data - `, deliveryArrays(deliveries)...) + `, attemptArrays(attempts)...) if err != nil { return err } _, err = tx.Exec(ctx, ` - INSERT INTO event_delivery_index ( - event_id, delivery_id, tenant_id, destination_id, - event_time, delivery_time, topic, status, manual, attempt + INSERT INTO event_attempt_index ( + event_id, attempt_id, tenant_id, destination_id, + event_time, attempt_time, topic, status, manual, attempt_number ) SELECT - d.event_id, - d.id, + a.event_id, + a.id, e.tenant_id, - d.destination_id, + a.destination_id, e.time, - d.time, + a.time, e.topic, - d.status, - d.manual, - d.attempt + a.status, + a.manual, + a.attempt_number FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::timestamptz[], $6::text[], $7::jsonb[], $8::boolean[], $9::integer[]) - AS d(id, event_id, destination_id, status, time, code, response_data, manual, attempt) - JOIN events e ON e.id = d.event_id - ON CONFLICT (delivery_time, event_id, delivery_id) DO UPDATE SET + AS a(id, event_id, destination_id, status, time, code, response_data, manual, attempt_number) + JOIN events e ON e.id = a.event_id + ON CONFLICT (attempt_time, event_id, attempt_id) DO UPDATE SET status = EXCLUDED.status - `, deliveryArrays(deliveries)...) 
+ `, attemptArrays(attempts)...) if err != nil { return err } @@ -642,27 +642,27 @@ func eventArrays(events []*models.Event) []any { } } -func deliveryArrays(deliveries []*models.Delivery) []any { - ids := make([]string, len(deliveries)) - eventIDs := make([]string, len(deliveries)) - destinationIDs := make([]string, len(deliveries)) - statuses := make([]string, len(deliveries)) - times := make([]time.Time, len(deliveries)) - codes := make([]string, len(deliveries)) - responseDatas := make([]map[string]any, len(deliveries)) - manuals := make([]bool, len(deliveries)) - attempts := make([]int, len(deliveries)) - - for i, d := range deliveries { - ids[i] = d.ID - eventIDs[i] = d.EventID - destinationIDs[i] = d.DestinationID - statuses[i] = d.Status - times[i] = d.Time - codes[i] = d.Code - responseDatas[i] = d.ResponseData - manuals[i] = d.Manual - attempts[i] = d.Attempt +func attemptArrays(attempts []*models.Attempt) []any { + ids := make([]string, len(attempts)) + eventIDs := make([]string, len(attempts)) + destinationIDs := make([]string, len(attempts)) + statuses := make([]string, len(attempts)) + times := make([]time.Time, len(attempts)) + codes := make([]string, len(attempts)) + responseDatas := make([]map[string]any, len(attempts)) + manuals := make([]bool, len(attempts)) + attemptNumbers := make([]int, len(attempts)) + + for i, a := range attempts { + ids[i] = a.ID + eventIDs[i] = a.EventID + destinationIDs[i] = a.DestinationID + statuses[i] = a.Status + times[i] = a.Time + codes[i] = a.Code + responseDatas[i] = a.ResponseData + manuals[i] = a.Manual + attemptNumbers[i] = a.AttemptNumber } return []any{ @@ -674,6 +674,6 @@ func deliveryArrays(deliveries []*models.Delivery) []any { codes, responseDatas, manuals, - attempts, + attemptNumbers, } } diff --git a/internal/migrator/migrations/clickhouse/000001_init.down.sql b/internal/migrator/migrations/clickhouse/000001_init.down.sql index d0465d4e..70b20422 100644 --- a/internal/migrator/migrations/clickhouse/000001_init.down.sql +++ b/internal/migrator/migrations/clickhouse/000001_init.down.sql @@ -1,2 +1,2 @@ -DROP TABLE IF EXISTS {deployment_prefix}deliveries; +DROP TABLE IF EXISTS {deployment_prefix}attempts; DROP TABLE IF EXISTS {deployment_prefix}events; diff --git a/internal/migrator/migrations/clickhouse/000001_init.up.sql b/internal/migrator/migrations/clickhouse/000001_init.up.sql index 1e44f64f..ac077085 100644 --- a/internal/migrator/migrations/clickhouse/000001_init.up.sql +++ b/internal/migrator/migrations/clickhouse/000001_init.up.sql @@ -21,11 +21,11 @@ CREATE TABLE IF NOT EXISTS {deployment_prefix}events ( PARTITION BY toYYYYMM(event_time) ORDER BY (event_time, event_id); --- Deliveries table for delivery queries --- Each row represents a delivery attempt for an event +-- Attempts table for attempt queries +-- Each row represents an attempt for an event -- Stateless queries: no GROUP BY, no aggregation, direct row access -CREATE TABLE IF NOT EXISTS {deployment_prefix}deliveries ( +CREATE TABLE IF NOT EXISTS {deployment_prefix}attempts ( -- Event fields event_id String, tenant_id String, @@ -36,22 +36,22 @@ CREATE TABLE IF NOT EXISTS {deployment_prefix}deliveries ( metadata String, -- JSON serialized data String, -- JSON serialized - -- Delivery fields - delivery_id String, + -- Attempt fields + attempt_id String, status String, -- 'success', 'failed' - delivery_time DateTime64(3), + attempt_time DateTime64(3), code String, response_data String, -- JSON serialized manual Bool DEFAULT false, - attempt UInt32 DEFAULT 0, + 
attempt_number UInt32 DEFAULT 0, -- Indexes for filtering (bloom filters help skip granules) INDEX idx_tenant_id tenant_id TYPE bloom_filter GRANULARITY 1, INDEX idx_destination_id destination_id TYPE bloom_filter GRANULARITY 1, INDEX idx_event_id event_id TYPE bloom_filter GRANULARITY 1, - INDEX idx_delivery_id delivery_id TYPE bloom_filter GRANULARITY 1, + INDEX idx_attempt_id attempt_id TYPE bloom_filter GRANULARITY 1, INDEX idx_topic topic TYPE bloom_filter GRANULARITY 1, INDEX idx_status status TYPE set(100) GRANULARITY 1 ) ENGINE = ReplacingMergeTree -PARTITION BY toYYYYMM(delivery_time) -ORDER BY (delivery_time, delivery_id); +PARTITION BY toYYYYMM(attempt_time) +ORDER BY (attempt_time, attempt_id); diff --git a/internal/migrator/migrations/postgres/000001_init.down.sql b/internal/migrator/migrations/postgres/000001_init.down.sql index f55d529e..e0dca179 100644 --- a/internal/migrator/migrations/postgres/000001_init.down.sql +++ b/internal/migrator/migrations/postgres/000001_init.down.sql @@ -3,4 +3,4 @@ BEGIN; DROP TABLE IF EXISTS events CASCADE; DROP TABLE IF EXISTS deliveries CASCADE; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000001_init.up.sql b/internal/migrator/migrations/postgres/000001_init.up.sql index f9e5234c..884dc93b 100644 --- a/internal/migrator/migrations/postgres/000001_init.up.sql +++ b/internal/migrator/migrations/postgres/000001_init.up.sql @@ -43,4 +43,4 @@ CREATE TABLE deliveries_default PARTITION OF deliveries DEFAULT; CREATE INDEX ON deliveries (event_id); CREATE INDEX ON deliveries (event_id, status); -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000002_delivery_response.down.sql b/internal/migrator/migrations/postgres/000002_delivery_response.down.sql index 0ecc7d93..4699a411 100644 --- a/internal/migrator/migrations/postgres/000002_delivery_response.down.sql +++ b/internal/migrator/migrations/postgres/000002_delivery_response.down.sql @@ -3,4 +3,4 @@ BEGIN; ALTER TABLE deliveries DROP COLUMN code, DROP COLUMN response_data; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000002_delivery_response.up.sql b/internal/migrator/migrations/postgres/000002_delivery_response.up.sql index 24e0d728..7a224676 100644 --- a/internal/migrator/migrations/postgres/000002_delivery_response.up.sql +++ b/internal/migrator/migrations/postgres/000002_delivery_response.up.sql @@ -4,4 +4,4 @@ ALTER TABLE deliveries ADD COLUMN code TEXT, ADD COLUMN response_data JSONB; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql b/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql index 93623032..2218227c 100644 --- a/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql +++ b/internal/migrator/migrations/postgres/000003_event_delivery_index.down.sql @@ -3,4 +3,4 @@ BEGIN; DROP TABLE IF EXISTS event_delivery_index_default; DROP TABLE IF EXISTS event_delivery_index CASCADE; -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql b/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql index 23fc772f..bba2ad01 100644 --- a/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql +++ b/internal/migrator/migrations/postgres/000003_event_delivery_index.up.sql @@ -50,4 +50,4 @@ CREATE INDEX IF NOT EXISTS 
idx_event_delivery_index_main ON event_delivery_index time_delivery_id ); -COMMIT; \ No newline at end of file +COMMIT; diff --git a/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql new file mode 100644 index 00000000..21f8575d --- /dev/null +++ b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.down.sql @@ -0,0 +1,48 @@ +BEGIN; + +-- Drop new index and restore old one +DROP INDEX IF EXISTS idx_event_attempt_index_main; + +-- Restore generated column with old name +ALTER TABLE event_attempt_index DROP COLUMN time_attempt_id; +ALTER TABLE event_attempt_index ADD COLUMN time_delivery_id text GENERATED ALWAYS AS ( + LPAD( + CAST( + EXTRACT( + EPOCH + FROM attempt_time AT TIME ZONE 'UTC' + ) AS BIGINT + )::text, + 10, + '0' + ) || '_' || attempt_id +) STORED; + +-- Rename columns back in event_attempt_index +ALTER TABLE event_attempt_index RENAME COLUMN attempt_number TO attempt; +ALTER TABLE event_attempt_index RENAME COLUMN attempt_time TO delivery_time; +ALTER TABLE event_attempt_index RENAME COLUMN attempt_id TO delivery_id; + +-- Rename tables back +ALTER TABLE event_attempt_index RENAME TO event_delivery_index; +ALTER TABLE event_attempt_index_default RENAME TO event_delivery_index_default; + +-- Rename column back in attempts: attempt_number -> attempt +ALTER TABLE attempts RENAME COLUMN attempt_number TO attempt; + +ALTER TABLE attempts RENAME TO deliveries; +ALTER TABLE attempts_default RENAME TO deliveries_default; + +-- Recreate old index +CREATE INDEX IF NOT EXISTS idx_event_delivery_index_main ON event_delivery_index( + tenant_id, + destination_id, + topic, + status, + event_time DESC, + delivery_time DESC, + time_event_id, + time_delivery_id +); + +COMMIT; diff --git a/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql new file mode 100644 index 00000000..ea143623 --- /dev/null +++ b/internal/migrator/migrations/postgres/000005_rename_delivery_to_attempt.up.sql @@ -0,0 +1,47 @@ +BEGIN; + +-- Rename deliveries table to attempts +ALTER TABLE deliveries RENAME TO attempts; +ALTER TABLE deliveries_default RENAME TO attempts_default; + +-- Rename column in attempts: attempt -> attempt_number +ALTER TABLE attempts RENAME COLUMN attempt TO attempt_number; + +-- Rename event_delivery_index table to event_attempt_index +ALTER TABLE event_delivery_index RENAME TO event_attempt_index; +ALTER TABLE event_delivery_index_default RENAME TO event_attempt_index_default; + +-- Rename columns in event_attempt_index +ALTER TABLE event_attempt_index RENAME COLUMN delivery_id TO attempt_id; +ALTER TABLE event_attempt_index RENAME COLUMN delivery_time TO attempt_time; +ALTER TABLE event_attempt_index RENAME COLUMN attempt TO attempt_number; + +-- Drop and recreate generated column with new name +ALTER TABLE event_attempt_index DROP COLUMN time_delivery_id; +ALTER TABLE event_attempt_index ADD COLUMN time_attempt_id text GENERATED ALWAYS AS ( + LPAD( + CAST( + EXTRACT( + EPOCH + FROM attempt_time AT TIME ZONE 'UTC' + ) AS BIGINT + )::text, + 10, + '0' + ) || '_' || attempt_id +) STORED; + +-- Drop old index and create new one with updated column names +DROP INDEX IF EXISTS idx_event_delivery_index_main; +CREATE INDEX IF NOT EXISTS idx_event_attempt_index_main ON event_attempt_index( + tenant_id, + destination_id, + topic, + status, + event_time DESC, + 
attempt_time DESC, + time_event_id, + time_attempt_id +); + +COMMIT; diff --git a/internal/migrator/migrator_test.go b/internal/migrator/migrator_test.go index 51762911..8edeb651 100644 --- a/internal/migrator/migrator_test.go +++ b/internal/migrator/migrator_test.go @@ -342,9 +342,9 @@ func TestMigrator_DeploymentID_TableNaming(t *testing.T) { assert.Equal(t, uint64(1), count, "testdeploy_events table should exist") err = chDB.QueryRow(ctx, "SELECT count() FROM system.tables WHERE database = ? AND name = ?", - chConfig.Database, "testdeploy_deliveries").Scan(&count) + chConfig.Database, "testdeploy_attempts").Scan(&count) require.NoError(t, err) - assert.Equal(t, uint64(1), count, "testdeploy_deliveries table should exist") + assert.Equal(t, uint64(1), count, "testdeploy_attempts table should exist") } // TestMigrator_DeploymentID_Isolation tests that multiple deployments are isolated. @@ -390,8 +390,8 @@ func TestMigrator_DeploymentID_Isolation(t *testing.T) { defer chDB.Close() tables := []string{ - "deploy_a_events", "deploy_a_deliveries", - "deploy_b_events", "deploy_b_deliveries", + "deploy_a_events", "deploy_a_attempts", + "deploy_b_events", "deploy_b_attempts", } for _, table := range tables { var count uint64 @@ -466,9 +466,9 @@ func TestMigrator_NoDeploymentID_DefaultTables(t *testing.T) { assert.Equal(t, uint64(1), count, "events table should exist") err = chDB.QueryRow(ctx, "SELECT count() FROM system.tables WHERE database = ? AND name = ?", - chConfig.Database, "deliveries").Scan(&count) + chConfig.Database, "attempts").Scan(&count) require.NoError(t, err) - assert.Equal(t, uint64(1), count, "deliveries table should exist") + assert.Equal(t, uint64(1), count, "attempts table should exist") } func setupClickHouseConfig(t *testing.T) clickhouse.ClickHouseConfig { diff --git a/internal/models/event.go b/internal/models/event.go index 4d7a68ff..22704c7d 100644 --- a/internal/models/event.go +++ b/internal/models/event.go @@ -128,18 +128,18 @@ func NewManualDeliveryTask(event Event, destinationID string) DeliveryTask { } const ( - DeliveryStatusSuccess = "success" - DeliveryStatusFailed = "failed" + AttemptStatusSuccess = "success" + AttemptStatusFailed = "failed" ) // LogEntry represents a message for the log queue. // -// IMPORTANT: Both Event and Delivery are REQUIRED. The logstore requires both +// IMPORTANT: Both Event and Attempt are REQUIRED. The logstore requires both // to exist for proper data consistency. The logmq consumer validates this // requirement and rejects entries missing either field. 
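//
// As a minimal sketch (illustrative only, not part of this change; the literal
// ID values are assumptions following the configured "evt"/"atm" prefixes), a
// valid entry pairs both pointers:
//
//	entry := &LogEntry{
//		Event:   &Event{ID: "evt_123", TenantID: "tnt_1", Time: time.Now()},
//		Attempt: &Attempt{ID: "atm_456", EventID: "evt_123", Status: AttemptStatusSuccess, Time: time.Now()},
//	}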
diff --git a/internal/models/event.go b/internal/models/event.go
index 4d7a68ff..22704c7d 100644
--- a/internal/models/event.go
+++ b/internal/models/event.go
@@ -128,18 +128,18 @@ func NewManualDeliveryTask(event Event, destinationID string) DeliveryTask {
 }
 
 const (
-	DeliveryStatusSuccess = "success"
-	DeliveryStatusFailed  = "failed"
+	AttemptStatusSuccess = "success"
+	AttemptStatusFailed  = "failed"
 )
 
 // LogEntry represents a message for the log queue.
 //
-// IMPORTANT: Both Event and Delivery are REQUIRED. The logstore requires both
+// IMPORTANT: Both Event and Attempt are REQUIRED. The logstore requires both
 // to exist for proper data consistency. The logmq consumer validates this
 // requirement and rejects entries missing either field.
 type LogEntry struct {
-	Event    *Event    `json:"event"`
-	Delivery *Delivery `json:"delivery"`
+	Event   *Event   `json:"event"`
+	Attempt *Attempt `json:"attempt"`
 }
 
 var _ mqs.IncomingMessage = &LogEntry{}
 
@@ -156,12 +156,12 @@ func (e *LogEntry) ToMessage() (*mqs.Message, error) {
 	return &mqs.Message{Body: data}, nil
 }
 
-type Delivery struct {
+type Attempt struct {
 	ID            string    `json:"id"`
 	TenantID      string    `json:"tenant_id"`
 	EventID       string    `json:"event_id"`
 	DestinationID string    `json:"destination_id"`
-	Attempt       int       `json:"attempt"`
+	AttemptNumber int       `json:"attempt"`
 	Manual        bool      `json:"manual"`
 	Status        string    `json:"status"`
 	Time          time.Time `json:"time"`
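Note: with these struct tags a serialized LogEntry now carries an "attempt" object, and the renamed AttemptNumber field still marshals under the "attempt" JSON key, so the wire format of the counter is unchanged. A self-contained sketch of the resulting payload (trimmed-down struct copies and made-up values; the Event half of LogEntry, which is also required, is omitted here for brevity):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed-down copies of the renamed types from internal/models/event.go,
// kept only to show the JSON shape.
type Attempt struct {
	ID            string    `json:"id"`
	EventID       string    `json:"event_id"`
	DestinationID string    `json:"destination_id"`
	AttemptNumber int       `json:"attempt"` // JSON key intentionally unchanged
	Status        string    `json:"status"`
	Time          time.Time `json:"time"`
}

type LogEntry struct {
	Attempt *Attempt `json:"attempt"`
}

func main() {
	entry := LogEntry{Attempt: &Attempt{
		ID:            "atm_123",
		EventID:       "evt_456",
		DestinationID: "des_789",
		AttemptNumber: 1,
		Status:        "success",
		Time:          time.Unix(1700000000, 0).UTC(),
	}}
	out, _ := json.MarshalIndent(entry, "", "  ")
	fmt.Println(string(out)) // {"attempt":{"id":"atm_123",...,"attempt":1,...}}
}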
diff --git a/internal/mqinfra/awssqs.go b/internal/mqinfra/awssqs.go
index 6c6b0d65..4e990397 100644
--- a/internal/mqinfra/awssqs.go
+++ b/internal/mqinfra/awssqs.go
@@ -30,7 +30,6 @@ func (infra *infraAWSSQS) Exist(ctx context.Context) (bool, error) {
 		return false, err
 	}
 
-	// Check if main queue exists
 	_, err = awsutil.RetrieveQueueURL(ctx, sqsClient, infra.cfg.AWSSQS.Topic)
 	if err != nil {
 		var apiErr smithy.APIError
@@ -45,7 +44,6 @@ func (infra *infraAWSSQS) Exist(ctx context.Context) (bool, error) {
 		return false, err
 	}
 
-	// Check if DLQ exists
 	dlqName := infra.cfg.AWSSQS.Topic + "-dlq"
 	_, err = awsutil.RetrieveQueueURL(ctx, sqsClient, dlqName)
 	if err != nil {
diff --git a/internal/mqinfra/azureservicebus.go b/internal/mqinfra/azureservicebus.go
index 5aff2c28..61abd331 100644
--- a/internal/mqinfra/azureservicebus.go
+++ b/internal/mqinfra/azureservicebus.go
@@ -25,7 +25,6 @@ func (infra *infraAzureServiceBus) Exist(ctx context.Context) (bool, error) {
 		return true, nil
 	}
 
-	// Create credential for authentication
 	cred, err := azidentity.NewClientSecretCredential(
 		cfg.TenantID,
 		cfg.ClientID,
@@ -36,7 +35,6 @@ func (infra *infraAzureServiceBus) Exist(ctx context.Context) (bool, error) {
 		return false, fmt.Errorf("failed to create credential: %w", err)
 	}
 
-	// Create clients for topic and subscription management
 	topicClient, err := armservicebus.NewTopicsClient(cfg.SubscriptionID, cred, nil)
 	if err != nil {
 		return false, fmt.Errorf("failed to create topic client: %w", err)
@@ -47,7 +45,6 @@ func (infra *infraAzureServiceBus) Exist(ctx context.Context) (bool, error) {
 		return false, fmt.Errorf("failed to create subscription client: %w", err)
 	}
 
-	// Check if topic exists
 	_, err = topicClient.Get(ctx, cfg.ResourceGroup, cfg.Namespace, cfg.Topic, nil)
 	if err != nil {
 		if isNotFoundError(err) {
@@ -56,7 +53,6 @@ func (infra *infraAzureServiceBus) Exist(ctx context.Context) (bool, error) {
 		return false, fmt.Errorf("failed to check topic existence: %w", err)
 	}
 
-	// Check if subscription exists
 	_, err = subClient.Get(ctx, cfg.ResourceGroup, cfg.Namespace, cfg.Topic, cfg.Subscription, nil)
 	if err != nil {
 		if isNotFoundError(err) {
@@ -79,7 +75,6 @@ func (infra *infraAzureServiceBus) Declare(ctx context.Context) error {
 		return nil
 	}
 
-	// Create credential for authentication
 	cred, err := azidentity.NewClientSecretCredential(
 		cfg.TenantID,
 		cfg.ClientID,
@@ -90,7 +85,6 @@ func (infra *infraAzureServiceBus) Declare(ctx context.Context) error {
 		return fmt.Errorf("failed to create credential: %w", err)
 	}
 
-	// Create clients for topic and subscription management
 	topicClient, err := armservicebus.NewTopicsClient(cfg.SubscriptionID, cred, nil)
 	if err != nil {
 		return fmt.Errorf("failed to create topic client: %w", err)
@@ -101,7 +95,6 @@ func (infra *infraAzureServiceBus) Declare(ctx context.Context) error {
 		return fmt.Errorf("failed to create subscription client: %w", err)
 	}
 
-	// Declare main topic (upsert)
 	topicName := cfg.Topic
 	err = infra.declareTopic(ctx, topicClient, cfg.ResourceGroup, cfg.Namespace, topicName)
 	if err != nil {
@@ -158,7 +151,6 @@ func (infra *infraAzureServiceBus) TearDown(ctx context.Context) error {
 		return nil
 	}
 
-	// Create credential for authentication
 	cred, err := azidentity.NewClientSecretCredential(
 		cfg.TenantID,
 		cfg.ClientID,
@@ -169,7 +161,6 @@ func (infra *infraAzureServiceBus) TearDown(ctx context.Context) error {
 		return fmt.Errorf("failed to create credential: %w", err)
 	}
 
-	// Create clients for topic and subscription management
 	topicClient, err := armservicebus.NewTopicsClient(cfg.SubscriptionID, cred, nil)
 	if err != nil {
 		return fmt.Errorf("failed to create topic client: %w", err)
@@ -182,13 +173,11 @@ func (infra *infraAzureServiceBus) TearDown(ctx context.Context) error {
 
 	topicName := cfg.Topic
 
-	// Delete main subscription
 	err = infra.deleteSubscription(ctx, subClient, cfg.ResourceGroup, cfg.Namespace, topicName, cfg.Subscription)
 	if err != nil {
 		return fmt.Errorf("failed to delete subscription: %w", err)
 	}
 
-	// Delete main topic
 	err = infra.deleteTopic(ctx, topicClient, cfg.ResourceGroup, cfg.Namespace, topicName)
 	if err != nil {
 		return fmt.Errorf("failed to delete topic: %w", err)
diff --git a/internal/mqinfra/gcppubsub.go b/internal/mqinfra/gcppubsub.go
index 29a238c0..0be4aab9 100644
--- a/internal/mqinfra/gcppubsub.go
+++ b/internal/mqinfra/gcppubsub.go
@@ -21,20 +21,17 @@ func (infra *infraGCPPubSub) Exist(ctx context.Context) (bool, error) {
 		return false, errors.New("failed assertion: cfg.GCPPubSub != nil") // IMPOSSIBLE
 	}
 
-	// Create client options
 	var opts []option.ClientOption
 	if infra.cfg.GCPPubSub.ServiceAccountCredentials != "" {
 		opts = append(opts, option.WithCredentialsJSON([]byte(infra.cfg.GCPPubSub.ServiceAccountCredentials)))
 	}
 
-	// Create client
 	client, err := pubsub.NewClient(ctx, infra.cfg.GCPPubSub.ProjectID, opts...)
if err != nil { return fmt.Errorf("failed to create pubsub client: %w", err) } defer client.Close() - // Create topic (if not exists) topicID := infra.cfg.GCPPubSub.TopicID topic := client.Topic(topicID) topicExists, err := topic.Exists(ctx) @@ -119,7 +110,6 @@ func (infra *infraGCPPubSub) Declare(ctx context.Context) error { } } - // Create DLQ topic (if not exists) dlqTopicID := topicID + "-dlq" dlqTopic := client.Topic(dlqTopicID) dlqTopicExists, err := dlqTopic.Exists(ctx) @@ -139,7 +129,6 @@ func (infra *infraGCPPubSub) Declare(ctx context.Context) error { } } - // Create DLQ subscription (if not exists) dlqSubID := dlqTopicID + "-sub" dlqSub := client.Subscription(dlqSubID) dlqSubExists, err := dlqSub.Exists(ctx) @@ -212,20 +201,17 @@ func (infra *infraGCPPubSub) TearDown(ctx context.Context) error { return errors.New("failed assertion: cfg.GCPPubSub != nil") // IMPOSSIBLE } - // Create client options var opts []option.ClientOption if infra.cfg.GCPPubSub.ServiceAccountCredentials != "" { opts = append(opts, option.WithCredentialsJSON([]byte(infra.cfg.GCPPubSub.ServiceAccountCredentials))) } - // Create client client, err := pubsub.NewClient(ctx, infra.cfg.GCPPubSub.ProjectID, opts...) if err != nil { return fmt.Errorf("failed to create pubsub client: %w", err) } defer client.Close() - // Delete main subscription subID := infra.cfg.GCPPubSub.SubscriptionID sub := client.Subscription(subID) subExists, err := sub.Exists(ctx) @@ -238,7 +224,6 @@ func (infra *infraGCPPubSub) TearDown(ctx context.Context) error { } } - // Delete DLQ subscription dlqTopicID := infra.cfg.GCPPubSub.TopicID + "-dlq" dlqSubID := dlqTopicID + "-sub" dlqSub := client.Subscription(dlqSubID) @@ -252,7 +237,6 @@ func (infra *infraGCPPubSub) TearDown(ctx context.Context) error { } } - // Delete main topic topicID := infra.cfg.GCPPubSub.TopicID topic := client.Topic(topicID) topicExists, err := topic.Exists(ctx) @@ -265,7 +249,6 @@ func (infra *infraGCPPubSub) TearDown(ctx context.Context) error { } } - // Delete DLQ topic dlqTopic := client.Topic(dlqTopicID) dlqTopicExists, err := dlqTopic.Exists(ctx) if err != nil { diff --git a/internal/mqinfra/rabbitmq.go b/internal/mqinfra/rabbitmq.go index e5463a74..49a86dbc 100644 --- a/internal/mqinfra/rabbitmq.go +++ b/internal/mqinfra/rabbitmq.go @@ -29,7 +29,6 @@ func (infra *infraRabbitMQ) Exist(ctx context.Context) (bool, error) { dlq := infra.cfg.RabbitMQ.Queue + ".dlq" - // Check if exchange exists using passive declare if err := ch.ExchangeDeclarePassive( infra.cfg.RabbitMQ.Exchange, // name "topic", // type @@ -46,7 +45,6 @@ func (infra *infraRabbitMQ) Exist(ctx context.Context) (bool, error) { return false, err } - // Check if main queue exists using passive declare if _, err := ch.QueueDeclarePassive( infra.cfg.RabbitMQ.Queue, // name true, // durable @@ -62,7 +60,6 @@ func (infra *infraRabbitMQ) Exist(ctx context.Context) (bool, error) { return false, err } - // Check if DLQ exists using passive declare if _, err := ch.QueueDeclarePassive( dlq, // name true, // durable @@ -99,7 +96,6 @@ func (infra *infraRabbitMQ) Declare(ctx context.Context) error { dlq := infra.cfg.RabbitMQ.Queue + ".dlq" - // Declare target exchange & queue if err := ch.ExchangeDeclare( infra.cfg.RabbitMQ.Exchange, // name "topic", // type @@ -136,7 +132,6 @@ func (infra *infraRabbitMQ) Declare(ctx context.Context) error { return err } - // Declare dead-letter queue if _, err := ch.QueueDeclare( dlq, // name true, // durable diff --git 
a/internal/portal/src/common/RetryDeliveryButton/RetryDeliveryButton.tsx b/internal/portal/src/common/RetryAttemptButton/RetryAttemptButton.tsx similarity index 76% rename from internal/portal/src/common/RetryDeliveryButton/RetryDeliveryButton.tsx rename to internal/portal/src/common/RetryAttemptButton/RetryAttemptButton.tsx index 8746eaca..b5c15d70 100644 --- a/internal/portal/src/common/RetryDeliveryButton/RetryDeliveryButton.tsx +++ b/internal/portal/src/common/RetryAttemptButton/RetryAttemptButton.tsx @@ -4,8 +4,8 @@ import { ReplayIcon } from "../Icons"; import { showToast } from "../Toast/Toast"; import { ApiContext, formatError } from "../../app"; -interface RetryDeliveryButtonProps { - deliveryId: string; +interface RetryAttemptButtonProps { + attemptId: string; disabled: boolean; loading: boolean; completed: (success: boolean) => void; @@ -13,8 +13,8 @@ interface RetryDeliveryButtonProps { iconLabel?: string; } -const RetryDeliveryButton: React.FC = ({ - deliveryId, +const RetryAttemptButton: React.FC = ({ + attemptId, disabled, loading, completed, @@ -24,12 +24,12 @@ const RetryDeliveryButton: React.FC = ({ const apiClient = useContext(ApiContext); const [retrying, setRetrying] = useState(false); - const retryDelivery = useCallback( + const retryAttempt = useCallback( async (e: MouseEvent) => { e.stopPropagation(); setRetrying(true); try { - await apiClient.fetch(`deliveries/${deliveryId}/retry`, { + await apiClient.fetch(`attempts/${attemptId}/retry`, { method: "POST", }); showToast("success", "Retry successful."); @@ -41,7 +41,7 @@ const RetryDeliveryButton: React.FC = ({ setRetrying(false); }, - [apiClient, deliveryId, completed], + [apiClient, attemptId, completed], ); return ( @@ -49,7 +49,7 @@ const RetryDeliveryButton: React.FC = ({ minimal icon={icon} iconLabel={iconLabel} - onClick={(e) => retryDelivery(e)} + onClick={(e) => retryAttempt(e)} disabled={disabled || retrying} loading={loading || retrying} > @@ -58,4 +58,4 @@ const RetryDeliveryButton: React.FC = ({ ); }; -export default RetryDeliveryButton; +export default RetryAttemptButton; diff --git a/internal/portal/src/scenes/Destination/Destination.tsx b/internal/portal/src/scenes/Destination/Destination.tsx index d6029b90..28f070f7 100644 --- a/internal/portal/src/scenes/Destination/Destination.tsx +++ b/internal/portal/src/scenes/Destination/Destination.tsx @@ -14,19 +14,17 @@ import { } from "../../typings/Destination"; import getLogo from "../../utils/logo"; import DestinationSettings from "./DestinationSettings/DestinationSettings"; -import { DeliveryRoutes } from "./Events/Deliveries"; +import { AttemptRoutes } from "./Events/Attempts"; -// Define the tab interface interface Tab { label: string; path: string; } -// Define available tabs const tabs: Tab[] = [ { label: "Overview", path: "" }, { label: "Settings", path: "/settings" }, - { label: "Deliveries", path: "/deliveries" }, + { label: "Attempts", path: "/attempts" }, ]; const Destination = () => { @@ -133,8 +131,8 @@ const Destination = () => { } /> } + path="/attempts/*" + element={} /> void; + navigateAttempt: (path: string, params?: any) => void; }) => { - const { delivery_id: deliveryId } = useParams(); + const { attempt_id: attemptId } = useParams(); - const { data: delivery } = useSWR( - `deliveries/${deliveryId}?include=event.data,response_data`, + const { data: attempt } = useSWR( + `attempts/${attemptId}?include=event.data,response_data`, ); - if (!delivery) { + if (!attempt) { return
Loading...
; } const event = - typeof delivery.event === "object" ? (delivery.event as EventFull) : null; + typeof attempt.event === "object" ? (attempt.event as EventFull) : null; return (

- {event?.topic || "Delivery"} + {event?.topic || "Attempt"}

- {}} @@ -45,7 +45,7 @@ const DeliveryDetails = ({ icon iconLabel="Close" minimal - onClick={() => navigateDelivery("/")} + onClick={() => navigateAttempt("/")} > @@ -53,30 +53,30 @@ const DeliveryDetails = ({
-
-
+
+
Status
- {delivery.code && ( + {attempt.code && (
Response Code
-
{delivery.code}
+
{attempt.code}
)}
Attempt
-
{delivery.attempt}
+
{attempt.attempt}
{event && (
@@ -87,7 +87,7 @@ const DeliveryDetails = ({
Delivered at
- {new Date(delivery.delivered_at).toLocaleString("en-US", { + {new Date(attempt.delivered_at).toLocaleString("en-US", { year: "numeric", month: "numeric", day: "numeric", @@ -99,10 +99,10 @@ const DeliveryDetails = ({
-
Delivery ID
+
Attempt ID
- {delivery.id} - + {attempt.id} +
{event && ( @@ -118,7 +118,7 @@ const DeliveryDetails = ({
{event?.data && ( -
+

Data

                 {JSON.stringify(event.data, null, 2)}
@@ -127,7 +127,7 @@ const DeliveryDetails = ({
           )}
 
           {event?.metadata && Object.keys(event.metadata).length > 0 && (
-            
+

Metadata

                 {JSON.stringify(event.metadata, null, 2)}
@@ -135,11 +135,11 @@ const DeliveryDetails = ({
             
)} - {delivery.response_data && ( -
+ {attempt.response_data && ( +

Response

-                {JSON.stringify(delivery.response_data, null, 2)}
+                {JSON.stringify(attempt.response_data, null, 2)}
               
)} @@ -149,4 +149,4 @@ const DeliveryDetails = ({ ); }; -export default DeliveryDetails; +export default AttemptDetails; diff --git a/internal/portal/src/scenes/Destination/Events/Deliveries.scss b/internal/portal/src/scenes/Destination/Events/Attempts.scss similarity index 98% rename from internal/portal/src/scenes/Destination/Events/Deliveries.scss rename to internal/portal/src/scenes/Destination/Events/Attempts.scss index fff9146c..5d37d6d2 100644 --- a/internal/portal/src/scenes/Destination/Events/Deliveries.scss +++ b/internal/portal/src/scenes/Destination/Events/Attempts.scss @@ -1,4 +1,4 @@ -.destination-deliveries { +.destination-attempts { margin-top: var(--spacing-5); margin-bottom: var(--spacing-20); @@ -32,7 +32,7 @@ display: grid; min-height: 713px; - .delivery-time-cell { + .attempt-time-cell { text-transform: uppercase; } @@ -117,7 +117,7 @@ } } -.delivery-data { +.attempt-data { height: 100%; box-sizing: border-box; diff --git a/internal/portal/src/scenes/Destination/Events/Deliveries.tsx b/internal/portal/src/scenes/Destination/Events/Attempts.tsx similarity index 77% rename from internal/portal/src/scenes/Destination/Events/Deliveries.tsx rename to internal/portal/src/scenes/Destination/Events/Attempts.tsx index 3cfb8b8b..529a21d1 100644 --- a/internal/portal/src/scenes/Destination/Events/Deliveries.tsx +++ b/internal/portal/src/scenes/Destination/Events/Attempts.tsx @@ -1,9 +1,9 @@ import { useCallback, useMemo, useState } from "react"; import Badge from "../../../common/Badge/Badge"; import Button from "../../../common/Button/Button"; -import "./Deliveries.scss"; +import "./Attempts.scss"; import Table from "../../../common/Table/Table"; -import { DeliveryListResponse, EventSummary } from "../../../typings/Event"; +import { AttemptListResponse, EventSummary } from "../../../typings/Event"; import useSWR from "swr"; import Dropdown from "../../../common/Dropdown/Dropdown"; import { @@ -13,7 +13,7 @@ import { RefreshIcon, NextIcon, } from "../../../common/Icons"; -import RetryDeliveryButton from "../../../common/RetryDeliveryButton/RetryDeliveryButton"; +import RetryAttemptButton from "../../../common/RetryAttemptButton/RetryAttemptButton"; import { Checkbox } from "../../../common/Checkbox/Checkbox"; import { Route, @@ -24,20 +24,20 @@ import { useParams, } from "react-router-dom"; import CONFIGS from "../../../config"; -import DeliveryDetails from "./DeliveryDetails"; +import AttemptDetails from "./AttemptDetails"; -interface DeliveriesProps { +interface AttemptsProps { destination: any; - navigateDelivery: (path: string, state?: any) => void; + navigateAttempt: (path: string, state?: any) => void; } -const Deliveries: React.FC = ({ +const Attempts: React.FC = ({ destination, - navigateDelivery, + navigateAttempt, }) => { const [timeRange, setTimeRange] = useState("24h"); - const { delivery_id: deliveryId } = useParams<{ delivery_id: string }>(); - const { status, topics, pagination, urlSearchParams } = useDeliveryFilter(); + const { attempt_id: attemptId } = useParams<{ attempt_id: string }>(); + const { status, topics, pagination, urlSearchParams } = useAttemptFilter(); const queryUrl = useMemo(() => { const searchParams = new URLSearchParams(urlSearchParams); @@ -70,31 +70,31 @@ const Deliveries: React.FC = ({ searchParams.set("destination_id", destination.id); searchParams.set("include", "event"); - return `deliveries?${searchParams.toString()}`; + return `attempts?${searchParams.toString()}`; }, [destination.id, timeRange, urlSearchParams]); const { - data: 
deliveriesList, + data: attemptsList, mutate, isValidating, - } = useSWR(queryUrl, { + } = useSWR(queryUrl, { revalidateOnFocus: false, }); const topicsList = CONFIGS.TOPICS.split(","); - const table_rows = deliveriesList?.models - ? deliveriesList.models.map((delivery) => { + const table_rows = attemptsList?.models + ? attemptsList.models.map((attempt) => { const event = - typeof delivery.event === "object" - ? (delivery.event as EventSummary) + typeof attempt.event === "object" + ? (attempt.event as EventSummary) : null; return { - id: delivery.id, - active: delivery.id === (deliveryId || ""), + id: attempt.id, + active: attempt.id === (attemptId || ""), entries: [ - - {new Date(delivery.delivered_at).toLocaleString("en-US", { + + {new Date(attempt.delivered_at).toLocaleString("en-US", { month: "short", day: "numeric", hour: "numeric", @@ -103,13 +103,13 @@ const Deliveries: React.FC = ({ })} , - {delivery.status === "success" ? ( + {attempt.status === "success" ? ( ) : ( )} - { @@ -120,21 +120,21 @@ const Deliveries: React.FC = ({ /> , {event?.topic || "-"}, - {delivery.id}, + {attempt.id}, ], - onClick: () => navigateDelivery(`/${delivery.id}`), + onClick: () => navigateAttempt(`/${attempt.id}`), }; }) : []; return ( -
-
-

- Deliveries{" "} - +
+
+

+ Attempts{" "} +

-
+
} trigger={`Last ${timeRange}`} @@ -230,8 +230,8 @@ const Deliveries: React.FC = ({
= ({ header: "Topic", }, { - header: "Delivery ID", + header: "Attempt ID", }, ]} rows={table_rows} @@ -256,7 +256,7 @@ const Deliveries: React.FC = ({
- {deliveriesList?.models.length ?? 0} deliveries + {attemptsList?.models.length ?? 0} attempts
@@ -264,9 +264,9 @@ const Deliveries: React.FC = ({