NEWS.md: 1 addition & 0 deletions
@@ -22,6 +22,7 @@
* Make authority-storage, request-storage, instance-authority-links dependencies optional [MODINV-1346](https://folio-org.atlassian.net/browse/MODINV-1346)
* Remove LinkUpdateReport sending on successful processing [MODINV-1348](https://folio-org.atlassian.net/browse/MODINV-1348)
* Move consumption of authority DI events from mod-inventory to mod-entities-links [APPAUTHREC-3](https://folio-org.atlassian.net/browse/APPAUTHREC-3)
+* Cancelled data import jobs update SRS records without updating Instance records [MODINV-1353](https://folio-org.atlassian.net/browse/MODINV-1353)

## 21.1.0 2025-03-13
* Update deduplication logic in mod-inventory [MODINV-1151](https://folio-org.atlassian.net/browse/MODINV-1151)
DataImportKafkaHandler.java
@@ -81,12 +81,14 @@

import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.CompletableFuture;

import static java.lang.String.format;
import static java.util.Objects.isNull;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
import static org.folio.DataImportEventTypes.DI_ERROR;
+import static org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING;
import static org.folio.inventory.dataimport.handlers.matching.util.EventHandlingUtil.OKAPI_REQUEST_ID;
import static org.folio.inventory.dataimport.handlers.matching.util.EventHandlingUtil.OKAPI_USER_ID;
import static org.folio.okapi.common.XOkapiHeaders.PERMISSIONS;
@@ -98,6 +100,9 @@ public class DataImportKafkaHandler implements AsyncRecordHandler<String, String> {
private static final String RECORD_ID_HEADER = "recordId";
private static final String CHUNK_ID_HEADER = "chunkId";
private static final String USER_ID_HEADER = "userId";
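+// Event types that are still processed for a cancelled job, so Instance
+// records are not left out of sync with already-updated SRS records (MODINV-1353).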
+private static final Set<String> CANCELLED_JOB_ALLOWED_EVENTS = Set.of(
+  DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING.value()
+);

private final Vertx vertx;
private final ProfileSnapshotCache profileSnapshotCache;
@@ -169,7 +174,7 @@ public Future<String> handle(KafkaConsumerRecord<String, String> kafkaRecord) {
String jobExecutionId = eventPayload.getJobExecutionId();
LOGGER.info("Data import event payload has been received with event type: {}, recordId: {} by jobExecution: {} and chunkId: {}", eventPayload.getEventType(), recordId, jobExecutionId, chunkId);

-if (cancelledJobsIdCache.contains(eventPayload.getJobExecutionId())) {
+if (shouldSkipEventProcessing(eventPayload)) {
LOGGER.info("Skip processing of event, topic: '{}', tenantId: '{}', jobExecutionId: '{}' recordId: '{}' because the job has been cancelled",
kafkaRecord.topic(), eventPayload.getTenant(), eventPayload.getJobExecutionId(), recordId);
return Future.succeededFuture(kafkaRecord.key());
@@ -221,6 +226,7 @@ private void registerDataImportProcessingHandlers(Storage storage, HttpClient client) {
HoldingsPreloader holdingsPreloader = new HoldingsPreloader(ordersPreloaderHelper);
ItemPreloader itemPreloader = new ItemPreloader(ordersPreloaderHelper);
SnapshotService snapshotService = new SnapshotService(client);
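+// Single shared factory, reused by every id-storage service registered below.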
+PostgresClientFactory postgresClientFactory = new PostgresClientFactory(vertx);

MatchValueLoaderFactory.register(new InstanceLoader(storage, instancePreloader));
MatchValueLoaderFactory.register(new ItemLoader(storage, itemPreloader));
@@ -248,10 +254,10 @@
)));

EventManager.registerEventHandler(new MatchAuthorityEventHandler(mappingMetadataCache, consortiumService));
-EventManager.registerEventHandler(new CreateItemEventHandler(storage, mappingMetadataCache, new ItemIdStorageService(new EntityIdStorageDaoImpl(new PostgresClientFactory(vertx))), orderHelperService));
-EventManager.registerEventHandler(new CreateHoldingEventHandler(storage, mappingMetadataCache, new HoldingsIdStorageService(new EntityIdStorageDaoImpl(new PostgresClientFactory(vertx))), orderHelperService, consortiumService));
-EventManager.registerEventHandler(new CreateInstanceEventHandler(storage, precedingSucceedingTitlesHelper, mappingMetadataCache, new InstanceIdStorageService(new EntityIdStorageDaoImpl(new PostgresClientFactory(vertx))), orderHelperService, snapshotService, client));
-EventManager.registerEventHandler(new CreateMarcHoldingsEventHandler(storage, mappingMetadataCache, new HoldingsIdStorageService(new EntityIdStorageDaoImpl(new PostgresClientFactory(vertx))), new HoldingsCollectionService(), consortiumService));
+EventManager.registerEventHandler(new CreateItemEventHandler(storage, mappingMetadataCache, new ItemIdStorageService(new EntityIdStorageDaoImpl(postgresClientFactory)), orderHelperService));
+EventManager.registerEventHandler(new CreateHoldingEventHandler(storage, mappingMetadataCache, new HoldingsIdStorageService(new EntityIdStorageDaoImpl(postgresClientFactory)), orderHelperService, consortiumService));
+EventManager.registerEventHandler(new CreateInstanceEventHandler(storage, precedingSucceedingTitlesHelper, mappingMetadataCache, new InstanceIdStorageService(new EntityIdStorageDaoImpl(postgresClientFactory)), orderHelperService, snapshotService, client));
+EventManager.registerEventHandler(new CreateMarcHoldingsEventHandler(storage, mappingMetadataCache, new HoldingsIdStorageService(new EntityIdStorageDaoImpl(postgresClientFactory)), new HoldingsCollectionService(), consortiumService));
EventManager.registerEventHandler(new UpdateMarcHoldingsEventHandler(storage, mappingMetadataCache, new KafkaEventPublisher(kafkaConfig, vertx, 100)));
EventManager.registerEventHandler(new UpdateItemEventHandler(storage, mappingMetadataCache));
EventManager.registerEventHandler(new UpdateHoldingEventHandler(storage, mappingMetadataCache));
@@ -260,6 +266,11 @@ private void registerDataImportProcessingHandlers(Storage storage, HttpClient client) {
EventManager.registerEventHandler(new MarcBibModifyEventHandler(mappingMetadataCache, new InstanceUpdateDelegate(storage), precedingSucceedingTitlesHelper, client));
}

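+// A cancelled job's events are skipped unless the event type is allow-listed
+// above; letting post-processing events through keeps the Instance update
+// paired with the SRS record change.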
+private boolean shouldSkipEventProcessing(DataImportEventPayload eventPayload) {
+  return cancelledJobsIdCache.contains(eventPayload.getJobExecutionId())
+    && !CANCELLED_JOB_ALLOWED_EVENTS.contains(eventPayload.getEventType());
+}
+
private void populateWithPermissionsHeader(DataImportEventPayload eventPayload, Map<String, String> headersMap) {
String permissions = headersMap.getOrDefault(PERMISSIONS, headersMap.get(PERMISSIONS.toLowerCase()));
if (isNotBlank(permissions)) {
DataImportKafkaHandlerTest.java
@@ -48,6 +48,8 @@
import static com.github.tomakehurst.wiremock.client.WireMock.get;
import static org.folio.ActionProfile.Action.CREATE;
import static org.folio.DataImportEventTypes.DI_INCOMING_MARC_BIB_RECORD_PARSED;
+import static org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MATCHED;
+import static org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING;
import static org.folio.inventory.dataimport.consumers.DataImportKafkaHandler.PROFILE_SNAPSHOT_ID_KEY;
import static org.folio.okapi.common.XOkapiHeaders.PERMISSIONS;
import static org.folio.rest.jaxrs.model.EntityType.INSTANCE;
@@ -185,7 +187,7 @@ public void shouldReturnFailedFutureWhenProcessingCoreHandlerFailed(TestContext context) {
DataImportEventPayload dataImportEventPayload = new DataImportEventPayload()
.withJobExecutionId(UUID.randomUUID().toString())
.withEventType(DI_INCOMING_MARC_BIB_RECORD_PARSED.value())
.withTenant("diku")
.withTenant(TENANT_ID)
.withOkapiUrl(mockServer.baseUrl())
.withToken("test-token")
.withContext(new HashMap<>(Map.of("JOB_PROFILE_SNAPSHOT_ID", profileSnapshotWrapper.getId())));
@@ -216,6 +218,7 @@ public void shouldReturnSucceededFutureAndSkipEventProcessingIfEventPayloadConta

DataImportEventPayload dataImportEventPayload = new DataImportEventPayload()
.withJobExecutionId(cancelledJobId)
+.withEventType(DI_INCOMING_MARC_BIB_RECORD_PARSED.value())
.withTenant(TENANT_ID)
.withOkapiUrl(mockServer.baseUrl())
.withContext(new HashMap<>(Map.of(PROFILE_SNAPSHOT_ID_KEY, profileSnapshotWrapper.getId())));
@@ -246,4 +249,47 @@ public void shouldReturnSucceededFutureAndSkipEventProcessingIfEventPayloadConta
}));
}

+@Test
+public void shouldProcessEventIfEventPayloadContainsCancelledJobExecutionIdButEventTypeIsDiSrsMarcBibRecordModifiedReadyForPostProcessing(TestContext context) {
+  // given
+  String expectedKafkaRecordKey = "test_key";
+  String cancelledJobId = UUID.randomUUID().toString();
+  cancelledJobsIdCache.put(cancelledJobId);
+
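+  // Payload references a cancelled job but carries an allow-listed event type,
+  // so the handler is expected to process it rather than skip it.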
+  DataImportEventPayload dataImportEventPayload = new DataImportEventPayload()
+    .withEventType(DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING.value())
+    .withJobExecutionId(cancelledJobId)
+    .withTenant(TENANT_ID)
+    .withOkapiUrl(mockServer.baseUrl())
+    .withContext(new HashMap<>(Map.of(PROFILE_SNAPSHOT_ID_KEY, profileSnapshotWrapper.getId())))
+    .withEventsChain(List.of(DI_SRS_MARC_BIB_RECORD_MATCHED.value()));
+  context.assertEquals(
+    DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING.value(), dataImportEventPayload.getEventType());
+
+  Event event = new Event().withId("01").withEventPayload(Json.encode(dataImportEventPayload));
+  List<KafkaHeader> headers = List.of(
+    KafkaHeader.header(RECORD_ID_HEADER, UUID.randomUUID().toString()),
+    KafkaHeader.header(CHUNK_ID_HEADER, UUID.randomUUID().toString())
+  );
+  when(kafkaRecord.key()).thenReturn(expectedKafkaRecordKey);
+  when(kafkaRecord.value()).thenReturn(Json.encode(event));
+  when(kafkaRecord.headers()).thenReturn(headers);
+
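+  // Stub handler that accepts any payload, so the test can observe that the
+  // event was dispatched to a handler instead of being skipped.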
+  EventHandler mockedEventHandler = mock(EventHandler.class);
+  when(mockedEventHandler.isEligible(any(DataImportEventPayload.class))).thenReturn(true);
+  when(mockedEventHandler.handle(any(DataImportEventPayload.class)))
+    .thenReturn(CompletableFuture.completedFuture(dataImportEventPayload));
+  EventManager.registerEventHandler(mockedEventHandler);
+
+  // when
+  Future<String> future = dataImportKafkaHandler.handle(kafkaRecord);
+
+  // then
+  future.onComplete(context.asyncAssertSuccess(actualKafkaRecordKey -> {
+    context.assertEquals(expectedKafkaRecordKey, actualKafkaRecordKey);
+    verify(mockedEventHandler).isEligible(any(DataImportEventPayload.class));
+    verify(mockedEventHandler).handle(any(DataImportEventPayload.class));
+  }));
+}
+
}