diff --git a/assembly/dependencies-apache-ignite-slim.xml b/assembly/dependencies-apache-ignite-slim.xml index e98695c8a0c85..6d222c7d0446e 100644 --- a/assembly/dependencies-apache-ignite-slim.xml +++ b/assembly/dependencies-apache-ignite-slim.xml @@ -156,6 +156,7 @@ org.apache.ignite:ignite-ml-h2o-model-parser org.apache.ignite:ignite-ml-spark-model-parser org.apache.ignite:ignite-ml-xgboost-model-parser + org.apache.ignite:ignite-ml-catboost-model-parser org.apache.ignite:ignite-osgi org.apache.ignite:ignite-osgi-karaf org.apache.ignite:ignite-osgi-paxlogging diff --git a/docs/_docs/index.adoc b/docs/_docs/index.adoc index 2a3ceb66fe0c3..73686d8a20069 100644 --- a/docs/_docs/index.adoc +++ b/docs/_docs/index.adoc @@ -19,7 +19,7 @@ applications that can process terabytes of data with in-memory speed. Ignite documentation introduces you to the project's main capabilities, shows how to use certain features, or how to approach cluster optimizations and issues troubleshooting. If you are new to Ignite, then start with the -link:docs/latest/quick-start/java[Quick Start Guides], and build the first application in a matter of 5-10 minutes. +link:quick-start/java[Quick Start Guides], and build the first application in a matter of 5-10 minutes. Otherwise, select the topic of your interest and have your problems solved, and questions answered. Good luck with your Ignite journey! 
diff --git a/examples/pom-standalone-lgpl.xml b/examples/pom-standalone-lgpl.xml index 6d2fe4142285c..a21776df7fac0 100644 --- a/examples/pom-standalone-lgpl.xml +++ b/examples/pom-standalone-lgpl.xml @@ -110,6 +110,12 @@ to_be_replaced_by_ignite_version + + org.apache.ignite + ignite-ml-catboost-model-parser + to_be_replaced_by_ignite_version + + org.apache.ignite ignite-ml-spark-model-parser diff --git a/examples/pom-standalone.xml b/examples/pom-standalone.xml index ca1d0972d8c98..ccce3554c072c 100644 --- a/examples/pom-standalone.xml +++ b/examples/pom-standalone.xml @@ -110,6 +110,12 @@ to_be_replaced_by_ignite_version + + org.apache.ignite + ignite-ml-catboost-model-parser + to_be_replaced_by_ignite_version + + org.apache.ignite ignite-ml-spark-model-parser diff --git a/examples/pom.xml b/examples/pom.xml index 25a5b87852a8b..62087eda3c521 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -120,6 +120,12 @@ ${project.version} + + org.apache.ignite + ignite-ml-catboost-model-parser + ${project.version} + + org.apache.ignite ignite-ml-h2o-model-parser diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/CatboostClassificationModelParserExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/CatboostClassificationModelParserExample.java new file mode 100644 index 0000000000000..e6f9f657a8a18 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/CatboostClassificationModelParserExample.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.inference.catboost; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.HashMap; +import java.util.Scanner; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import org.apache.ignite.Ignite; +import org.apache.ignite.Ignition; +import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.ml.catboost.CatboostClassificationModelParser; +import org.apache.ignite.ml.inference.Model; +import org.apache.ignite.ml.inference.builder.AsyncModelBuilder; +import org.apache.ignite.ml.inference.builder.IgniteDistributedModelBuilder; +import org.apache.ignite.ml.inference.reader.FileSystemModelReader; +import org.apache.ignite.ml.inference.reader.ModelReader; +import org.apache.ignite.ml.math.primitives.vector.NamedVector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; + +/** + * This example demonstrates how to import Catboost model and use imported model for distributed inference in Apache + * Ignite. + */ +public class CatboostClassificationModelParserExample { + /** + * Test model resource name. + */ + private static final String TEST_MODEL_RES = "examples/src/main/resources/models/catboost/model_clf.cbm"; + + /** + * Test data. + */ + private static final String TEST_DATA_RES = "examples/src/main/resources/datasets/amazon-employee-access-challenge-sample.csv"; + + /** + * Test expected results. 
+ */ + private static final String TEST_ER_RES = "examples/src/main/resources/datasets/amazon-employee-access-challenge-sample-catboost-expected-results.csv"; + + /** + * Parser. + */ + private static final CatboostClassificationModelParser parser = new CatboostClassificationModelParser(); + + /** + * Run example. + */ + public static void main(String... args) throws ExecutionException, InterruptedException, + FileNotFoundException { + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + File mdlRsrc = IgniteUtils.resolveIgnitePath(TEST_MODEL_RES); + if (mdlRsrc == null) + throw new IllegalArgumentException("File not found [resource_path=" + TEST_MODEL_RES + "]"); + + ModelReader reader = new FileSystemModelReader(mdlRsrc.getPath()); + + AsyncModelBuilder mdlBuilder = new IgniteDistributedModelBuilder(ignite, 4, 4); + + File testData = IgniteUtils.resolveIgnitePath(TEST_DATA_RES); + if (testData == null) + throw new IllegalArgumentException("File not found [resource_path=" + TEST_DATA_RES + "]"); + + File testExpRes = IgniteUtils.resolveIgnitePath(TEST_ER_RES); + if (testExpRes == null) + throw new IllegalArgumentException("File not found [resource_path=" + TEST_ER_RES + "]"); + + try (Model> mdl = mdlBuilder.build(reader, parser); + Scanner testDataScanner = new Scanner(testData); + Scanner testExpResultsScanner = new Scanner(testExpRes)) { + String header = testDataScanner.nextLine(); + String[] columns = header.split(","); + + while (testDataScanner.hasNextLine()) { + String testDataStr = testDataScanner.nextLine(); + String testExpResultsStr = testExpResultsScanner.nextLine(); + + HashMap testObj = new HashMap<>(); + String[] values = testDataStr.split(","); + + for (int i = 0; i < columns.length; i++) { + testObj.put(columns[i], Double.valueOf(values[i])); + } + + double prediction = mdl.predict(VectorUtils.of(testObj)).get(); + double expPrediction = Double.parseDouble(testExpResultsStr); + + System.out.println("Expected: " + 
expPrediction + ", prediction: " + prediction); + } + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/CatboostRegressionModelParserExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/CatboostRegressionModelParserExample.java new file mode 100644 index 0000000000000..3e5e25850afeb --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/CatboostRegressionModelParserExample.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.inference.catboost; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.HashMap; +import java.util.Scanner; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import org.apache.ignite.Ignite; +import org.apache.ignite.Ignition; +import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.ml.catboost.CatboostRegressionModelParser; +import org.apache.ignite.ml.inference.Model; +import org.apache.ignite.ml.inference.builder.AsyncModelBuilder; +import org.apache.ignite.ml.inference.builder.IgniteDistributedModelBuilder; +import org.apache.ignite.ml.inference.reader.FileSystemModelReader; +import org.apache.ignite.ml.inference.reader.ModelReader; +import org.apache.ignite.ml.math.primitives.vector.NamedVector; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; + +/** + * This example demonstrates how to import Catboost model and use imported model for distributed inference in Apache + * Ignite. + */ +public class CatboostRegressionModelParserExample { + /** + * Test model resource name. + * */ + private static final String TEST_MODEL_RES = "examples/src/main/resources/models/catboost/model_reg.cbm"; + + /** + * Test data. + */ + private static final String TEST_DATA_RES = "examples/src/main/resources/datasets/boston_housing_dataset.txt"; + + /** + * Test expected results. + */ + private static final String TEST_ER_RES = "examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results.txt"; + + /** + * Parser. + */ + private static final CatboostRegressionModelParser parser = new CatboostRegressionModelParser(); + + /** + * Run example. + */ + public static void main(String... 
args) throws ExecutionException, InterruptedException, + FileNotFoundException { + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + File mdlRsrc = IgniteUtils.resolveIgnitePath(TEST_MODEL_RES); + if (mdlRsrc == null) + throw new IllegalArgumentException("File not found [resource_path=" + TEST_MODEL_RES + "]"); + + ModelReader reader = new FileSystemModelReader(mdlRsrc.getPath()); + AsyncModelBuilder mdlBuilder = new IgniteDistributedModelBuilder(ignite, 4, 4); + + File testData = IgniteUtils.resolveIgnitePath(TEST_DATA_RES); + if (testData == null) + throw new IllegalArgumentException("File not found [resource_path=" + TEST_DATA_RES + "]"); + + File testExpRes = IgniteUtils.resolveIgnitePath(TEST_ER_RES); + if (testExpRes == null) + throw new IllegalArgumentException("File not found [resource_path=" + TEST_ER_RES + "]"); + + try (Model> mdl = mdlBuilder.build(reader, parser); + Scanner testDataScanner = new Scanner(testData); + Scanner testExpResultsScanner = new Scanner(testExpRes)) { + String[] columns = new String[]{ + "f_0", + "f_1", + "f_2", + "f_3", + "f_4", + "f_5", + "f_6", + "f_7", + "f_8", + "f_9", + "f_10", + "f_11", + "f_12", + }; + + while (testDataScanner.hasNextLine()) { + String testDataStr = testDataScanner.nextLine(); + String testExpResultsStr = testExpResultsScanner.nextLine(); + + HashMap testObj = new HashMap<>(); + String[] values = testDataStr.split(","); + + for (int i = 0; i < columns.length; i++) { + testObj.put(columns[i], Double.valueOf(values[i])); + } + + double prediction = mdl.predict(VectorUtils.of(testObj)).get(); + double expPrediction = Double.parseDouble(testExpResultsStr); + + System.out.println("Expected: " + expPrediction + ", prediction: " + prediction); + } + } + } + finally { + System.out.flush(); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java 
b/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/package-info.java similarity index 80% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java rename to examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/package-info.java index e7cfd411f3755..70bbbd2b8b902 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/InsertLast.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/inference/catboost/package-info.java @@ -15,10 +15,8 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.cache.persistence.tree.util; - /** - * Rows with this marker interface will always be inserted in the very end of the tree. + * Catboost model inference examples. */ -public interface InsertLast { -} + +package org.apache.ignite.examples.ml.inference.catboost; diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/TargetEncoderExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/TargetEncoderExample.java new file mode 100644 index 0000000000000..e3864b6721d79 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/encoding/TargetEncoderExample.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.preprocessing.encoding; + +import java.io.FileNotFoundException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.examples.ml.util.MLSandboxDatasets; +import org.apache.ignite.examples.ml.util.SandboxMLCache; +import org.apache.ignite.ml.composition.ModelsComposition; +import org.apache.ignite.ml.composition.boosting.GDBTrainer; +import org.apache.ignite.ml.composition.boosting.convergence.median.MedianOfMedianConvergenceCheckerFactory; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.ObjectArrayVectorizer; +import org.apache.ignite.ml.preprocessing.Preprocessor; +import org.apache.ignite.ml.preprocessing.encoding.EncoderTrainer; +import org.apache.ignite.ml.preprocessing.encoding.EncoderType; +import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator; +import org.apache.ignite.ml.selection.scoring.metric.classification.Accuracy; +import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer; + +/** + * Example that shows how to use Target Encoder preprocessor to encode labels presented as a mean target value. + *

+ * Code in this example launches Ignite grid and fills the cache with test data (based on amazon-employee-access-challenge dataset).

+ *

+ * After that it defines preprocessors that extract features from the upstream data and encode category with average + target value (categories).

+ *

+ * Then, it trains the model based on the processed data using gradient boosting decision tree classification.

+ *

+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.

+ * + *

 Daniele Micci-Barreca (2001). A Preprocessing Scheme for High-Cardinality Categorical + * Attributes in Classification and Prediction Problems. SIGKDD Explor. Newsl. 3, 1. + * From http://dx.doi.org/10.1145/507533.507538

+ */ +public class TargetEncoderExample { + /** + * Run example. + */ + public static void main(String[] args) { + System.out.println(); + System.out.println(">>> Train Gradient Boosing Decision Tree model on amazon-employee-access-challenge_train.csv dataset."); + + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + try { + IgniteCache dataCache = new SandboxMLCache(ignite) + .fillObjectCacheWithCategoricalData(MLSandboxDatasets.AMAZON_EMPLOYEE_ACCESS); + + Set featuresIndexies = new HashSet<>(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9)); + Set targetEncodedfeaturesIndexies = new HashSet<>(Arrays.asList(1, 5, 6)); + Integer targetIndex = 0; + + final Vectorizer vectorizer = new ObjectArrayVectorizer(featuresIndexies.toArray(new Integer[0])) + .labeled(targetIndex); + + Preprocessor strEncoderPreprocessor = new EncoderTrainer() + .withEncoderType(EncoderType.STRING_ENCODER) + .withEncodedFeature(0) + .withEncodedFeatures(featuresIndexies) + .fit(ignite, + dataCache, + vectorizer + ); + + Preprocessor targetEncoderProcessor = new EncoderTrainer() + .withEncoderType(EncoderType.TARGET_ENCODER) + .labeled(0) + .withEncodedFeatures(targetEncodedfeaturesIndexies) + .minSamplesLeaf(1) + .minCategorySize(1L) + .smoothing(1d) + .fit(ignite, + dataCache, + strEncoderPreprocessor + ); + + Preprocessor lbEncoderPreprocessor = new EncoderTrainer() + .withEncoderType(EncoderType.LABEL_ENCODER) + .fit(ignite, + dataCache, + targetEncoderProcessor + ); + + GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(0.5, 500, 4, 0.) + .withCheckConvergenceStgyFactory(new MedianOfMedianConvergenceCheckerFactory(0.1)); + + // Train model. 
+ ModelsComposition mdl = trainer.fit( + ignite, + dataCache, + lbEncoderPreprocessor + ); + + System.out.println("\n>>> Trained model: " + mdl); + + double accuracy = Evaluator.evaluate( + dataCache, + mdl, + lbEncoderPreprocessor, + new Accuracy() + ); + + System.out.println("\n>>> Accuracy " + accuracy); + System.out.println("\n>>> Test Error " + (1 - accuracy)); + + System.out.println(">>> Train Gradient Boosing Decision Tree model on amazon-employee-access-challenge_train.csv dataset."); + + } + catch (FileNotFoundException e) { + e.printStackTrace(); + } + } + finally { + System.out.flush(); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/util/MLSandboxDatasets.java b/examples/src/main/java/org/apache/ignite/examples/ml/util/MLSandboxDatasets.java index 7021e7dde4587..9f706599af012 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/util/MLSandboxDatasets.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/util/MLSandboxDatasets.java @@ -68,7 +68,10 @@ public enum MLSandboxDatasets { MIXED_DATASET("examples/src/main/resources/datasets/mixed_dataset.csv", true, ","), /** A dataset with categorical features and labels. */ - MUSHROOMS("examples/src/main/resources/datasets/mushrooms.csv", true, ","); + MUSHROOMS("examples/src/main/resources/datasets/mushrooms.csv", true, ","), + + /** A dataset with categorical features and labels. */ + AMAZON_EMPLOYEE_ACCESS("examples/src/main/resources/datasets/amazon-employee-access-challenge_train.csv", true, ","); /** Filename. 
*/ private final String filename; diff --git a/examples/src/main/resources/datasets/amazon-employee-access-challenge-sample-catboost-expected-results.csv b/examples/src/main/resources/datasets/amazon-employee-access-challenge-sample-catboost-expected-results.csv new file mode 100644 index 0000000000000..b5c34f5fcaa01 --- /dev/null +++ b/examples/src/main/resources/datasets/amazon-employee-access-challenge-sample-catboost-expected-results.csv @@ -0,0 +1,4 @@ +0.9928904609329371 +0.9963369818846654 +0.9775200762137463 +0.9491935983699706 diff --git a/examples/src/main/resources/datasets/amazon-employee-access-challenge-sample.csv b/examples/src/main/resources/datasets/amazon-employee-access-challenge-sample.csv new file mode 100644 index 0000000000000..ba86b87e605c5 --- /dev/null +++ b/examples/src/main/resources/datasets/amazon-employee-access-challenge-sample.csv @@ -0,0 +1,5 @@ +RESOURCE,MGR_ID,ROLE_ROLLUP_1,ROLE_ROLLUP_2,ROLE_DEPTNAME,ROLE_TITLE,ROLE_FAMILY_DESC,ROLE_FAMILY,ROLE_CODE +39353,85475,117961,118300,123472,117905,117906,290919,117908 +17183,1540,117961,118343,123125,118536,118536,308574,118539 +36724,14457,118219,118220,117884,117879,267952,19721,117880 +36135,5396,117961,118343,119993,118321,240983,290919,118322 diff --git a/examples/src/main/resources/datasets/amazon-employee-access-challenge_train.csv b/examples/src/main/resources/datasets/amazon-employee-access-challenge_train.csv new file mode 100644 index 0000000000000..ec68fef127eaa --- /dev/null +++ b/examples/src/main/resources/datasets/amazon-employee-access-challenge_train.csv @@ -0,0 +1,100 @@ +ACTION,RESOURCE,MGR_ID,ROLE_ROLLUP_1,ROLE_ROLLUP_2,ROLE_DEPTNAME,ROLE_TITLE,ROLE_FAMILY_DESC,ROLE_FAMILY,ROLE_CODE +1,39353,85475,117961,118300,123472,117905,117906,290919,117908 +1,17183,1540,117961,118343,123125,118536,118536,308574,118539 +1,36724,14457,118219,118220,117884,117879,267952,19721,117880 +1,36135,5396,117961,118343,119993,118321,240983,290919,118322 
+1,42680,5905,117929,117930,119569,119323,123932,19793,119325 +0,45333,14561,117951,117952,118008,118568,118568,19721,118570 +1,25993,17227,117961,118343,123476,118980,301534,118295,118982 +1,19666,4209,117961,117969,118910,126820,269034,118638,126822 +1,31246,783,117961,118413,120584,128230,302830,4673,128231 +1,78766,56683,118079,118080,117878,117879,304519,19721,117880 +1,4675,3005,117961,118413,118481,118784,117906,290919,118786 +1,15030,94005,117902,118041,119238,119093,138522,119095,119096 +1,79954,46608,118315,118463,122636,120773,123148,118960,120774 +1,4675,50997,91261,118026,118202,119962,168365,118205,119964 +1,95836,18181,117961,118343,118514,118321,117906,290919,118322 +1,19484,6657,118219,118220,118221,117885,117886,117887,117888 +1,114267,23136,117961,118052,119742,118321,117906,290919,118322 +1,35197,57715,117961,118446,118701,118702,118703,118704,118705 +1,86316,7002,117961,118343,123125,118278,132715,290919,118279 +1,27785,5636,117961,118413,122007,118321,117906,290919,118322 +1,37427,5220,117961,118300,118458,120006,303717,118424,120008 +1,15672,111936,117961,118300,118783,117905,240983,290919,117908 +1,92885,744,117961,118300,119181,118777,279443,308574,118779 +1,1020,85475,117961,118300,120410,118321,117906,290919,118322 +1,4675,7551,117961,118052,118867,118259,117906,290919,118261 +1,41334,28253,118315,118463,123089,118259,128796,290919,118261 +1,77385,14829,117961,118052,119986,117905,117906,290919,117908 +1,20273,11506,118216,118587,118846,179731,128361,117887,117973 +1,78098,46556,118090,118091,117884,118568,165015,19721,118570 +1,79328,4219,117961,118300,120312,120313,144958,118424,120315 +1,23921,4953,117961,118343,119598,120344,310997,118424,120346 +1,34687,815,117961,118300,123719,117905,117906,290919,117908 +1,43452,169112,117902,118041,119781,118563,121024,270488,118565 +1,33248,4929,117961,118300,118825,118826,226343,118424,118828 +1,78282,7445,117961,118343,122299,118054,121350,117887,118055 
+1,17183,794,118752,119070,117945,280788,152940,292795,119082 +1,38658,1912,119134,119135,118042,120097,174445,270488,120099 +1,14354,50368,117926,118266,117884,118568,281735,19721,118570 +1,45019,1080,117961,118327,118378,120952,120953,118453,120954 +1,13878,1541,117961,118225,123173,120812,123174,118638,120814 +1,14570,46805,117929,117930,117920,118568,281735,19721,118570 +0,74310,49521,117961,118300,118301,119849,235245,118638,119851 +1,6977,1398,117961,118300,120722,118784,130735,290919,118786 +1,31613,5899,117961,118327,120318,118777,296252,308574,118779 +1,1020,21127,117961,118052,119408,118777,279443,308574,118779 +1,32270,3887,117961,118343,120347,120348,265969,118295,120350 +1,19629,19645,117961,118413,118481,118784,240983,290919,118786 +1,15702,1938,117961,118300,118066,120560,304465,118643,120562 +1,113037,5396,117961,118343,119993,120773,118959,118960,120774 +1,20279,17695,117890,117891,117878,117879,117879,19721,117880 +1,80746,16690,117961,118446,119064,122022,131302,119221,122024 +1,80263,36145,117961,118052,120304,307024,311622,118331,118332 +1,73753,70062,117961,118386,118746,117905,117906,290919,117908 +1,39883,7551,117961,118052,118867,117905,172635,290919,117908 +1,25993,7023,117961,117962,119223,118259,118260,290919,118261 +0,78106,50613,117916,118150,118810,118568,159905,19721,118570 +1,33150,1915,117961,118300,119181,118784,117906,290919,118786 +1,34817,5899,117961,118327,120318,118641,240982,118643,118644 +1,28354,3860,117961,118446,120317,118321,117906,290919,118322 +1,33642,13196,117951,117952,117941,117879,117897,19721,117880 +1,26430,56310,118212,118580,117895,117896,117913,117887,117898 +1,28149,50120,91261,118026,119507,118321,117906,290919,118322 +1,40867,6736,117961,117969,6725,122290,268766,6725,122292 +1,20293,273476,117926,118266,117920,118568,310732,19721,118570 +1,36020,2163,118219,118220,120694,118777,130218,308574,118779 +1,60006,16821,117961,118225,120535,118396,269406,118398,118399 
+0,35043,14800,117961,117962,118352,118784,117906,290919,118786 +1,17308,4088,117961,118300,118458,118728,223125,118295,118730 +0,15716,18073,118256,118257,118623,118995,286106,292795,118997 +1,39883,55956,118555,118178,119262,117946,119727,292795,117948 +1,42031,88387,118315,118463,118522,119172,121927,118467,119174 +1,27124,2318,117961,118327,118933,117905,117906,290919,117908 +1,35498,18454,117961,118343,119598,125171,257115,118424,125173 +1,79168,58465,118602,118603,117941,117885,119621,117887,117888 +1,2252,782,117961,118413,127522,118784,240983,290919,118786 +1,45652,7338,117961,118225,119924,118321,118448,290919,118322 +1,23921,4145,117961,118300,120026,307024,303717,118331,118332 +1,95247,50690,118269,118270,117878,118568,118568,19721,118570 +1,78844,15645,117961,118052,122392,128903,160695,292795,128905 +1,19481,10627,118106,118107,119565,179731,155780,117887,117973 +1,18380,44022,117961,117962,122215,127782,130085,290919,127783 +1,37734,58406,117975,117976,117884,117885,117913,117887,117888 +1,3853,17550,117961,118446,118684,118321,117906,290919,118322 +1,278393,7076,117961,118225,120323,119093,136840,119095,119096 +1,35625,6454,117961,118343,118856,117905,240983,290919,117908 +1,35066,17465,91261,118026,118202,118278,118260,290919,118279 +1,3853,5043,117961,118300,118458,120006,310997,118424,120008 +1,41569,16671,117961,118052,118706,118523,310608,118331,118525 +1,25862,46224,117961,118327,118378,120952,143223,118453,120954 +1,75078,45963,117961,118386,118896,122645,309858,119221,122647 +1,1020,1483,117961,117962,118840,118641,306399,118643,118644 +0,22956,3967,117961,118052,118706,118321,117906,290919,118322 +1,20364,2612,117961,118386,123901,117905,117906,290919,117908 +1,28943,7547,117961,118052,118933,118784,213944,290919,118786 +1,75329,17414,118752,119070,118042,118043,151099,270488,118046 +1,41569,70066,91261,118026,118202,117905,117906,290919,117908 +1,4684,50806,117961,118446,119961,118259,118260,290919,118261 
+1,77943,4478,117961,118386,118692,118321,117906,290919,118322 +1,38860,15541,118573,118574,118556,280788,127423,292795,119082 diff --git a/examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results.txt b/examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results.txt new file mode 100644 index 0000000000000..70bd4503cfaf9 --- /dev/null +++ b/examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results.txt @@ -0,0 +1,505 @@ +21.164552741740483 +34.44455359262485 +33.94734205787078 +35.358021389142024 +28.217148379945492 +21.979623483476228 +25.179151087820795 +16.663784000265505 +18.6332288900902 +16.057363242333274 +19.620020829117497 +21.425096156547266 +20.18953001360293 +18.541744236595534 +19.905009893658633 +22.817998611044295 +17.503116976482364 +19.602061128432926 +18.714094616880438 +13.043964078144153 +17.95103957809092 +16.11765782736545 +14.64413663576813 +16.185286414626507 +14.496079185487954 +15.881579761736644 +15.048892781075 +18.215320269522508 +20.433786993263364 +12.775459494813123 +15.38727503366468 +13.40734400102275 +13.593646293177692 +13.557122556169663 +20.283611241930156 +20.293302348210815 +21.318776069286322 +23.952502661770687 +30.344785709705167 +35.05826914648418 +27.20049305979657 +24.61178278078263 +24.618991512565287 +21.935606702480257 +20.0536180503364 +20.06581000678366 +17.215167736904057 +14.710548808235021 +19.00364327349821 +20.082422388118232 +20.736569614291824 +25.80355134178083 +22.525808056631174 +18.730933044239823 +35.60300729810213 +24.361817631604794 +31.72993595445449 +23.198333216803814 +19.85002226443056 +18.285418055538244 +16.75095037452558 +22.464570879312504 +24.679784812215896 +32.86400124477411 +24.05478101103431 +19.348380567799698 +20.999621681886243 +18.132191466275003 +20.832009749075805 +24.088971315686862 +21.813085306481824 +22.894053567336112 +23.379104331575054 +24.33053821245845 +22.152640952593423 +20.411872025744643 
+21.408623510845107 +20.810299700032573 +20.70531852050972 +27.310483630842196 +23.94836472063911 +24.25239921382196 +22.649113302157275 +23.18983802717364 +26.379810372978937 +21.403543045498985 +23.041037015322697 +24.642863977405163 +29.241700734347805 +22.93261682207133 +22.172194719762174 +23.704440957027128 +25.15136140666791 +20.663894656862876 +27.853780131480843 +21.92962322037711 +38.82011666216642 +43.89667059543811 +32.91042869715988 +26.045561593459762 +26.110476484368686 +18.880196699725307 +19.960637302429078 +20.242191052597644 +18.881321953619217 +18.99356014672636 +20.192418865632042 +20.196003721285983 +19.188430635701017 +22.00423135469518 +22.930827665114776 +18.93940211813681 +18.947288931081346 +19.774110600032568 +18.220372489208046 +21.30392472528196 +19.752774329500976 +19.504171941732174 +19.698920034030287 +22.225853149605744 +20.688122106679526 +20.039462328714063 +16.925492403924604 +18.99852203470001 +21.254047505796265 +16.01625259336251 +15.94864028248406 +17.48642470955056 +15.28896627644686 +19.396660296420013 +19.782549243746097 +21.812708812078647 +17.710152224835046 +15.413100325589951 +17.70340364100517 +17.384525367273696 +17.907392701793928 +13.607239056111437 +16.847521564783612 +14.499023063866698 +14.265860573929002 +14.03589509226626 +15.019842627560848 +12.663891023766888 +14.131570716919317 +16.2461219598252 +14.298816599091582 +16.637092245956154 +15.604086293154985 +20.702527127474973 +19.46740940424256 +15.985108607193087 +18.245044278822125 +17.300417921712594 +15.478312040288447 +13.726513847035916 +40.46153474218855 +24.729832310129535 +23.958445104246024 +27.167020538774082 +49.81376987494329 +49.763899610387476 +49.740265263888446 +21.953542778261728 +24.566058286243084 +50.56979388715128 +22.787064866885373 +22.996984087726574 +22.575360458961253 +18.829906022042753 +19.95614299480929 +22.489287237090124 +24.31344455301194 +22.853874878872183 +29.231831811224854 +22.8922437114595 +24.120043903186193 
+29.23591807260737 +37.73932876923115 +40.324333387355985 +34.67045200524914 +37.583641702156974 +32.247707746472045 +26.258909528694033 +29.340846958558156 +49.12469209055186 +31.02349742520599 +30.187382160082088 +35.02371794277118 +35.646983341238524 +30.553148093421097 +36.941046577918655 +30.593813975152443 +29.054323721705735 +49.615315903587565 +34.353585404168356 +30.36973215187362 +33.77575025416485 +33.991299512614276 +33.230401227171384 +23.713437729224786 +42.564342273096344 +48.79360052051148 +49.61205410881091 +23.08941526105016 +23.666777788950462 +21.66200393617301 +24.0390258095901 +19.488259028903943 +21.43819058183768 +19.49823133809392 +22.718210403212506 +27.154130376929203 +23.50554318162597 +25.08489891352236 +22.86599092502905 +27.047978614874694 +21.930162059226504 +23.554578852162642 +27.29640542044021 +21.505124097374143 +27.533662049839982 +27.86430902213703 +45.209006701495 +48.87051996243624 +38.46129207721255 +31.53450081366175 +46.364673518296854 +31.64997970969701 +23.49765869642234 +32.28680389245416 +42.04361549831056 +47.54834973862798 +28.575148034368123 +23.688344788258444 +25.37476714203794 +32.425419627828845 +24.347614887508044 +24.50129050865779 +22.798639215956133 +20.033968936438214 +21.386284275663467 +23.795449412147377 +17.45414151144564 +18.580643011708247 +23.377981149376673 +20.53341228602143 +23.52185945551625 +26.170461500376256 +24.362040264816457 +25.300382075623922 +29.775991729691 +42.55303137829672 +22.035104870392306 +20.563120544406843 +43.535566665229666 +49.77398640688496 +36.143807589723615 +30.632534298225195 +34.47606813686944 +42.53603143850725 +48.95898311562212 +31.471207216881954 +36.19925373952015 +22.85960349136891 +30.300818284084684 +49.75406749441813 +43.6129177696169 +21.327438277212263 +20.710391564187564 +25.36250083294143 +24.762201014336746 +35.71769761604902 +32.33753165907187 +31.913203988376274 +33.25328392022616 +32.73407024554032 +27.917981706763676 +34.878131976046504 
+45.70427124038703 +35.71065315134212 +45.8587657829742 +50.187239973555606 +31.841635005997325 +22.501652910867552 +20.063893302618112 +23.257681189466947 +22.56026584874239 +24.250803257188185 +29.69251939059277 +36.19973924039158 +27.992073514249764 +24.32486806953272 +21.84106052875242 +28.378832199186164 +26.422031489981613 +19.959059689045365 +22.679639079970237 +29.656162409051543 +25.33847765530765 +23.282054947829984 +25.566786207892285 +33.26997806384548 +35.62200679627758 +28.17816316825635 +33.730764176133064 +28.46297981036522 +23.91387745908587 +20.31472216983363 +16.899493917810325 +22.65231504224782 +18.984427852082455 +21.755350311707566 +23.361440743987306 +17.624085959832733 +17.941534685477443 +19.052289922834408 +22.556847572697198 +21.307587876227466 +23.80074402757554 +23.276569706109512 +21.1864449651466 +18.461470447811372 +24.088155499568764 +25.40645329433642 +23.746955009726367 +21.580417567916797 +19.36526836266955 +22.618174026483366 +20.222332662324682 +17.5401189541411 +19.7325917476802 +22.25366785871715 +21.09161841102577 +20.897489003359453 +18.974567501413663 +18.414270972726804 +20.437094838240856 +19.665255050964582 +19.498610552709884 +32.4347320626257 +17.188042095340236 +24.520317088589213 +30.848528359497863 +17.962569662453305 +17.593474675697106 +23.311048681685914 +24.887093655576127 +26.338759207592332 +23.10634435908665 +23.746558541675498 +19.11156329389437 +30.276577355747925 +18.131565994922134 +20.466981614454447 +17.557677174097133 +21.416144608946404 +22.42945458898263 +22.25725888336469 +24.741343605559717 +19.234306772123688 +21.502699798934817 +17.844222132970824 +22.193450303574235 +27.522943412034657 +20.734608359385405 +23.22083675952978 +49.7268756453758 +50.316387345329446 +49.83714473296656 +49.17688834360419 +49.77567522132131 +14.182533585805587 +13.384975617772794 +15.371679422004473 +13.891899259189174 +13.078399336852993 +11.689877320993613 +10.40629866815365 +10.590988449775296 +11.187511710856114 
+10.699404391438337 +11.299016068261464 +8.948054514582488 +7.6009282232279265 +9.812052746463724 +7.6331647802079825 +9.990588038608827 +11.308808817142909 +14.673930682603622 +21.155687492192005 +10.2551456581643 +14.460236437489163 +12.313406691919944 +13.2493083046598 +12.582734504218593 +10.197401747365666 +5.208812701405201 +7.017469485197408 +6.031846616563065 +8.71441499272153 +11.95215600950543 +8.410353928522795 +8.327592690998838 +5.439373354800356 +12.394442232227915 +27.66659948948724 +16.838093968757427 +26.208693358875212 +15.486109129210284 +17.564240425969317 +17.23959811092 +16.04148834711414 +7.424796860641504 +7.574708004733665 +8.374715347661533 +10.099487093475474 +8.117528752079668 +8.895503368583991 +16.15593811973578 +14.918601374011807 +19.823062668655528 +13.176943012611968 +11.787692311721155 +8.27071448325512 +10.83272368748242 +10.546354866229864 +10.822758452077135 +9.632229397417156 +14.283801758103968 +14.622417750027324 +16.63389720007538 +14.252782253498086 +12.106427930309948 +13.17368952190395 +10.382466517837265 +8.593917899919186 +8.341010543309778 +12.358234984129313 +10.437531194460895 +15.920114960755544 +17.156717190383212 +15.535071003939581 +10.90063547746989 +10.820305432589691 +14.921240356012788 +13.867640472748148 +14.20031078315193 +13.562450456065102 +13.174301057770835 +15.516887774877546 +16.6214238533029 +17.856618031639137 +13.995438919234763 +13.9918492639568 +13.462096502177403 +13.782174549809977 +15.81476545696465 +19.23055474486078 +16.721742536354455 +18.371669151125573 +19.54678404584893 +20.962653748742536 +21.92505449677977 +20.150160651727457 +18.01599065887942 +17.385312783339558 +18.75484442216188 +19.78934290291277 +19.34178188165078 +20.192165880962822 +21.571657564965964 +29.102871141380515 +14.452906705647992 +13.676531786057728 +17.078332183275275 +12.139745070856558 +15.627002831948143 +21.074845276293402 +22.277023781019544 +24.402402749088747 +25.384059230134838 +21.493854249161604 
+20.694732491502627 +21.59250678105503 +19.381287338956593 +21.218798956538112 +14.800212716306316 +7.9354523912413395 +8.101134581126702 +14.030090136930372 +19.297195651119253 +21.180770151806414 +23.31680605026658 +22.639547784601067 +19.18771952159576 +19.418581405301946 +21.367034972981788 +18.510321322006938 +18.222278628288336 +22.415673688219442 +19.581687315022645 +23.395332403966947 +22.29817573266537 +13.314773833341942 diff --git a/examples/src/main/resources/models/catboost/model_clf.cbm b/examples/src/main/resources/models/catboost/model_clf.cbm new file mode 100644 index 0000000000000..f915c27cd6b87 Binary files /dev/null and b/examples/src/main/resources/models/catboost/model_clf.cbm differ diff --git a/examples/src/main/resources/models/catboost/model_reg.cbm b/examples/src/main/resources/models/catboost/model_reg.cbm new file mode 100644 index 0000000000000..d311a529143fd Binary files /dev/null and b/examples/src/main/resources/models/catboost/model_reg.cbm differ diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/PersistenceBasicCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/PersistenceBasicCompatibilityTest.java index 2a283e551a07e..3c4c16360cf14 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/PersistenceBasicCompatibilityTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/PersistenceBasicCompatibilityTest.java @@ -78,8 +78,8 @@ public class PersistenceBasicCompatibilityTest extends IgnitePersistenceCompatib * @throws Exception If failed. 
*/ @Test - public void testNodeStartByOldVersionPersistenceData_2_2() throws Exception { - doTestStartupWithOldVersion("2.2.0"); + public void testNodeStartByOldVersionPersistenceData_2_1() throws Exception { + doTestStartupWithOldVersion("2.1.0"); } /** @@ -88,8 +88,8 @@ public void testNodeStartByOldVersionPersistenceData_2_2() throws Exception { * @throws Exception If failed. */ @Test - public void testNodeStartByOldVersionPersistenceData_2_1() throws Exception { - doTestStartupWithOldVersion("2.1.0"); + public void testNodeStartByOldVersionPersistenceData_2_2() throws Exception { + doTestStartupWithOldVersion("2.2.0"); } /** @@ -132,6 +132,56 @@ public void testNodeStartByOldVersionPersistenceData_2_6() throws Exception { doTestStartupWithOldVersion("2.6.0"); } + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. + */ + @Test + public void testNodeStartByOldVersionPersistenceData_2_7() throws Exception { + doTestStartupWithOldVersion("2.7.0"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. + */ + @Test + public void testNodeStartByOldVersionPersistenceData_2_7_6() throws Exception { + doTestStartupWithOldVersion("2.7.6"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. + */ + @Test + public void testNodeStartByOldVersionPersistenceData_2_8() throws Exception { + doTestStartupWithOldVersion("2.8.0"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. + */ + @Test + public void testNodeStartByOldVersionPersistenceData_2_8_1() throws Exception { + doTestStartupWithOldVersion("2.8.1"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. 
+ */ + @Test + public void testNodeStartByOldVersionPersistenceData_2_9() throws Exception { + doTestStartupWithOldVersion("2.9.0"); + } + /** * Tests opportunity to read data from previous Ignite DB version. * diff --git a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DefragmentationCommand.java b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DefragmentationCommand.java index ec5c1f0a02992..e42186395edac 100644 --- a/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DefragmentationCommand.java +++ b/modules/control-utility/src/main/java/org/apache/ignite/internal/commandline/DefragmentationCommand.java @@ -226,7 +226,6 @@ private void printResult(VisorDefragmentationTaskResult res, Logger log) { private VisorDefragmentationTaskArg convertArguments() { return new VisorDefragmentationTaskArg( convertSubcommand(args.subcommand()), - args.nodeIds(), args.cacheNames() ); } diff --git a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/ClusterNodeAttributeAffinityBackupFilter.java b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/ClusterNodeAttributeAffinityBackupFilter.java index 592ca53cdd7b2..7a94d7a579be0 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/ClusterNodeAttributeAffinityBackupFilter.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/ClusterNodeAttributeAffinityBackupFilter.java @@ -24,10 +24,10 @@ import org.apache.ignite.lang.IgniteBiPredicate; /** - * This class can be used as a {@link RendezvousAffinityFunction#affinityBackupFilter } to create - * cache templates in Spring that force each partition's primary and backup to different hardware which - * is not expected to fail simultaneously, e.g., in AWS, to different "availability zones". 
This + * Attribute-based affinity backup filter that forces each partition's primary and backup nodes to different hardware + * which is not expected to fail simultaneously, e.g., in AWS, to different "availability zones". This * is a per-partition selection, and different partitions may choose different primaries. + * See {@link RendezvousAffinityFunction#setAffinityBackupFilter}. *

* This implementation will discard backups rather than place multiple on the same set of nodes. This avoids * trying to cram more data onto remaining nodes when some have failed. @@ -91,7 +91,7 @@ public class ClusterNodeAttributeAffinityBackupFilter implements IgniteBiPredica public ClusterNodeAttributeAffinityBackupFilter(String... attributeNames) { A.ensure(attributeNames.length > 0, "attributeNames.length > 0"); - this.attributeNames = attributeNames; + this.attributeNames = attributeNames.clone(); } /** @@ -128,4 +128,12 @@ public ClusterNodeAttributeAffinityBackupFilter(String... attributeNames) { return true; } + /** + * Gets attribute names. + * + * @return Attribute names. + */ + public String[] getAttributeNames() { + return attributeNames.clone(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java index 2a1927b79fdaa..2fe48248bd128 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java @@ -72,6 +72,9 @@ public class DataStorageConfiguration implements Serializable { /** */ private static final long serialVersionUID = 0L; + /** Value used for making WAL archive size unlimited */ + public static final long UNLIMITED_WAL_ARCHIVE = -1; + /** Default data region start size (256 MB). */ public static final long DFLT_DATA_REGION_INITIAL_SIZE = 256L * 1024 * 1024; @@ -594,21 +597,26 @@ public boolean isWalHistorySizeParameterUsed() { /** * Gets a max allowed size(in bytes) of WAL archives. * - * @return max size(in bytes) of WAL archive directory(always greater than 0). + * @return max size(in bytes) of WAL archive directory(greater than 0, or {@link #UNLIMITED_WAL_ARCHIVE} if + * WAL archive size is unlimited). 
*/ public long getMaxWalArchiveSize() { - return maxWalArchiveSize <= 0 ? DFLT_WAL_ARCHIVE_MAX_SIZE : maxWalArchiveSize; + return maxWalArchiveSize; } /** * Sets a max allowed size(in bytes) of WAL archives. * - * If value is not positive, {@link #DFLT_WAL_ARCHIVE_MAX_SIZE} will be used. + * If value is not positive or {@link #UNLIMITED_WAL_ARCHIVE}, {@link #DFLT_WAL_ARCHIVE_MAX_SIZE} will be used. * * @param walArchiveMaxSize max size(in bytes) of WAL archive directory. * @return {@code this} for chaining. */ public DataStorageConfiguration setMaxWalArchiveSize(long walArchiveMaxSize) { + if (walArchiveMaxSize != UNLIMITED_WAL_ARCHIVE) + A.ensure(walArchiveMaxSize > 0, "Max WAL archive size can be only greater than 0 " + + "or must be equal to " + UNLIMITED_WAL_ARCHIVE + " (to be unlimited)"); + this.maxWalArchiveSize = walArchiveMaxSize; return this; diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java index 3da980109d906..6fb70569d73c9 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java +++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java @@ -925,6 +925,11 @@ public interface EventType { /** * Built-in event type: query execution. + * This event is triggered after a corresponding SQL query validated and before it is executed. + * Unlike {@link #EVT_CACHE_QUERY_EXECUTED}, {@code EVT_SQL_QUERY_EXECUTION} is fired only once for a request + * and does not relate to a specific cache. + * Enet includes the following information: qurey text and its arguments, security subject id. + * *

* NOTE: all types in range from 1 to 1000 are reserved for * internal Ignite events and should not be used by user-defined events. diff --git a/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java index 4700d7b9fff15..d8feb07551394 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/events/SqlQueryExecutionEvent.java @@ -28,6 +28,10 @@ /** * Query execution event. + * This event is triggered after a corresponding SQL query validated and before it is executed. + * Unlike {@link EventType#EVT_CACHE_QUERY_EXECUTED}, {@link EventType#EVT_SQL_QUERY_EXECUTION} is fired only once for a request + * and does not relate to a specific cache. + * *

* Grid events are used for notification about what happens within the grid. Note that by * design Ignite keeps all events generated on the local node locally and it provides diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index 56f976534fe84..08c9382334640 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -39,6 +39,7 @@ import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.IgniteDefragmentation; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; @@ -452,6 +453,13 @@ public interface GridKernalContext extends Iterable { */ public GridEncryptionManager encryption(); + /** + * Gets defragmentation manager. + * + * @return Defragmentation manager. + */ + public IgniteDefragmentation defragmentation(); + /** * Gets workers registry. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index ae589adff86e3..78d88a945ebe8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -58,6 +58,8 @@ import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.IgniteDefragmentation; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.IgniteDefragmentationImpl; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; @@ -174,6 +176,10 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable @GridToStringExclude private GridEncryptionManager encryptionMgr; + /** */ + @GridToStringExclude + private IgniteDefragmentation defragMgr; + /** */ @GridToStringExclude private GridTracingManager tracingMgr; @@ -557,6 +563,8 @@ protected GridKernalContextImpl( marshCtx = new MarshallerContextImpl(plugins, clsFilter); + defragMgr = new IgniteDefragmentationImpl(this); + try { spring = SPRING.create(false); } @@ -906,6 +914,11 @@ public void addHelper(Object helper) { return encryptionMgr; } + /** {@inheritDoc} */ + @Override public IgniteDefragmentation defragmentation() { + return defragMgr; + } + /** {@inheritDoc} */ @Override public WorkersRegistry workersRegistry() { return workersRegistry; diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java index 550b60bf0eaae..81d84657bced3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.TransactionsMXBeanImpl; import org.apache.ignite.internal.managers.encryption.EncryptionMXBeanImpl; import org.apache.ignite.internal.processors.cache.persistence.DataStorageMXBeanImpl; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationMXBeanImpl; import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotMXBeanImpl; import org.apache.ignite.internal.processors.cache.warmup.WarmUpMXBeanImpl; import org.apache.ignite.internal.processors.cluster.BaselineAutoAdjustMXBeanImpl; @@ -51,6 +52,7 @@ import org.apache.ignite.mxbean.ClusterMetricsMXBean; import org.apache.ignite.mxbean.ComputeMXBean; import org.apache.ignite.mxbean.DataStorageMXBean; +import org.apache.ignite.mxbean.DefragmentationMXBean; import org.apache.ignite.mxbean.EncryptionMXBean; import org.apache.ignite.mxbean.FailureHandlingMxBean; import org.apache.ignite.mxbean.IgniteMXBean; @@ -185,6 +187,10 @@ public void registerMBeansAfterNodeStarted( SnapshotMXBean snpMXBean = new SnapshotMXBeanImpl(ctx); registerMBean("Snapshot", snpMXBean.getClass().getSimpleName(), snpMXBean, SnapshotMXBean.class); + // Defragmentation. 
+ DefragmentationMXBean defragMXBean = new DefragmentationMXBeanImpl(ctx); + registerMBean("Defragmentation", defragMXBean.getClass().getSimpleName(), defragMXBean, DefragmentationMXBean.class); + // Metrics configuration MetricsMxBean metricsMxBean = new MetricsMxBeanImpl(ctx.metric(), log); registerMBean("Metrics", metricsMxBean.getClass().getSimpleName(), metricsMxBean, MetricsMxBean.class); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 65a331fcb0e04..e686e55e178a3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -37,7 +37,6 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteClientDisconnectedException; import org.apache.ignite.IgniteException; @@ -101,6 +100,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P1; import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.S; @@ -2740,18 +2740,19 @@ public synchronized void submit(GridFutureAdapter notificationFut, Runnable cmd) try { body0(); } - catch (InterruptedException e) { - if (!isCancelled) - ctx.failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, e)); - - throw e; - } catch (Throwable t) { - U.error(log, "Exception in discovery notyfier worker thread.", t); + boolean isInterruptedException = X.hasCause(t, 
InterruptedException.class) + || X.hasCause(t, IgniteInterruptedException.class) + || X.hasCause(t, IgniteInterruptedCheckedException.class); - FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION; + if (!isInterruptedException) + U.error(log, "Exception in discovery notifier worker thread.", t); - ctx.failure().process(new FailureContext(type, t)); + if (!isInterruptedException || !isCancelled) { + FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION; + + ctx.failure().process(new FailureContext(type, t)); + } throw t; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java index cb4fc306cdb49..f3d85c597c0ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java @@ -146,15 +146,14 @@ public WALIterator replay( public void release(WALPointer start) throws IgniteCheckedException; /** - * Gives a hint to WAL manager to clear entries logged before the given pointer. Some entries before the - * the given pointer will be kept because there is a configurable WAL history size. Those entries may be used - * for partial partition rebalancing. + * Gives a hint to WAL manager to clear entries logged before the given pointer. + * If entries are needed for binary recovery, they will not be affected. + * Some entries may be reserved eg for historical rebalance and they also will not be affected. * - * @param low Pointer since which WAL will be truncated. If null, WAL will be truncated from the oldest segment. - * @param high Pointer for which it is safe to clear the log. + * @param high Upper border to which WAL segments will be deleted. * @return Number of deleted WAL segments. 
*/ - public int truncate(WALPointer low, WALPointer high); + public int truncate(@Nullable WALPointer high); /** * Notifies {@code this} about latest checkpoint pointer. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index 4a1aceb068bf2..ae33bf8e7aec1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -33,6 +33,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -150,7 +151,6 @@ import org.apache.ignite.internal.util.F0; import org.apache.ignite.internal.util.IgniteCollectors; import org.apache.ignite.internal.util.InitializationProtector; -import org.apache.ignite.internal.util.StripedExecutor; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -192,7 +192,6 @@ import static java.util.Arrays.asList; import static java.util.Objects.isNull; import static java.util.Objects.nonNull; -import static org.apache.ignite.IgniteSystemProperties.IGNITE_ALLOW_START_CACHES_IN_PARALLEL; import static org.apache.ignite.IgniteSystemProperties.IGNITE_CACHE_REMOVED_ENTRIES_TTL; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK; import static org.apache.ignite.IgniteSystemProperties.getBoolean; @@ -5465,12 +5464,12 @@ private void restorePartitionStates( AtomicReference restoreStateError = new 
AtomicReference<>(); - StripedExecutor stripedExec = ctx.getStripedExecutorService(); + ExecutorService sysPool = ctx.getSystemExecutorService(); - int roundRobin = 0; + CountDownLatch completionLatch = new CountDownLatch(forGroups.size()); for (CacheGroupContext grp : forGroups) { - stripedExec.execute(roundRobin % stripedExec.stripesCount(), () -> { + sysPool.execute(() -> { try { long processed = grp.offheap().restorePartitionStates(partitionStates); @@ -5487,14 +5486,15 @@ private void restorePartitionStates( : new IgniteCheckedException(e) ); } + finally { + completionLatch.countDown(); + } }); - - roundRobin++; } try { // Await completion restore state tasks in all stripes. - stripedExec.awaitComplete(); + completionLatch.await(); } catch (InterruptedException e) { throw new IgniteInterruptedException(e); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index 4a9435c378c93..65e4b4d1b663f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -134,6 +134,8 @@ import org.apache.ignite.lang.IgniteRunnable; import org.jetbrains.annotations.Nullable; +import static java.util.Collections.emptySet; +import static java.util.stream.Stream.concat; import static org.apache.ignite.IgniteSystemProperties.IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT_LIMIT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD; import static org.apache.ignite.IgniteSystemProperties.IGNITE_THREAD_DUMP_ON_EXCHANGE_TIMEOUT; @@ -1237,7 +1239,7 @@ private void updateTopologies(boolean crd) throws 
IgniteCheckedException { top.update(null, clientTop.partitionMap(true), clientTop.fullUpdateCounters(), - Collections.emptySet(), + emptySet(), null, null, null, @@ -3933,7 +3935,7 @@ private void finishExchangeOnCoordinator(@Nullable Collection sndRe assert firstDiscoEvt instanceof DiscoveryCustomEvent; if (activateCluster() || changedBaseline()) - assignPartitionsStates(true); + assignPartitionsStates(null); DiscoveryCustomMessage discoveryCustomMessage = ((DiscoveryCustomEvent) firstDiscoEvt).customMessage(); @@ -3944,20 +3946,26 @@ private void finishExchangeOnCoordinator(@Nullable Collection sndRe if (!F.isEmpty(caches)) resetLostPartitions(caches); - assignPartitionsStates(true); + Set cacheGroupsToResetOwners = concat(exchActions.cacheGroupsToStart().stream() + .map(grp -> grp.descriptor().groupId()), + exchActions.cachesToResetLostPartitions().stream() + .map(CU::cacheId)) + .collect(Collectors.toSet()); + + assignPartitionsStates(cacheGroupsToResetOwners); } } else if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage && ((SnapshotDiscoveryMessage)discoveryCustomMessage).needAssignPartitions()) { markAffinityReassign(); - assignPartitionsStates(true); + assignPartitionsStates(null); } } else if (exchCtx.events().hasServerJoin()) - assignPartitionsStates(true); + assignPartitionsStates(null); else if (exchCtx.events().hasServerLeft()) - assignPartitionsStates(false); + assignPartitionsStates(emptySet()); // Validation should happen after resetting owners to avoid false desync reporting. validatePartitionsState(); @@ -4248,9 +4256,10 @@ private void validatePartitionsState() { } /** - * @param resetOwners True if reset partitions state needed, false otherwise. 
+ * @param cacheGroupsToResetOwners Set of cache groups which need to reset partitions state, + * null if reset partitions state for all cache groups needed */ - private void assignPartitionsStates(boolean resetOwners) { + private void assignPartitionsStates(Set cacheGroupsToResetOwners) { Map> supplyInfoMap = log.isInfoEnabled() ? new ConcurrentHashMap<>() : null; @@ -4266,12 +4275,17 @@ private void assignPartitionsStates(boolean resetOwners) { : cctx.exchange().clientTopology(grpDesc.groupId(), events().discoveryCache()); if (CU.isPersistentCache(grpDesc.config(), cctx.gridConfig().getDataStorageConfiguration())) { - List list = assignPartitionStates(top, resetOwners); + List list; + + if (cacheGroupsToResetOwners == null || cacheGroupsToResetOwners.contains(grpDesc.groupId())) + list = assignPartitionStates(top, true); + else + list = assignPartitionStates(top, false); if (supplyInfoMap != null && !F.isEmpty(list)) supplyInfoMap.put(grpDesc.cacheOrGroupName(), list); } - else if (resetOwners) + else if (cacheGroupsToResetOwners == null) assignPartitionSizes(top); return null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 346b842585c5b..821d3a3016e93 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -627,15 +627,18 @@ private void checkWalArchiveSizeConfiguration(DataStorageConfiguration memCfg) t LT.warn(log, "DataRegionConfiguration.maxWalArchiveSize instead DataRegionConfiguration.walHistorySize " + "would be used for removing old archive wal files"); else if (memCfg.getMaxWalArchiveSize() == DFLT_WAL_ARCHIVE_MAX_SIZE) - LT.warn(log, 
"walHistorySize was deprecated. maxWalArchiveSize should be used instead"); + LT.warn(log, "walHistorySize was deprecated and does not have any effect anymore. " + + "maxWalArchiveSize should be used instead"); else throw new IgniteCheckedException("Should be used only one of wal history size or max wal archive size." + "(use DataRegionConfiguration.maxWalArchiveSize because DataRegionConfiguration.walHistorySize was deprecated)" ); - if (memCfg.getMaxWalArchiveSize() < memCfg.getWalSegmentSize()) + if (memCfg.getMaxWalArchiveSize() != DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE + && memCfg.getMaxWalArchiveSize() < memCfg.getWalSegmentSize()) throw new IgniteCheckedException( - "DataRegionConfiguration.maxWalArchiveSize should be greater than DataRegionConfiguration.walSegmentSize" + "DataRegionConfiguration.maxWalArchiveSize should be greater than DataRegionConfiguration.walSegmentSize " + + "or must be equal to " + DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE + "." ); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java index 11125792f0c1b..869f0dac2368b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java @@ -68,8 +68,8 @@ public class CheckpointHistory { /** The maximal number of checkpoints hold in memory. */ private final int maxCpHistMemSize; - /** If WalHistorySize was setted by user will use old way for removing checkpoints. */ - private final boolean isWalHistorySizeParameterEnabled; + /** Should WAL be truncated */ + private final boolean isWalTruncationEnabled; /** Map stores the earliest checkpoint for each partition from particular group. 
*/ private final Map earliestCp = new ConcurrentHashMap<>(); @@ -80,9 +80,6 @@ public class CheckpointHistory { /** Checking that checkpoint is applicable or not for given cache group. */ private final IgniteThrowableBiPredicate checkpointInapplicable; - /** It is available or not to truncate WAL on checkpoint finish. */ - private final boolean truncateWalOnCpFinish; - /** It is available or not to reserve checkpoint(deletion protection). */ private final boolean reservationDisabled; @@ -103,15 +100,9 @@ public class CheckpointHistory { this.wal = wal; this.checkpointInapplicable = inapplicable; - maxCpHistMemSize = Math.min(dsCfg.getWalHistorySize(), - IgniteSystemProperties.getInteger(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, - DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE)); - - isWalHistorySizeParameterEnabled = dsCfg.isWalHistorySizeParameterUsed(); + isWalTruncationEnabled = dsCfg.getMaxWalArchiveSize() != DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE; - truncateWalOnCpFinish = dsCfg.isWalHistorySizeParameterUsed() - ? dsCfg.getWalHistorySize() != Integer.MAX_VALUE - : dsCfg.getMaxWalArchiveSize() != Long.MAX_VALUE; + maxCpHistMemSize = IgniteSystemProperties.getInteger(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE); reservationDisabled = dsCfg.getWalMode() == WALMode.NONE; } @@ -317,7 +308,7 @@ private void addPartitionToEarliestCheckpoints(GroupPartitionId grpPartKey, Chec * @return {@code true} if there is space for next checkpoint. 
*/ public boolean hasSpace() { - return histMap.size() + 1 <= maxCpHistMemSize; + return isWalTruncationEnabled || histMap.size() + 1 <= maxCpHistMemSize; } /** @@ -334,30 +325,69 @@ public List onWalTruncated(WALPointer highBound) { if (highBound.compareTo(cpPnt) <= 0) break; - if (wal.reserved(cpEntry.checkpointMark())) { - U.warn(log, "Could not clear historyMap due to WAL reservation on cp: " + cpEntry + - ", history map size is " + histMap.size()); - + if (!removeCheckpoint(cpEntry)) break; - } - synchronized (earliestCp) { - CheckpointEntry deletedCpEntry = histMap.remove(cpEntry.timestamp()); + removed.add(cpEntry); + } - CheckpointEntry oldestCpInHistory = firstCheckpoint(); + return removed; + } - for (Map.Entry grpPartPerCp : earliestCp.entrySet()) { - if (grpPartPerCp.getValue() == deletedCpEntry) - grpPartPerCp.setValue(oldestCpInHistory); - } - } + /** + * Removes checkpoints from history. + * + * @return List of checkpoint entries removed from history. + */ + public List removeCheckpoints(int countToRemove) { + if (countToRemove == 0) + return Collections.emptyList(); - removed.add(cpEntry); + List removed = new ArrayList<>(); + + for (Iterator> iterator = histMap.entrySet().iterator(); + iterator.hasNext() && removed.size() < countToRemove; ) { + Map.Entry entry = iterator.next(); + + CheckpointEntry checkpoint = entry.getValue(); + + if (!removeCheckpoint(checkpoint)) + break; + + removed.add(checkpoint); } return removed; } + /** + * Remove checkpoint from history + * + * @param checkpoint Checkpoint to be removed + * @return Whether checkpoint was removed from history + */ + private boolean removeCheckpoint(CheckpointEntry checkpoint) { + if (wal.reserved(checkpoint.checkpointMark())) { + U.warn(log, "Could not clear historyMap due to WAL reservation on cp: " + checkpoint + + ", history map size is " + histMap.size()); + + return false; + } + + synchronized (earliestCp) { + CheckpointEntry deletedCpEntry = histMap.remove(checkpoint.timestamp()); + 
+ CheckpointEntry oldestCpInHistory = firstCheckpoint(); + + for (Map.Entry grpPartPerCp : earliestCp.entrySet()) { + if (grpPartPerCp.getValue() == deletedCpEntry) + grpPartPerCp.setValue(oldestCpInHistory); + } + } + + return true; + } + /** * Logs and clears checkpoint history after checkpoint finish. * @@ -366,21 +396,20 @@ public List onWalTruncated(WALPointer highBound) { public List onCheckpointFinished(Checkpoint chp) { chp.walSegsCoveredRange(calculateWalSegmentsCovered()); - WALPointer checkpointMarkUntilDel = isWalHistorySizeParameterEnabled //check for compatibility mode. - ? checkpointMarkUntilDeleteByMemorySize() - : newerPointer(checkpointMarkUntilDeleteByMemorySize(), checkpointMarkUntilDeleteByArchiveSize()); + int removeCount = isWalTruncationEnabled + ? checkpointCountUntilDeleteByArchiveSize() + : (histMap.size() - maxCpHistMemSize); - if (checkpointMarkUntilDel == null) + if (removeCount <= 0) return Collections.emptyList(); - List deletedCheckpoints = onWalTruncated(checkpointMarkUntilDel); + List deletedCheckpoints = removeCheckpoints(removeCount); - int deleted = 0; + if (isWalTruncationEnabled) { + int deleted = wal.truncate(firstCheckpointPointer()); - if (truncateWalOnCpFinish) - deleted += wal.truncate(null, firstCheckpointPointer()); - - chp.walFilesDeleted(deleted); + chp.walFilesDeleted(deleted); + } return deletedCheckpoints; } @@ -420,28 +449,32 @@ private WALPointer checkpointMarkUntilDeleteByMemorySize() { } /** - * Calculate mark until delete by maximum allowed archive size. + * Calculate count of checkpoints to delete by maximum allowed archive size. * - * @return Checkpoint mark until which checkpoints can be deleted(not including this pointer). + * @return Checkpoint count to be deleted. 
*/ - @Nullable private WALPointer checkpointMarkUntilDeleteByArchiveSize() { + private int checkpointCountUntilDeleteByArchiveSize() { long absFileIdxToDel = wal.maxArchivedSegmentToDelete(); if (absFileIdxToDel < 0) - return null; + return 0; long fileUntilDel = absFileIdxToDel + 1; long checkpointFileIdx = absFileIdx(lastCheckpoint()); + int countToRemove = 0; + for (CheckpointEntry cpEntry : histMap.values()) { long currFileIdx = absFileIdx(cpEntry); if (checkpointFileIdx <= currFileIdx || fileUntilDel <= currFileIdx) - return cpEntry.checkpointMark(); + return countToRemove; + + countToRemove++; } - return lastCheckpoint().checkpointMark(); + return histMap.size() - 1; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java index 75b3458f48913..48616b63f6bca 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/CachePartitionDefragmentationManager.java @@ -19,12 +19,10 @@ import java.io.File; import java.nio.file.Path; -import java.text.DecimalFormat; -import java.text.DecimalFormatSymbols; import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.TreeMap; @@ -46,6 +44,7 @@ import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheType; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager; import 
org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager.CacheDataStore; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.CheckpointState; @@ -93,6 +92,7 @@ import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DEFRAGMENTATION_MAPPING_REGION_NAME; import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DEFRAGMENTATION_PART_REGION_NAME; import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.batchRenameDefragmentedCacheGroupPartitions; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexFile; import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedIndexTmpFile; import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartFile; import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.DefragmentationFileUtils.defragmentedPartMappingFile; @@ -153,7 +153,7 @@ public class CachePartitionDefragmentationManager { private final AtomicBoolean cancel = new AtomicBoolean(); /** */ - private final DefragmentationStatus status = new DefragmentationStatus(); + private final Status status = new Status(); /** */ private final GridFutureAdapter completionFut = new GridFutureAdapter<>(); @@ -220,7 +220,30 @@ public void beforeDefragmentation() throws IgniteCheckedException { /** */ public void executeDefragmentation() throws IgniteCheckedException { - status.onStart(cacheGrpCtxsForDefragmentation); + Map> oldStores = new HashMap<>(); + + for (CacheGroupContext oldGrpCtx : cacheGrpCtxsForDefragmentation) { + int grpId = oldGrpCtx.groupId(); + + final IgniteCacheOffheapManager offheap = 
oldGrpCtx.offheap(); + + List oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false) + .filter(store -> { + try { + return filePageStoreMgr.exists(grpId, store.partId()); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + }) + .collect(Collectors.toList()); + + oldStores.put(grpId, oldCacheDataStores); + } + + int partitionCount = oldStores.values().stream().mapToInt(List::size).sum(); + + status.onStart(cacheGrpCtxsForDefragmentation, partitionCount); try { // Now the actual process starts. @@ -234,8 +257,10 @@ public void executeDefragmentation() throws IgniteCheckedException { File workDir = filePageStoreMgr.cacheWorkDir(oldGrpCtx.sharedGroup(), oldGrpCtx.cacheOrGroupName()); + List oldCacheDataStores = oldStores.get(grpId); + if (skipAlreadyDefragmentedCacheGroup(workDir, grpId, log)) { - status.onCacheGroupSkipped(oldGrpCtx); + status.onCacheGroupSkipped(oldGrpCtx, oldCacheDataStores.size()); continue; } @@ -243,17 +268,6 @@ public void executeDefragmentation() throws IgniteCheckedException { try { GridCacheOffheapManager offheap = (GridCacheOffheapManager)oldGrpCtx.offheap(); - List oldCacheDataStores = stream(offheap.cacheDataStores().spliterator(), false) - .filter(store -> { - try { - return filePageStoreMgr.exists(grpId, store.partId()); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - }) - .collect(Collectors.toList()); - status.onCacheGroupStart(oldGrpCtx, oldCacheDataStores.size()); if (workDir == null || oldCacheDataStores.isEmpty()) { @@ -386,9 +400,9 @@ public void executeDefragmentation() throws IgniteCheckedException { partCtx.partPageMemory ); - partCtx.createNewCacheDataStore(offheap); + partCtx.createNewCacheDataStore(offheap); - copyPartitionData(partCtx, treeIter); + copyPartitionData(partCtx, treeIter); DefragmentationPageReadWriteManager pageMgr = (DefragmentationPageReadWriteManager)partCtx.partPageMemory.pageManager(); @@ -450,7 +464,21 @@ public void 
executeDefragmentation() throws IgniteCheckedException { .futureFor(CheckpointState.FINISHED); } + PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION); + idxDfrgFut = idxDfrgFut.chain(fut -> { + if (log.isDebugEnabled()) { + log.debug(S.toString( + "Index partition defragmented", + "grpId", grpId, false, + "oldPages", oldIdxPageStore.pages(), false, + "newPages", idxAllocationTracker.get() + 1, false, + "pageSize", pageSize, false, + "partFile", defragmentedIndexFile(workDir).getName(), false, + "workDir", workDir, false + )); + } + oldPageMem.invalidate(grpId, PageIdAllocator.INDEX_PARTITION); PageMemoryEx partPageMem = (PageMemoryEx)partDataRegion.pageMemory(); @@ -476,8 +504,6 @@ public void executeDefragmentation() throws IgniteCheckedException { return null; }); - PageStore oldIdxPageStore = filePageStoreMgr.getStore(grpId, INDEX_PARTITION); - status.onIndexDefragmented( oldGrpCtx, oldIdxPageStore.size(), @@ -596,8 +622,8 @@ private void checkCancellation() throws DefragmentationCancelledException { } /** */ - public String status() { - return status.toString(); + public Status status() { + return status; } /** @@ -614,6 +640,9 @@ private void copyPartitionData( CacheDataTree tree = partCtx.oldCacheDataStore.tree(); CacheDataTree newTree = partCtx.newCacheDataStore.tree(); + + newTree.enableSequentialWriteMode(); + PendingEntriesTree newPendingTree = partCtx.newCacheDataStore.pendingTree(); AbstractFreeList freeList = partCtx.newCacheDataStore.getCacheStoreFreeList(); @@ -963,58 +992,110 @@ private static class DefragmentationCancelledException extends RuntimeException private static final long serialVersionUID = 0L; } - /** */ - private class DefragmentationStatus { - /** */ + /** Defragmentation status. */ + class Status { + /** Defragmentation start timestamp. */ private long startTs; - /** */ + /** Defragmentation finish timestamp. 
*/ private long finishTs; - /** */ - private final Set scheduledGroups = new TreeSet<>(); + /** Total count of partitions. */ + private int totalPartitionCount; - /** */ - private final Map progressGroups - = new TreeMap<>(comparing(CacheGroupContext::cacheOrGroupName)); + /** Partitions, that are already defragmented. */ + private int defragmentedPartitionCount; - /** */ - private final Map finishedGroups - = new TreeMap<>(comparing(CacheGroupContext::cacheOrGroupName)); + /** Cache groups scheduled for defragmentation. */ + private final Set scheduledGroups; - /** */ - private final Set skippedGroups = new TreeSet<>(); + /** Progress for cache group. */ + private final Map progressGroups; - /** */ - public synchronized void onStart(Set scheduledGroups) { + /** Finished cache groups. */ + private final Map finishedGroups; + + /** Skipped cache groups. */ + private final Set skippedGroups; + + /** Constructor. */ + public Status() { + scheduledGroups = new TreeSet<>(); + progressGroups = new TreeMap<>(comparing(CacheGroupContext::cacheOrGroupName)); + finishedGroups = new TreeMap<>(comparing(CacheGroupContext::cacheOrGroupName)); + skippedGroups = new TreeSet<>(); + } + + /** Copy constructor. */ + public Status( + long startTs, + long finishTs, + Set scheduledGroups, + Map progressGroups, + Map finishedGroups, + Set skippedGroups + ) { + this.startTs = startTs; + this.finishTs = finishTs; + this.scheduledGroups = scheduledGroups; + this.progressGroups = progressGroups; + this.finishedGroups = finishedGroups; + this.skippedGroups = skippedGroups; + } + + /** + * Mark the start of the defragmentation. + * @param scheduledGroups Groups scheduled for defragmentation. + * @param partitions Total partition count. 
+ */ + public synchronized void onStart(Set scheduledGroups, int partitions) { startTs = System.currentTimeMillis(); + totalPartitionCount = partitions; - for (CacheGroupContext grp : scheduledGroups) { + for (CacheGroupContext grp : scheduledGroups) this.scheduledGroups.add(grp.cacheOrGroupName()); - } log.info("Defragmentation started."); } - /** */ - public synchronized void onCacheGroupStart(CacheGroupContext grpCtx, int parts) { + /** + * Mark the start of the cache group defragmentation. + * @param grpCtx Cache group context. + * @param parts Partition count. + */ + private synchronized void onCacheGroupStart(CacheGroupContext grpCtx, int parts) { scheduledGroups.remove(grpCtx.cacheOrGroupName()); progressGroups.put(grpCtx, new DefragmentationCacheGroupProgress(parts)); } - /** */ - public synchronized void onPartitionDefragmented(CacheGroupContext grpCtx, long oldSize, long newSize) { + /** + * Mark the end of the partition defragmentation. + * @param grpCtx Cache group context. + * @param oldSize Old size. + * @param newSize New size; + */ + private synchronized void onPartitionDefragmented(CacheGroupContext grpCtx, long oldSize, long newSize) { progressGroups.get(grpCtx).onPartitionDefragmented(oldSize, newSize); + + defragmentedPartitionCount++; } - /** */ - public synchronized void onIndexDefragmented(CacheGroupContext grpCtx, long oldSize, long newSize) { + /** + * Mark the end of the index partition defragmentation. + * @param grpCtx Cache group context. + * @param oldSize Old size. + * @param newSize New size; + */ + private synchronized void onIndexDefragmented(CacheGroupContext grpCtx, long oldSize, long newSize) { progressGroups.get(grpCtx).onIndexDefragmented(oldSize, newSize); } - /** */ - public synchronized void onCacheGroupFinish(CacheGroupContext grpCtx) { + /** + * Mark the end of the cache group defragmentation. + * @param grpCtx Cache group context. 
+ */ + private synchronized void onCacheGroupFinish(CacheGroupContext grpCtx) { DefragmentationCacheGroupProgress progress = progressGroups.remove(grpCtx); progress.onFinish(); @@ -1022,15 +1103,20 @@ public synchronized void onCacheGroupFinish(CacheGroupContext grpCtx) { finishedGroups.put(grpCtx, progress); } - /** */ - public synchronized void onCacheGroupSkipped(CacheGroupContext grpCtx) { + /** + * Mark that cache group defragmentation was skipped. + * @param grpCtx Cache group context. + */ + private synchronized void onCacheGroupSkipped(CacheGroupContext grpCtx, int partitions) { scheduledGroups.remove(grpCtx.cacheOrGroupName()); skippedGroups.add(grpCtx.cacheOrGroupName()); + + defragmentedPartitionCount += partitions; } - /** */ - public synchronized void onFinish() { + /** Mark the end of the defragmentation. */ + private synchronized void onFinish() { finishTs = System.currentTimeMillis(); progressGroups.clear(); @@ -1040,67 +1126,80 @@ public synchronized void onFinish() { log.info("Defragmentation process completed. Time: " + (finishTs - startTs) * 1e-3 + "s."); } - /** {@inheritDoc} */ - @Override public synchronized String toString() { - StringBuilder sb = new StringBuilder(); - - if (!finishedGroups.isEmpty()) { - sb.append("Defragmentation is completed for cache groups:\n"); - - for (Map.Entry entry : finishedGroups.entrySet()) { - sb.append(" ").append(entry.getKey().cacheOrGroupName()).append(" - "); - - sb.append(entry.getValue().toString()).append('\n'); - } - } + /** Copy object. 
*/ + private synchronized Status copy() { + return new Status( + startTs, + finishTs, + new HashSet<>(scheduledGroups), + new HashMap<>(progressGroups), + new HashMap<>(finishedGroups), + new HashSet<>(skippedGroups) + ); + } - if (!progressGroups.isEmpty()) { - sb.append("Defragmentation is in progress for cache groups:\n"); + /** */ + public long getStartTs() { + return startTs; + } - for (Map.Entry entry : progressGroups.entrySet()) { - sb.append(" ").append(entry.getKey().cacheOrGroupName()).append(" - "); + /** */ + public long getFinishTs() { + return finishTs; + } - sb.append(entry.getValue().toString()).append('\n'); - } - } + /** */ + public Set getScheduledGroups() { + return scheduledGroups; + } - if (!skippedGroups.isEmpty()) - sb.append("Skipped cache groups: ").append(String.join(", ", skippedGroups)).append('\n'); + /** */ + public Map getProgressGroups() { + return progressGroups; + } - if (!scheduledGroups.isEmpty()) - sb.append("Awaiting defragmentation: ").append(String.join(", ", scheduledGroups)).append('\n'); + /** */ + public Map getFinishedGroups() { + return finishedGroups; + } - return sb.toString(); + /** */ + public Set getSkippedGroups() { + return skippedGroups; } - } - /** */ - private static class DefragmentationCacheGroupProgress { /** */ - private static final DecimalFormat MB_FORMAT = new DecimalFormat( - "#.##", - DecimalFormatSymbols.getInstance(Locale.US) - ); + public int getTotalPartitionCount() { + return totalPartitionCount; + } /** */ + public int getDefragmentedPartitionCount() { + return defragmentedPartitionCount; + } + } + + /** Cache group defragmentation progress. */ + static class DefragmentationCacheGroupProgress { + /** Partition count. */ private final int partsTotal; - /** */ + /** Defragmented partitions. */ private int partsCompleted; - /** */ + /** Old cache group size. */ private long oldSize; - /** */ + /** New cache group size. */ private long newSize; - /** */ + /** Start timestamp. 
*/ private final long startTs; - /** */ + /** Finish timestamp. */ private long finishTs; - /** */ + /** Constructor. */ public DefragmentationCacheGroupProgress(int parts) { partsTotal = parts; @@ -1128,43 +1227,38 @@ public void onIndexDefragmented(long oldSize, long newSize) { } /** */ - public void onFinish() { - finishTs = System.currentTimeMillis(); + public long getOldSize() { + return oldSize; } - /** {@inheritDoc} */ - @Override public String toString() { - StringBuilder sb = new StringBuilder(); - - if (finishTs == 0) { - sb.append("partitions processed/all: ").append(partsCompleted).append("/").append(partsTotal); - - sb.append(", time elapsed: "); - - appendDuration(sb, System.currentTimeMillis()); - } - else { - double mb = 1024 * 1024; - - sb.append("size before/after: ").append(MB_FORMAT.format(oldSize / mb)).append("MB/"); - sb.append(MB_FORMAT.format(newSize / mb)).append("MB"); - - sb.append(", time took: "); + /** */ + public long getNewSize() { + return newSize; + } - appendDuration(sb, finishTs); - } + /** */ + public long getStartTs() { + return startTs; + } - return sb.toString(); + /** */ + public long getFinishTs() { + return finishTs; } /** */ - private void appendDuration(StringBuilder sb, long end) { - long duration = Math.round((end - startTs) * 1e-3); + public int getPartsTotal() { + return partsTotal; + } - long mins = duration / 60; - long secs = duration % 60; + /** */ + public int getPartsCompleted() { + return partsCompleted; + } - sb.append(mins).append(" mins ").append(secs).append(" secs"); + /** */ + public void onFinish() { + finishTs = System.currentTimeMillis(); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationMXBeanImpl.java new file mode 100644 index 0000000000000..1e3becba51319 --- /dev/null +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationMXBeanImpl.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.mxbean.DefragmentationMXBean; + +/** + * Defragmentation MX bean implementation. + */ +public class DefragmentationMXBeanImpl implements DefragmentationMXBean { + /** Defragmentation manager. 
*/ + private final IgniteDefragmentation defragmentation; + + public DefragmentationMXBeanImpl(GridKernalContext ctx) { + this.defragmentation = ctx.defragmentation(); + } + + /** {@inheritDoc} */ + @Override public boolean schedule(String cacheNames) { + final List caches = Arrays.stream(cacheNames.split(",")) + .filter(s -> !s.isEmpty()) + .collect(Collectors.toList()); + + try { + defragmentation.schedule(caches); + + return true; + } + catch (IgniteCheckedException e) { + return false; + } + } + + /** {@inheritDoc} */ + @Override public boolean cancel() { + try { + defragmentation.cancel(); + + return true; + } + catch (IgniteCheckedException e) { + return false; + } + } + + /** {@inheritDoc} */ + @Override public boolean inProgress() { + return defragmentation.inProgress(); + } + + /** {@inheritDoc} */ + @Override public int processedPartitions() { + return defragmentation.processedPartitions(); + } + + /** {@inheritDoc} */ + @Override public int totalPartitions() { + return defragmentation.totalPartitions(); + } + + /** {@inheritDoc} */ + @Override public long startTime() { + return defragmentation.startTime(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/IgniteDefragmentation.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/IgniteDefragmentation.java new file mode 100644 index 0000000000000..a5dc811f90880 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/IgniteDefragmentation.java @@ -0,0 +1,341 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.text.DecimalFormat; +import java.text.DecimalFormatSymbols; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import org.apache.ignite.IgniteCheckedException; + +/** + * Defragmentation operation service. + */ +public interface IgniteDefragmentation { + /** + * Schedule defragmentaton on next start of the node. + * + * @param cacheNames Names of caches to run defragmentation on. + * @return Result of the scheduling. + * @throws IgniteCheckedException If failed. + */ + ScheduleResult schedule(List cacheNames) throws IgniteCheckedException; + + /** + * Cancel scheduled or ongoing defragmentation. + * @return Result of the cancellation. + * @throws IgniteCheckedException If failed. + */ + CancelResult cancel() throws IgniteCheckedException; + + /** + * Get the status of the ongoing defragmentation. + * @return Defragmentation status. + * @throws IgniteCheckedException If failed. + */ + DefragmentationStatus status() throws IgniteCheckedException; + + /** + * @return {@code true} if there is an ongoing defragmentation. + */ + boolean inProgress(); + + /** + * @return Number of processed partitions, or 0 if there is no ongoing defragmentation. 
+ */ + int processedPartitions(); + + /** + * @return Number of total partitions, or 0 if there is no ongoing defragmentation. + */ + int totalPartitions(); + + /** + * @return Timestamp of the beginning of the ongoing defragmentation or 0 if there is none. + */ + long startTime(); + + /** Result of the scheduling. */ + public enum ScheduleResult { + /** + * Successfully scheduled. + */ + SUCCESS, + + /** + * Successfuly scheduled, superseding previously scheduled defragmentation. + */ + SUCCESS_SUPERSEDED_PREVIOUS + } + + /** Result of the cancellation. */ + public enum CancelResult { + /** + * Cancelled scheduled defragmentation. + */ + CANCELLED_SCHEDULED, + + /** + * Nothing to cancel, no ongoing defragmentation. + */ + SCHEDULED_NOT_FOUND, + + /** + * Cancelled ongoing defragmentation. + */ + CANCELLED, + + /** + * Defragmentation is already completed or cancelled. + */ + COMPLETED_OR_CANCELLED + } + + /** */ + public static class DefragmentationStatus { + /** */ + private final Map completedCaches; + + /** */ + private final Map inProgressCaches; + + /** */ + private final Set awaitingCaches; + + /** */ + private final Set skippedCaches; + + /** */ + private final int totalPartitions; + + /** */ + private final int processedPartitions; + + /** */ + private final long startTs; + + /** */ + private final long totalElapsedTime; + + public DefragmentationStatus( + Map completedCaches, + Map inProgressCaches, + Set awaitingCaches, + Set skippedCaches, + int totalPartitions, + int processedPartitions, + long startTs, + long totalElapsedTime + ) { + this.completedCaches = completedCaches; + this.inProgressCaches = inProgressCaches; + this.awaitingCaches = awaitingCaches; + this.skippedCaches = skippedCaches; + this.totalPartitions = totalPartitions; + this.processedPartitions = processedPartitions; + this.startTs = startTs; + this.totalElapsedTime = totalElapsedTime; + } + + /** {@inheritDoc} */ + @Override public String toString() { + StringBuilder sb = new 
StringBuilder(); + + if (!completedCaches.isEmpty()) { + sb.append("Defragmentation is completed for cache groups:\n"); + + for (Map.Entry entry : completedCaches.entrySet()) { + sb.append(" ").append(entry.getKey()).append(" - "); + + sb.append(entry.getValue().toString()).append('\n'); + } + } + + if (!inProgressCaches.isEmpty()) { + sb.append("Defragmentation is in progress for cache groups:\n"); + + for (Map.Entry entry : inProgressCaches.entrySet()) { + sb.append(" ").append(entry.getKey()).append(" - "); + + sb.append(entry.getValue().toString()).append('\n'); + } + } + + if (!skippedCaches.isEmpty()) + sb.append("Skipped cache groups: ").append(String.join(", ", skippedCaches)).append('\n'); + + if (!awaitingCaches.isEmpty()) + sb.append("Awaiting defragmentation: ").append(String.join(", ", awaitingCaches)).append('\n'); + + return sb.toString(); + } + + /** */ + public Map getCompletedCaches() { + return completedCaches; + } + + /** */ + public Map getInProgressCaches() { + return inProgressCaches; + } + + /** */ + public Set getAwaitingCaches() { + return awaitingCaches; + } + + /** */ + public Set getSkippedCaches() { + return skippedCaches; + } + + /** */ + public long getTotalElapsedTime() { + return totalElapsedTime; + } + + /** */ + public int getTotalPartitions() { + return totalPartitions; + } + + /** */ + public int getProcessedPartitions() { + return processedPartitions; + } + + /** */ + public long getStartTs() { + return startTs; + } + } + + /** */ + abstract class DefragmentationInfo { + /** */ + long elapsedTime; + + public DefragmentationInfo(long elapsedTime) { + this.elapsedTime = elapsedTime; + } + + /** */ + void appendDuration(StringBuilder sb, long elapsedTime) { + long duration = Math.round(elapsedTime * 1e-3); + + long mins = duration / 60; + long secs = duration % 60; + + sb.append(mins).append(" mins ").append(secs).append(" secs"); + } + + /** */ + public long getElapsedTime() { + return elapsedTime; + } + } + + /** */ + public 
static class CompletedDefragmentationInfo extends DefragmentationInfo { + /** */ + private static final DecimalFormat MB_FORMAT = new DecimalFormat( + "#.##", + DecimalFormatSymbols.getInstance(Locale.US) + ); + + /** */ + long sizeBefore; + + /** */ + long sizeAfter; + + public CompletedDefragmentationInfo(long elapsedTime, long sizeBefore, long sizeAfter) { + super(elapsedTime); + this.sizeBefore = sizeBefore; + this.sizeAfter = sizeAfter; + } + + /** {@inheritDoc} */ + @Override public String toString() { + StringBuilder sb = new StringBuilder(); + + double mb = 1024 * 1024; + + sb.append("size before/after: ").append(MB_FORMAT.format(sizeBefore / mb)).append("MB/"); + sb.append(MB_FORMAT.format(sizeAfter / mb)).append("MB"); + + sb.append(", time took: "); + + appendDuration(sb, elapsedTime); + + return sb.toString(); + } + + /** */ + public long getSizeBefore() { + return sizeBefore; + } + + /** */ + public long getSizeAfter() { + return sizeAfter; + } + } + + /** */ + public static class InProgressDefragmentationInfo extends DefragmentationInfo { + /** */ + int processedPartitions; + + /** */ + int totalPartitions; + + public InProgressDefragmentationInfo(long elapsedTime, int processedPartitions, int totalPartitions) { + super(elapsedTime); + this.processedPartitions = processedPartitions; + this.totalPartitions = totalPartitions; + } + + /** {@inheritDoc} */ + @Override public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("partitions processed/all: ").append(processedPartitions).append("/").append(totalPartitions); + + sb.append(", time elapsed: "); + + appendDuration(sb, elapsedTime); + + return sb.toString(); + } + + /** */ + public int getProcessedPartitions() { + return processedPartitions; + } + + /** */ + public int getTotalPartitions() { + return totalPartitions; + } + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/IgniteDefragmentationImpl.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/IgniteDefragmentationImpl.java new file mode 100644 index 0000000000000..5c443baac411b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/IgniteDefragmentationImpl.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.Status; +import org.apache.ignite.maintenance.MaintenanceAction; +import org.apache.ignite.maintenance.MaintenanceRegistry; +import org.apache.ignite.maintenance.MaintenanceTask; + +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.DEFRAGMENTATION_MNTC_TASK_NAME; +import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.toStore; + +/** + * Defragmentation operation service implementation. + */ +public class IgniteDefragmentationImpl implements IgniteDefragmentation { + /** Kernal context. */ + private final GridKernalContext ctx; + + public IgniteDefragmentationImpl(GridKernalContext ctx) { + this.ctx = ctx; + } + + /** {@inheritDoc} */ + @Override public ScheduleResult schedule(List cacheNames) throws IgniteCheckedException { + final MaintenanceRegistry maintenanceRegistry = ctx.maintenanceRegistry(); + + MaintenanceTask oldTask; + + try { + oldTask = maintenanceRegistry.registerMaintenanceTask(toStore(cacheNames != null ? cacheNames : Collections.emptyList())); + } + catch (IgniteCheckedException e) { + throw new IgniteCheckedException("Scheduling failed: " + e.getMessage()); + } + + return oldTask != null ? 
ScheduleResult.SUCCESS_SUPERSEDED_PREVIOUS : ScheduleResult.SUCCESS; + } + + /** {@inheritDoc} */ + @Override public CancelResult cancel() throws IgniteCheckedException { + final MaintenanceRegistry maintenanceRegistry = ctx.maintenanceRegistry(); + + if (!maintenanceRegistry.isMaintenanceMode()) { + boolean deleted = maintenanceRegistry.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME); + + return deleted ? CancelResult.CANCELLED_SCHEDULED : CancelResult.SCHEDULED_NOT_FOUND; + } + else { + List> actions; + + try { + actions = maintenanceRegistry.actionsForMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME); + } + catch (IgniteException e) { + return CancelResult.COMPLETED_OR_CANCELLED; + } + + Optional> stopAct = actions.stream().filter(a -> "stop".equals(a.name())).findAny(); + + assert stopAct.isPresent(); + + try { + Object res = stopAct.get().execute(); + + assert res instanceof Boolean; + + boolean cancelled = (Boolean)res; + + return cancelled ? CancelResult.CANCELLED : CancelResult.COMPLETED_OR_CANCELLED; + } + catch (Exception e) { + throw new IgniteCheckedException("Exception occurred: " + e.getMessage(), e); + } + } + } + + /** {@inheritDoc} */ + @Override public DefragmentationStatus status() throws IgniteCheckedException { + final MaintenanceRegistry maintenanceRegistry = ctx.maintenanceRegistry(); + + if (!maintenanceRegistry.isMaintenanceMode()) + throw new IgniteCheckedException("Node is not in maintenance mode."); + + IgniteCacheDatabaseSharedManager dbMgr = ctx.cache().context().database(); + + assert dbMgr instanceof GridCacheDatabaseSharedManager; + + CachePartitionDefragmentationManager defrgMgr = ((GridCacheDatabaseSharedManager)dbMgr) + .defragmentationManager(); + + if (defrgMgr == null) + throw new IgniteCheckedException("There's no active defragmentation process on the node."); + + final Status status = defrgMgr.status(); + + final long startTs = status.getStartTs(); + final long finishTs = status.getFinishTs(); + final long 
elapsedTime = finishTs != 0 ? finishTs - startTs : System.currentTimeMillis() - startTs; + + Map completedCaches = new HashMap<>(); + Map progressCaches = new HashMap<>(); + + status.getFinishedGroups().forEach((context, progress) -> { + final String name = context.cacheOrGroupName(); + + final long oldSize = progress.getOldSize(); + final long newSize = progress.getNewSize(); + final long cgElapsedTime = progress.getFinishTs() - progress.getStartTs(); + + final CompletedDefragmentationInfo info = new CompletedDefragmentationInfo(cgElapsedTime, oldSize, newSize); + completedCaches.put(name, info); + }); + + status.getProgressGroups().forEach((context, progress) -> { + final String name = context.cacheOrGroupName(); + + final long cgElapsedTime = System.currentTimeMillis() - progress.getStartTs(); + final int partsTotal = progress.getPartsTotal(); + final int partsCompleted = progress.getPartsCompleted(); + + final InProgressDefragmentationInfo info = new InProgressDefragmentationInfo(cgElapsedTime, partsCompleted, partsTotal); + progressCaches.put(name, info); + }); + + return new DefragmentationStatus( + completedCaches, + progressCaches, + status.getScheduledGroups(), + status.getSkippedGroups(), + status.getTotalPartitionCount(), + status.getDefragmentedPartitionCount(), + startTs, + elapsedTime + ); + } + + /** {@inheritDoc} */ + @Override public boolean inProgress() { + final Status status = getStatus(); + + return status != null && status.getFinishTs() == 0; + } + + /** {@inheritDoc} */ + @Override public int processedPartitions() { + final Status status = getStatus(); + + if (status == null) + return 0; + + return status.getDefragmentedPartitionCount(); + } + + /** {@inheritDoc} */ + @Override public int totalPartitions() { + final CachePartitionDefragmentationManager.Status status = getStatus(); + + if (status == null) + return 0; + + return status.getTotalPartitionCount(); + } + + /** {@inheritDoc} */ + @Override public long startTime() { + final 
CachePartitionDefragmentationManager.Status status = getStatus(); + + if (status == null) + return 0; + + return status.getStartTs(); + } + + /** + * Get defragmentation status. + * @return Defragmentation status or {@code null} if there is no ongoing defragmentation. + */ + private Status getStatus() { + final MaintenanceRegistry maintenanceRegistry = ctx.maintenanceRegistry(); + + if (!maintenanceRegistry.isMaintenanceMode()) + return null; + + IgniteCacheDatabaseSharedManager dbMgr = ctx.cache().context().database(); + + assert dbMgr instanceof GridCacheDatabaseSharedManager; + + CachePartitionDefragmentationManager defrgMgr = ((GridCacheDatabaseSharedManager) dbMgr) + .defragmentationManager(); + + if (defrgMgr == null) + return null; + + return defrgMgr.status(); + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index c50e75dadf142..10e2c6617a278 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -63,7 +63,6 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; -import org.apache.ignite.internal.processors.cache.persistence.tree.util.InsertLast; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandlerWrapper; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; @@ -154,6 +153,9 @@ public abstract class BPlusTree extends DataStructure implements /** 
Failure processor. */ private final FailureProcessor failureProcessor; + /** Flag for enabling single-threaded append-only tree creation. */ + private boolean sequentialWriteOptsEnabled; + /** */ private final GridTreePrinter treePrinter = new GridTreePrinter() { /** */ @@ -884,6 +886,11 @@ public final String getName() { return name; } + /** Flag for enabling single-threaded append-only tree creation. */ + public void enableSequentialWriteMode() { + sequentialWriteOptsEnabled = true; + } + /** * Initialize new tree. * @@ -1398,6 +1405,8 @@ public final R findOne(L row, TreeRowClosure c, Object x) throws Ignit * @throws IgniteCheckedException If failed. */ private void doFind(Get g) throws IgniteCheckedException { + assert !sequentialWriteOptsEnabled; + for (;;) { // Go down with retries. g.init(); @@ -2054,6 +2063,8 @@ private Result invokeDown(final Invoke x, final long pageId, final long backId, * @throws IgniteCheckedException If failed. */ private T doRemove(L row, boolean needOld) throws IgniteCheckedException { + assert !sequentialWriteOptsEnabled; + checkDestroyed(); Remove r = new Remove(row, needOld); @@ -2711,7 +2722,8 @@ private boolean splitPage( long pageId, long page, long pageAddr, BPlusIO io, long fwdId, long fwdBuf, int idx ) throws IgniteCheckedException { int cnt = io.getCount(pageAddr); - int mid = cnt >>> 1; + + int mid = sequentialWriteOptsEnabled ? (int)(cnt * 0.85) : cnt >>> 1; boolean res = false; @@ -2767,7 +2779,7 @@ private Result askNeighbor(long pageId, Get g, boolean back) throws IgniteChecke * @return Result code. * @throws IgniteCheckedException If failed. 
*/ - private Result putDown(final Put p, final long pageId, final long fwdId, final int lvl) + private Result putDown(final Put p, final long pageId, final long fwdId, int lvl) throws IgniteCheckedException { assert lvl >= 0 : lvl; @@ -5302,8 +5314,11 @@ private int findInsertionPoint(int lvl, BPlusIO io, long buf, int low, int cn throws IgniteCheckedException { assert row != null; - if (row instanceof InsertLast) + if (sequentialWriteOptsEnabled) { + assert io.getForward(buf) == 0L; + return -cnt - 1; + } int high = cnt - 1; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java index 8730c1f700640..5bf7d399fe459 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java @@ -401,7 +401,7 @@ protected AbstractReadFileHandle initReadHandle( SegmentIO fileIO = null; try { - fileIO = desc.toIO(ioFactory); + fileIO = desc.toReadOnlyIO(ioFactory); SegmentHeader segmentHeader; @@ -513,6 +513,6 @@ protected interface AbstractFileDescriptor { * @return One of implementation of {@link FileIO}. * @throws IOException if creation of fileIo was not success. 
*/ - SegmentIO toIO(FileIOFactory fileIOFactory) throws IOException; + SegmentIO toReadOnlyIO(FileIOFactory fileIOFactory) throws IOException; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java index 2f088d19f6979..f654c3213ce44 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java @@ -27,6 +27,8 @@ import org.apache.ignite.internal.util.typedef.internal.SB; import org.jetbrains.annotations.Nullable; +import static java.nio.file.StandardOpenOption.READ; + /** * WAL file descriptor. */ @@ -144,8 +146,8 @@ public String getAbsolutePath() { } /** {@inheritDoc} */ - @Override public SegmentIO toIO(FileIOFactory fileIOFactory) throws IOException { - FileIO fileIO = isCompressed() ? new UnzipFileIO(file()) : fileIOFactory.create(file()); + @Override public SegmentIO toReadOnlyIO(FileIOFactory fileIOFactory) throws IOException { + FileIO fileIO = isCompressed() ? 
new UnzipFileIO(file()) : fileIOFactory.create(file(), READ); return new SegmentIO(idx, fileIO); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index ff64f5b034156..de277533cb076 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -290,8 +290,8 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Failure processor */ private final FailureProcessor failureProcessor; - /** */ - private IgniteConfiguration igCfg; + /** Ignite configuration. */ + private final IgniteConfiguration igCfg; /** Persistence metrics tracker. */ private DataStorageMetricsImpl metrics; @@ -400,7 +400,12 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl */ private final Map segmentSize = new ConcurrentHashMap<>(); + /** Pointer to the last successful checkpoint until which WAL segments can be safely deleted. */ + private volatile WALPointer lastCheckpointPtr = new WALPointer(0, 0, 0); + /** + * Constructor. + * * @param ctx Kernal context. */ public FileWriteAheadLogManager(final GridKernalContext ctx) { @@ -428,8 +433,8 @@ public FileWriteAheadLogManager(final GridKernalContext ctx) { fileHandleManagerFactory = new FileHandleManagerFactory(dsCfg); maxSegCountWithoutCheckpoint = - (long)((U.adjustedWalHistorySize(dsCfg, log) * CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE) - / dsCfg.getWalSegmentSize()); + (long)((U.adjustedWalHistorySize(dsCfg, log) * CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE) + / dsCfg.getWalSegmentSize()); switchSegmentRecordOffset = isArchiverEnabled() ? 
new AtomicLongArray(dsCfg.getWalSegments()) : null; } @@ -973,7 +978,8 @@ private FileWriteHandle closeBufAndRollover( log, segmentAware, segmentRouter, - lockedSegmentFileInputFactory); + lockedSegmentFileInputFactory + ); try { iter.init(); // Make sure iterator is closed on any error. @@ -989,25 +995,27 @@ private FileWriteHandle closeBufAndRollover( /** {@inheritDoc} */ @Override public boolean reserve(WALPointer start) { - assert start != null : "Invalid start pointer: " + start; + assert start != null; if (mode == WALMode.NONE) return false; - segmentAware.reserve(start.index()); + // Protection from deletion. + boolean reserved = segmentAware.reserve(start.index()); - if (!hasIndex(start.index())) { - segmentAware.release(start.index()); + // Segment presence check. + if (reserved && !hasIndex(start.index())) { + segmentAware.release(start.index()); - return false; + reserved = false; } - return true; + return reserved; } /** {@inheritDoc} */ @Override public void release(WALPointer start) { - assert start != null : "Invalid start pointer: " + start; + assert start != null; if (mode == WALMode.NONE) return; @@ -1016,16 +1024,16 @@ private FileWriteHandle closeBufAndRollover( } /** - * @param absIdx Absolulte index to check. - * @return {@code true} if has this index. + * Checking for the existence of an index. + * + * @param absIdx Segment index. + * @return {@code True} exists.
*/ private boolean hasIndex(long absIdx) { String segmentName = fileName(absIdx); - String zipSegmentName = segmentName + ZIP_SUFFIX; - boolean inArchive = new File(walArchiveDir, segmentName).exists() || - new File(walArchiveDir, zipSegmentName).exists(); + new File(walArchiveDir, segmentName + ZIP_SUFFIX).exists(); if (inArchive) return true; @@ -1039,30 +1047,25 @@ private boolean hasIndex(long absIdx) { } /** {@inheritDoc} */ - @Override public int truncate(WALPointer low, WALPointer high) { + @Override public int truncate(@Nullable WALPointer high) { if (high == null) return 0; - // File pointer bound: older entries will be deleted from archive - - FileDescriptor[] descs = scan(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)); + FileDescriptor[] descs = walArchiveFiles(); int deleted = 0; for (FileDescriptor desc : descs) { - if (low != null && desc.idx < low.index()) - continue; - - // Do not delete reserved or locked segment and any segment after it. - if (segmentReservedOrLocked(desc.idx)) - return deleted; - long archivedAbsIdx = segmentAware.lastArchivedAbsoluteIndex(); long lastArchived = archivedAbsIdx >= 0 ? archivedAbsIdx : lastArchivedIndex(); - // We need to leave at least one archived segment to correctly determine the archive index. - if (desc.idx < high.index() && desc.idx < lastArchived) { + if (desc.idx >= lastCheckpointPtr.index() // We cannot delete segments needed for binary recovery. + || desc.idx >= lastArchived // We cannot delete last segment, it is needed at start of node and avoid gaps. + || !segmentAware.minReserveIndex(desc.idx)) // We cannot delete reserved segment. 
+ return deleted; + + if (desc.idx < high.index()) { if (!desc.file.delete()) { U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " + desc.file.getAbsolutePath()); @@ -1099,8 +1102,7 @@ private boolean segmentReservedOrLocked(long absIdx) { /** {@inheritDoc} */ @Override public void notchLastCheckpointPtr(WALPointer ptr) { - if (compressor != null) - segmentAware.keepUncompressedIdxFrom(ptr.index()); + lastCheckpointPtr = ptr; } /** {@inheritDoc} */ @@ -1117,9 +1119,7 @@ private boolean segmentReservedOrLocked(long absIdx) { if (lastArchived == -1) return 0; - int res = (int)(lastArchived - lastTruncated); - - return res >= 0 ? res : 0; + return Math.max((int)(lastArchived - lastTruncated), 0); } /** {@inheritDoc} */ @@ -1191,7 +1191,7 @@ private long lastArchivedIndex() { @Nullable private FileDescriptor readFileDescriptor(File file, FileIOFactory ioFactory) { FileDescriptor ds = new FileDescriptor(file); - try (SegmentIO fileIO = ds.toIO(ioFactory)) { + try (SegmentIO fileIO = ds.toReadOnlyIO(ioFactory)) { // File may be empty when LOG_ONLY mode is enabled and mmap is disabled. if (fileIO.size() == 0) return null; @@ -1369,9 +1369,13 @@ private FileWriteHandle restoreWriteHandle(@Nullable WALPointer lastReadPtr) thr FileWriteHandle hnd = fileHandleManager.initHandle(fileIO, off + len, ser); - if (archiver0 != null) - segmentAware.curAbsWalIdx(absIdx); - else + segmentAware.curAbsWalIdx(absIdx); + + FileDescriptor[] walArchiveFiles = walArchiveFiles(); + + segmentAware.minReserveIndex(F.isEmpty(walArchiveFiles) ? -1 : walArchiveFiles[0].idx - 1); + + if (archiver0 == null) segmentAware.setLastArchivedAbsoluteIndex(absIdx - 1); // Getting segment sizes. @@ -1494,17 +1498,17 @@ private FileWriteHandle initNextWriteHandle(FileWriteHandle cur) throws IgniteCh * @throws StorageException If failed. */ private void checkOrPrepareFiles() throws StorageException { - // Clean temp files. 
- { - File[] tmpFiles = walWorkDir.listFiles(WAL_SEGMENT_TEMP_FILE_FILTER); - - if (!F.isEmpty(tmpFiles)) { - for (File tmp : tmpFiles) { - if (!tmp.delete()) { - throw new StorageException("Failed to delete previously created temp file " + - "(make sure Ignite process has enough rights): " + tmp.getAbsolutePath()); - } - } + Collection tmpFiles = new HashSet<>(); + + for (File walDir : F.asList(walWorkDir, walArchiveDir)) { + tmpFiles.addAll(F.asList(walDir.listFiles(WAL_SEGMENT_TEMP_FILE_FILTER))); + tmpFiles.addAll(F.asList(walDir.listFiles(WAL_SEGMENT_TEMP_FILE_COMPACTED_FILTER))); + } + + for (File tmpFile : tmpFiles) { + if (tmpFile.exists() && !tmpFile.delete()) { + throw new StorageException("Failed to delete previously created temp file " + + "(make sure Ignite process has enough rights): " + tmpFile.getAbsolutePath()); } } @@ -1605,6 +1609,7 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte FileArchiver archiver0 = archiver; if (archiver0 == null) { + segmentAware.curAbsWalIdx(curIdx + 1); segmentAware.setLastArchivedAbsoluteIndex(curIdx); return new File(walWorkDir, fileName(curIdx + 1)); @@ -1634,7 +1639,9 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte } /** - * Files from archive WAL directory. + * Files from {@link #walArchiveDir}. + * + * @return Raw or compressed WAL segments from archive. */ private FileDescriptor[] walArchiveFiles() { return scan(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)); @@ -1642,8 +1649,8 @@ private FileDescriptor[] walArchiveFiles() { /** {@inheritDoc} */ @Override public long maxArchivedSegmentToDelete() { - //When maxWalArchiveSize==MAX_VALUE deleting files is not permit. - if (dsCfg.getMaxWalArchiveSize() == Long.MAX_VALUE) + //When maxWalArchiveSize==-1 deleting files is not permitted. 
+ if (dsCfg.getMaxWalArchiveSize() == DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE) return -1; FileDescriptor[] archivedFiles = walArchiveFiles(); @@ -1984,22 +1991,6 @@ private long nextAbsoluteSegmentIndex() throws StorageException, IgniteInterrupt } } - /** - * @param absIdx Segment absolute index. - * @return

  • {@code True} if can read, no lock is held,
  • {@code false} if work segment, need - * release segment later, use {@link #releaseWorkSegment} for unlock
- */ - public boolean checkCanReadArchiveOrReserveWorkSegment(long absIdx) { - return segmentAware.checkCanReadArchiveOrReserveWorkSegment(absIdx); - } - - /** - * @param absIdx Segment absolute index. - */ - public void releaseWorkSegment(long absIdx) { - segmentAware.releaseWorkSegment(absIdx); - } - /** * Moves WAL segment from work folder to archive folder. Temp file is used to do movement. * @@ -2081,18 +2072,12 @@ private void allocateRemainingFiles() throws StorageException { checkFiles( 1, true, - new IgnitePredicate() { - @Override public boolean apply(Integer integer) { - return !checkStop(); - } - }, - new CI1() { - @Override public void apply(Integer idx) { - synchronized (FileArchiver.this) { - formatted = idx; + (IgnitePredicate)integer -> !checkStop(), + (CI1)idx -> { + synchronized (FileArchiver.this) { + formatted = idx; - FileArchiver.this.notifyAll(); - } + FileArchiver.this.notifyAll(); } } ); @@ -2131,15 +2116,6 @@ private class FileCompressor extends FileCompressorWorker { /** */ private void init() { - File[] toDel = walArchiveDir.listFiles(WAL_SEGMENT_TEMP_FILE_COMPACTED_FILTER); - - for (File f : toDel) { - if (isCancelled()) - return; - - f.delete(); - } - for (int i = 1; i < calculateThreadCount(); i++) { FileCompressorWorker worker = new FileCompressorWorker(i, log); @@ -2400,7 +2376,7 @@ private void deleteObsoleteRawSegments() { if (segmentReservedOrLocked(desc.idx)) return; - if (desc.idx < segmentAware.keepUncompressedIdxFrom() && duplicateIndices.contains(desc.idx)) { + if (desc.idx < lastCheckpointPtr.index() && duplicateIndices.contains(desc.idx)) { if (desc.file.exists() && !desc.file.delete()) { U.warn(log, "Failed to remove obsolete WAL segment " + "(make sure the process has enough rights): " + desc.file.getAbsolutePath() + @@ -2416,13 +2392,13 @@ private void deleteObsoleteRawSegments() { */ private class FileDecompressor extends GridWorker { /** Decompression futures. 
*/ - private Map> decompressionFutures = new HashMap<>(); + private final Map> decompressionFutures = new HashMap<>(); /** Segments queue. */ private final PriorityBlockingQueue segmentsQueue = new PriorityBlockingQueue<>(); /** Byte array for draining data. */ - private byte[] arr = new byte[BUF_SIZE]; + private final byte[] arr = new byte[BUF_SIZE]; /** * @param log Logger. @@ -2730,18 +2706,16 @@ private static class RecordsIterator extends AbstractWalRecordsIterator { private final DataStorageConfiguration dsCfg; /** Optional start pointer. */ - @Nullable - private WALPointer start; + @Nullable private final WALPointer start; /** Optional end pointer. */ - @Nullable - private WALPointer end; + @Nullable private final WALPointer end; /** Manager of segment location. */ - private SegmentRouter segmentRouter; + private final SegmentRouter segmentRouter; /** Holder of actual information of latest manipulation on WAL segments. */ - private SegmentAware segmentAware; + private final SegmentAware segmentAware; /** * @param cctx Shared context. @@ -2756,10 +2730,10 @@ private static class RecordsIterator extends AbstractWalRecordsIterator { * @param log Logger @throws IgniteCheckedException If failed to initialize WAL segment. * @param segmentAware Segment aware. * @param segmentRouter Segment router. - * @param segmentFileInputFactory + * @param segmentFileInputFactory Factory to provide I/O interfaces for read primitives with files. 
*/ private RecordsIterator( - GridCacheSharedContext cctx, + GridCacheSharedContext cctx, File walArchiveDir, File walWorkDir, @Nullable WALPointer start, @@ -2774,13 +2748,15 @@ private RecordsIterator( SegmentRouter segmentRouter, SegmentFileInputFactory segmentFileInputFactory ) throws IgniteCheckedException { - super(log, + super( + log, cctx, serializerFactory, ioFactory, dsCfg.getWalRecordIteratorBufferSize(), segmentFileInputFactory ); + this.walArchiveDir = walArchiveDir; this.walWorkDir = walWorkDir; this.archiver = archiver; @@ -2890,57 +2866,70 @@ private void init() throws IgniteCheckedException { curWalSegmIdx++; - boolean readArchive = canReadArchiveOrReserveWork(curWalSegmIdx); //lock during creation handle. + // Segment deletion protection. + if (!segmentAware.reserve(curWalSegmIdx)) + throw new IgniteCheckedException("Segment does not exist: " + curWalSegmIdx); - FileDescriptor fd = null; - ReadFileHandle nextHandle; try { - fd = segmentRouter.findSegment(curWalSegmIdx); + // Protection against transferring a segment to the archive by #archiver. + boolean readArchive = archiver != null && !segmentAware.lock(curWalSegmIdx); - if (log.isDebugEnabled()) - log.debug("Reading next file [absIdx=" + curWalSegmIdx + ", file=" + fd.file.getAbsolutePath() + ']'); + FileDescriptor fd = null; + ReadFileHandle nextHandle; + try { + fd = segmentRouter.findSegment(curWalSegmIdx); - nextHandle = initReadHandle(fd, start != null && curWalSegmIdx == start.index() ? start : null); - } - catch (FileNotFoundException e) { - if (readArchive) - throw new IgniteCheckedException("Missing WAL segment in the archive", e); - else { - // Log only when no segments were read. This will help us avoiding logging on the end of the WAL. 
- if (curRec == null && curWalSegment == null) { - File workDirFile = new File(walWorkDir, fileName(curWalSegmIdx % dsCfg.getWalSegments())); - File archiveDirFile = new File(walArchiveDir, fileName(curWalSegmIdx)); - - U.warn( - log, - "Next segment file is not found [" + - "curWalSegmIdx=" + curWalSegmIdx - + ", start=" + start - + ", end=" + end - + ", filePath=" + (fd == null ? "" : fd.file.getAbsolutePath()) - + ", walWorkDir=" + walWorkDir - + ", walWorkDirContent=" + listFileNames(walWorkDir) - + ", walArchiveDir=" + walArchiveDir - + ", walArchiveDirContent=" + listFileNames(walArchiveDir) - + ", workDirFile=" + workDirFile.getName() - + ", exists=" + workDirFile.exists() - + ", archiveDirFile=" + archiveDirFile.getName() - + ", exists=" + archiveDirFile.exists() - + "]", - e - ); + if (log.isDebugEnabled()) { + log.debug("Reading next file [absIdx=" + curWalSegmIdx + + ", file=" + fd.file.getAbsolutePath() + ']'); } - nextHandle = null; + nextHandle = initReadHandle(fd, start != null && curWalSegmIdx == start.index() ? start : null); } - } + catch (FileNotFoundException e) { + if (readArchive) + throw new IgniteCheckedException("Missing WAL segment in the archive: " + curWalSegment, e); + else { + // Log only when no segments were read. This will help us avoiding logging on the end of the WAL. + if (curRec == null && curWalSegment == null) { + File workDirFile = new File(walWorkDir, fileName(curWalSegmIdx % dsCfg.getWalSegments())); + File archiveDirFile = new File(walArchiveDir, fileName(curWalSegmIdx)); + + U.warn( + log, + "Next segment file is not found [" + + "curWalSegmIdx=" + curWalSegmIdx + + ", start=" + start + + ", end=" + end + + ", filePath=" + (fd == null ? 
"" : fd.file.getAbsolutePath()) + + ", walWorkDir=" + walWorkDir + + ", walWorkDirContent=" + listFileNames(walWorkDir) + + ", walArchiveDir=" + walArchiveDir + + ", walArchiveDirContent=" + listFileNames(walArchiveDir) + + ", workDirFile=" + workDirFile.getName() + + ", exists=" + workDirFile.exists() + + ", archiveDirFile=" + archiveDirFile.getName() + + ", exists=" + archiveDirFile.exists() + + "]", + e + ); + } - if (!readArchive) - releaseWorkSegment(curWalSegmIdx); + nextHandle = null; + } + } + finally { + if (archiver != null && !readArchive) + segmentAware.unlock(curWalSegmIdx); + } - curRec = null; + curRec = null; - return nextHandle; + return nextHandle; + } + finally { + segmentAware.release(curWalSegmIdx); + } } /** */ @@ -2955,62 +2944,46 @@ private static List listFileNames(File dir) { /** {@inheritDoc} */ @Override protected IgniteCheckedException handleRecordException(Exception e, @Nullable WALPointer ptr) { - if (e instanceof IgniteCheckedException) - if (X.hasCause(e, IgniteDataIntegrityViolationException.class)) - // This means that there is no explicit last sengment, so we iterate unil the very end. - if (end == null) { - long nextWalSegmentIdx = curWalSegmIdx + 1; - - if (!isArchiverEnabled()) - if (canIgnoreCrcError(nextWalSegmentIdx, nextWalSegmentIdx, e, ptr)) - return null; - + if (e instanceof IgniteCheckedException && X.hasCause(e, IgniteDataIntegrityViolationException.class)) { + // This means that there is no explicit last segment, so we iterate until the very end. + if (end == null) { + long nextWalSegmentIdx = curWalSegmIdx + 1; + + if (archiver == null) { + if (canIgnoreCrcError(nextWalSegmentIdx, nextWalSegmentIdx, e, ptr)) + return null; + } + else { // Check that we should not look this segment up in archive directory. // Basically the same check as in "advanceSegment" method. 
- if (isArchiverEnabled() && archiver != null) - if (!canReadArchiveOrReserveWork(nextWalSegmentIdx)) - try { - long workIdx = nextWalSegmentIdx % dsCfg.getWalSegments(); - if (canIgnoreCrcError(workIdx, nextWalSegmentIdx, e, ptr)) - return null; - } - finally { - releaseWorkSegment(nextWalSegmentIdx); + // Segment deletion protection. + if (segmentAware.reserve(nextWalSegmentIdx)) { + try { + // Protection against transferring a segment to the archive by #archiver. + if (segmentAware.lock(nextWalSegmentIdx)) { + try { + long workIdx = nextWalSegmentIdx % dsCfg.getWalSegments(); + + if (canIgnoreCrcError(workIdx, nextWalSegmentIdx, e, ptr)) + return null; + } + finally { + segmentAware.unlock(nextWalSegmentIdx); + } } + } + finally { + segmentAware.release(nextWalSegmentIdx); + } + } } + } + } return super.handleRecordException(e, ptr); } - /** - * @param absIdx Absolute index to check. - * @return
  • {@code True} if we can safely read the archive,
  • {@code false} if the segment has - * not been archived yet. In this case the corresponding work segment is reserved (will not be deleted until - * release). Use {@link #releaseWorkSegment} for unlock
- */ - private boolean canReadArchiveOrReserveWork(long absIdx) { - return archiver != null && archiver.checkCanReadArchiveOrReserveWorkSegment(absIdx); - } - - /** - * @param absIdx Absolute index to release. - */ - private void releaseWorkSegment(long absIdx) { - if (archiver != null) - archiver.releaseWorkSegment(absIdx); - } - - /** - * Check that archiver is enabled - */ - private boolean isArchiverEnabled() { - if (walArchiveDir != null && walWorkDir != null) - return !walArchiveDir.equals(walWorkDir); - - return !new File(dsCfg.getWalArchivePath()).equals(new File(dsCfg.getWalPath())); - } - /** * @param workIdx Work index. * @param walSegmentIdx Wal segment index. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentArchivedStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentArchivedStorage.java index 438b92217405b..53b3b598a97eb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentArchivedStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentArchivedStorage.java @@ -43,21 +43,10 @@ class SegmentArchivedStorage extends SegmentObservable { /** * @param segmentLockStorage Protects WAL work segments from moving. */ - private SegmentArchivedStorage(SegmentLockStorage segmentLockStorage) { + SegmentArchivedStorage(SegmentLockStorage segmentLockStorage) { this.segmentLockStorage = segmentLockStorage; } - /** - * @param segmentLockStorage Protects WAL work segments from moving. - */ - static SegmentArchivedStorage buildArchivedStorage(SegmentLockStorage segmentLockStorage) { - SegmentArchivedStorage archivedStorage = new SegmentArchivedStorage(segmentLockStorage); - - segmentLockStorage.addObserver(archivedStorage::onSegmentUnlocked); - - return archivedStorage; - } - /** * @return Last archived segment absolute index. 
*/ @@ -105,7 +94,7 @@ synchronized void awaitSegmentArchived(long awaitIdx) throws IgniteInterruptedCh */ synchronized void markAsMovedToArchive(long toArchive) throws IgniteInterruptedCheckedException { try { - while (segmentLockStorage.locked(toArchive) && !interrupted) + while (!segmentLockStorage.minLockIndex(toArchive) && !interrupted) wait(); } catch (InterruptedException e) { @@ -145,7 +134,7 @@ private void checkInterrupted() throws IgniteInterruptedCheckedException { /** * Callback for waking up waiters of this object when unlocked happened. */ - private synchronized void onSegmentUnlocked(long segmentId) { + synchronized void onSegmentUnlocked(long segmentId) { notifyAll(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAware.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAware.java index be60895b365f2..89523db552f83 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAware.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAware.java @@ -19,10 +19,6 @@ import org.apache.ignite.internal.IgniteInterruptedCheckedException; -import static org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentArchivedStorage.buildArchivedStorage; -import static org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentCompressStorage.buildCompressStorage; -import static org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentCurrentStateStorage.buildCurrentStateStorage; - /** * Holder of actual information of latest manipulation on WAL segments. */ @@ -34,7 +30,7 @@ public class SegmentAware { private final SegmentLockStorage segmentLockStorage = new SegmentLockStorage(); /** Manages last archived index, emulates archivation in no-archiver mode. 
*/ - private final SegmentArchivedStorage segmentArchivedStorage = buildArchivedStorage(segmentLockStorage); + private final SegmentArchivedStorage segmentArchivedStorage; /** Storage of actual information about current index of compressed segments. */ private final SegmentCompressStorage segmentCompressStorage; @@ -43,12 +39,21 @@ public class SegmentAware { private final SegmentCurrentStateStorage segmentCurrStateStorage; /** + * Constructor. + * * @param walSegmentsCnt Total WAL segments count. * @param compactionEnabled Is wal compaction enabled. */ public SegmentAware(int walSegmentsCnt, boolean compactionEnabled) { - segmentCurrStateStorage = buildCurrentStateStorage(walSegmentsCnt, segmentArchivedStorage); - segmentCompressStorage = buildCompressStorage(segmentArchivedStorage, compactionEnabled); + segmentArchivedStorage = new SegmentArchivedStorage(segmentLockStorage); + + segmentCurrStateStorage = new SegmentCurrentStateStorage(walSegmentsCnt); + segmentCompressStorage = new SegmentCompressStorage(compactionEnabled); + + segmentArchivedStorage.addObserver(segmentCurrStateStorage::onSegmentArchived); + segmentArchivedStorage.addObserver(segmentCompressStorage::onSegmentArchived); + + segmentLockStorage.addObserver(segmentArchivedStorage::onSegmentUnlocked); } /** @@ -132,20 +137,6 @@ public long lastCompressedIdx() { return segmentCompressStorage.lastCompressedIdx(); } - /** - * @param idx Minimum raw segment index that should be preserved from deletion. - */ - public void keepUncompressedIdxFrom(long idx) { - segmentCompressStorage.keepUncompressedIdxFrom(idx); - } - - /** - * @return Minimum raw segment index that should be preserved from deletion. - */ - public long keepUncompressedIdxFrom() { - return segmentCompressStorage.keepUncompressedIdxFrom(); - } - /** * Update current WAL index. * @@ -184,10 +175,14 @@ public long lastArchivedAbsoluteIndex() { } /** + * Segment reservation. 
It will be successful if segment is {@code >} than + * the {@link #minReserveIndex minimum}. + * * @param absIdx Index for reservation. + * @return {@code True} if the reservation was successful. */ - public void reserve(long absIdx) { - reservationStorage.reserve(absIdx); + public boolean reserve(long absIdx) { + return reservationStorage.reserve(absIdx); } /** @@ -208,9 +203,9 @@ public void release(long absIdx) { } /** - * Check if WAL segment locked (protected from move to archive) + * Check if WAL segment locked (protected from move to archive). * - * @param absIdx Index for check reservation. + * @param absIdx Index for check locking. * @return {@code True} if index is locked. */ public boolean locked(long absIdx) { @@ -218,27 +213,20 @@ public boolean locked(long absIdx) { } /** - * @param absIdx Segment absolute index. - * @return
  • {@code True} if can read, no lock is held,
  • {@code false} if work segment, need release - * segment later, use {@link #releaseWorkSegment} for unlock
- */ - public boolean checkCanReadArchiveOrReserveWorkSegment(long absIdx) { - return lastArchivedAbsoluteIndex() >= absIdx || segmentLockStorage.lockWorkSegment(absIdx); - } - - /** - * Visible for test. + * Segment lock. It will be successful if segment is {@code >} than + * the {@link #lastArchivedAbsoluteIndex last archived}. * - * @param absIdx Segment absolute index. segment later, use {@link #releaseWorkSegment} for unlock + * @param absIdx Index to lock. + * @return {@code True} if the lock was successful. */ - void lockWorkSegment(long absIdx) { - segmentLockStorage.lockWorkSegment(absIdx); + public boolean lock(long absIdx) { + return segmentLockStorage.lockWorkSegment(absIdx); } /** - * @param absIdx Segment absolute index. + * @param absIdx Index to unlock. */ - public void releaseWorkSegment(long absIdx) { + public void unlock(long absIdx) { segmentLockStorage.releaseWorkSegment(absIdx); } @@ -274,4 +262,28 @@ public void forceInterrupt() { segmentCurrStateStorage.forceInterrupt(); } + + /** + * Increasing minimum segment index after that can be reserved. + * Value will be updated if it is greater than the current one. + * If segment is already reserved, the update will fail. + * + * @param absIdx Absolute segment index. + * @return {@code True} if update is successful. + */ + public boolean minReserveIndex(long absIdx) { + return reservationStorage.minReserveIndex(absIdx); + } + + /** + * Increasing minimum segment index after that can be locked. + * Value will be updated if it is greater than the current one. + * If segment is already locked, the update will fail. + * + * @param absIdx Absolute segment index. + * @return {@code True} if update is successful.
+ */ + public boolean minLockIndex(long absIdx) { + return segmentLockStorage.minLockIndex(absIdx); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCompressStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCompressStorage.java index 5d88e5233ecc4..62fe69d7c1a47 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCompressStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCompressStorage.java @@ -30,9 +30,6 @@ public class SegmentCompressStorage { /** Flag of interrupt waiting on this object. */ private volatile boolean interrupted; - /** Manages last archived index, emulates archivation in no-archiver mode. */ - private final SegmentArchivedStorage segmentArchivedStorage; - /** If WAL compaction enabled. */ private final boolean compactionEnabled; @@ -51,32 +48,15 @@ public class SegmentCompressStorage { /** Compressed segment with maximal index. */ private long lastMaxCompressedIdx = -1L; - /** Min uncompressed index to keep. */ - private volatile long minUncompressedIdxToKeep = -1L; - /** - * @param segmentArchivedStorage Storage of last archived segment. + * Constructor. + * * @param compactionEnabled If WAL compaction enabled. */ - private SegmentCompressStorage(SegmentArchivedStorage segmentArchivedStorage, boolean compactionEnabled) { - this.segmentArchivedStorage = segmentArchivedStorage; - + SegmentCompressStorage(boolean compactionEnabled) { this.compactionEnabled = compactionEnabled; } - /** - * @param segmentArchivedStorage Storage of last archived segment. - * @param compactionEnabled If WAL compaction enabled. 
- */ - static SegmentCompressStorage buildCompressStorage(SegmentArchivedStorage segmentArchivedStorage, - boolean compactionEnabled) { - SegmentCompressStorage storage = new SegmentCompressStorage(segmentArchivedStorage, compactionEnabled); - - segmentArchivedStorage.addObserver(storage::onSegmentArchived); - - return storage; - } - /** * Callback after segment compression finish. * @@ -148,27 +128,13 @@ private void checkInterrupted() throws IgniteInterruptedCheckedException { /** * Callback for waking up compressor when new segment is archived. */ - private synchronized void onSegmentArchived(long lastAbsArchivedIdx) { + synchronized void onSegmentArchived(long lastAbsArchivedIdx) { while (lastEnqueuedToCompressIdx < lastAbsArchivedIdx && compactionEnabled) segmentsToCompress.add(++lastEnqueuedToCompressIdx); notifyAll(); } - /** - * @param idx Minimum raw segment index that should be preserved from deletion. - */ - void keepUncompressedIdxFrom(long idx) { - minUncompressedIdxToKeep = idx; - } - - /** - * @return Minimum raw segment index that should be preserved from deletion. - */ - long keepUncompressedIdxFrom() { - return minUncompressedIdxToKeep; - } - /** * Reset interrupted flag. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCurrentStateStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCurrentStateStorage.java index 73394972db182..6672879b1a6c8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCurrentStateStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentCurrentStateStorage.java @@ -22,7 +22,7 @@ /** * Storage of absolute current segment index. */ -class SegmentCurrentStateStorage { +class SegmentCurrentStateStorage extends SegmentObservable { /** Flag of interrupt of waiting on this object. 
*/ private volatile boolean interrupted; @@ -32,38 +32,22 @@ class SegmentCurrentStateStorage { /** Total WAL segments count. */ private final int walSegmentsCnt; - /** Manages last archived index, emulates archivation in no-archiver mode. */ - private final SegmentArchivedStorage segmentArchivedStorage; - /** * Absolute current segment index WAL Manager writes to. Guarded by this. Incremented during rollover. * Also may be directly set if WAL is resuming logging after start. */ private volatile long curAbsWalIdx = -1; - /** - * @param walSegmentsCnt Total WAL segments count. - * @param segmentArchivedStorage Last archived segment storage. - */ - private SegmentCurrentStateStorage(int walSegmentsCnt, SegmentArchivedStorage segmentArchivedStorage) { - this.walSegmentsCnt = walSegmentsCnt; - this.segmentArchivedStorage = segmentArchivedStorage; - } + /** Last archived file absolute index. */ + private volatile long lastAbsArchivedIdx = -1; /** + * Constructor. + * * @param walSegmentsCnt Total WAL segments count. - * @param segmentArchivedStorage Last archived segment storage. */ - static SegmentCurrentStateStorage buildCurrentStateStorage( - int walSegmentsCnt, - SegmentArchivedStorage segmentArchivedStorage - ) { - - SegmentCurrentStateStorage currStorage = new SegmentCurrentStateStorage(walSegmentsCnt, segmentArchivedStorage); - - segmentArchivedStorage.addObserver(currStorage::onSegmentArchived); - - return currStorage; + SegmentCurrentStateStorage(int walSegmentsCnt) { + this.walSegmentsCnt = walSegmentsCnt; } /** @@ -87,13 +71,11 @@ synchronized void awaitSegment(long absSegIdx) throws IgniteInterruptedCheckedEx * Waiting until archivation of next segment will be allowed. 
*/ synchronized long waitNextSegmentForArchivation() throws IgniteInterruptedCheckedException { - long lastArchivedSegment = segmentArchivedStorage.lastArchivedAbsoluteIndex(); - //We can archive segment if it less than current work segment so for archivate lastArchiveSegment + 1 // we should be ensure that currentWorkSegment = lastArchiveSegment + 2 - awaitSegment(lastArchivedSegment + 2); + awaitSegment(lastAbsArchivedIdx + 2); - return lastArchivedSegment + 1; + return lastAbsArchivedIdx + 1; } /** @@ -102,23 +84,31 @@ synchronized long waitNextSegmentForArchivation() throws IgniteInterruptedChecke * * @return Next absolute segment index. */ - synchronized long nextAbsoluteSegmentIndex() throws IgniteInterruptedCheckedException { - curAbsWalIdx++; + long nextAbsoluteSegmentIndex() throws IgniteInterruptedCheckedException { + long nextAbsIdx; - notifyAll(); + synchronized (this) { + curAbsWalIdx++; - try { - while (curAbsWalIdx - segmentArchivedStorage.lastArchivedAbsoluteIndex() > walSegmentsCnt && !forceInterrupted) - wait(); - } - catch (InterruptedException e) { - throw new IgniteInterruptedCheckedException(e); + notifyAll(); + + try { + while (curAbsWalIdx - lastAbsArchivedIdx > walSegmentsCnt && !forceInterrupted) + wait(); + } + catch (InterruptedException e) { + throw new IgniteInterruptedCheckedException(e); + } + + if (forceInterrupted) + throw new IgniteInterruptedCheckedException("Interrupt waiting of change archived idx"); + + nextAbsIdx = curAbsWalIdx; } - if (forceInterrupted) - throw new IgniteInterruptedCheckedException("Interrupt waiting of change archived idx"); + notifyObservers(nextAbsIdx); - return curAbsWalIdx; + return nextAbsIdx; } /** @@ -126,10 +116,14 @@ synchronized long nextAbsoluteSegmentIndex() throws IgniteInterruptedCheckedExce * * @param curAbsWalIdx New current WAL index. 
*/ - synchronized void curAbsWalIdx(long curAbsWalIdx) { - this.curAbsWalIdx = curAbsWalIdx; + void curAbsWalIdx(long curAbsWalIdx) { + synchronized (this) { + this.curAbsWalIdx = curAbsWalIdx; - notifyAll(); + notifyAll(); + } + + notifyObservers(curAbsWalIdx); } /** @@ -160,8 +154,12 @@ synchronized void forceInterrupt() { /** * Callback for waking up awaiting when new segment is archived. + * + * @param lastAbsArchivedIdx Last archived file absolute index. */ - private synchronized void onSegmentArchived(long lastAbsArchivedIdx) { + synchronized void onSegmentArchived(long lastAbsArchivedIdx) { + this.lastAbsArchivedIdx = lastAbsArchivedIdx; + notifyAll(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentLockStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentLockStorage.java index 6588769edd01b..a5a79486ec4b4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentLockStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentLockStorage.java @@ -29,7 +29,10 @@ public class SegmentLockStorage extends SegmentObservable { * Maps absolute segment index to locks counter. Lock on segment protects from archiving segment and may come from * {@link FileWriteAheadLogManager.RecordsIterator} during WAL replay. Map itself is guarded by this. */ - private Map locked = new ConcurrentHashMap<>(); + private final Map locked = new ConcurrentHashMap<>(); + + /** Maximum segment index that can be locked. */ + private volatile long minLockIdx = -1; /** * Check if WAL segment locked (protected from move to archive) @@ -37,17 +40,22 @@ public class SegmentLockStorage extends SegmentObservable { * @param absIdx Index for check reservation. * @return {@code True} if index is locked. 
*/ - public boolean locked(long absIdx) { + boolean locked(long absIdx) { return locked.containsKey(absIdx); } /** - * @param absIdx Segment absolute index. - * @return
  • {@code True} if can read, no lock is held,
  • {@code false} if work segment, need release - * segment later, use {@link #releaseWorkSegment} for unlock
+ * Segment lock. It will be successful if segment is {@code >} than the {@link #minLockIdx minimum}. + * + * @param absIdx Index to lock. + * @return {@code True} if the lock was successful. */ - boolean lockWorkSegment(long absIdx) { - locked.compute(absIdx, (idx, count) -> count == null ? 1 : count + 1); + synchronized boolean lockWorkSegment(long absIdx) { + if (absIdx > minLockIdx) { + locked.merge(absIdx, 1, Integer::sum); + + return true; + } return false; } @@ -64,4 +72,21 @@ void releaseWorkSegment(long absIdx) { notifyObservers(absIdx); } + + /** + * Increasing minimum segment index that can be locked. + * Value will be updated if it is greater than the current one. + * If segment is already locked, the update will fail. + * + * @param absIdx Absolute segment index. + * @return {@code True} if update is successful. + */ + synchronized boolean minLockIndex(long absIdx) { + if (locked(absIdx)) + return false; + + minLockIdx = Math.max(minLockIdx, absIdx); + + return true; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentReservationStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentReservationStorage.java index 50c2bbf067d84..42eece70761b2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentReservationStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentReservationStorage.java @@ -27,13 +27,25 @@ class SegmentReservationStorage { * Maps absolute segment index to reservation counter. If counter > 0 then we wouldn't delete all segments which has * index >= reserved segment index. Guarded by {@code this}. */ - private NavigableMap reserved = new TreeMap<>(); + private final NavigableMap reserved = new TreeMap<>(); + + /** Maximum segment index that cannot be reserved.
*/ + private long minReserveIdx = -1; /** + * Segment reservation. It will be successful if segment is {@code >} than the {@link #minReserveIdx minimum}. + * + * @param absIdx Index for reservation. + * @return {@code True} if the reservation was successful. */ - synchronized void reserve(long absIdx) { - reserved.merge(absIdx, 1, (a, b) -> a + b); + synchronized boolean reserve(long absIdx) { + if (absIdx > minReserveIdx) { + reserved.merge(absIdx, 1, Integer::sum); + + return true; + } + + return false; } /** @@ -59,4 +71,21 @@ synchronized void release(long absIdx) { else reserved.put(absIdx, cur - 1); } + + /** + * Increasing minimum segment index that can be reserved. + * Value will be updated if it is greater than the current one. + * If segment is already reserved, the update will fail. + * + * @param absIdx Absolute segment index. + * @return {@code True} if update is successful. + */ + synchronized boolean minReserveIndex(long absIdx) { + if (reserved(absIdx)) + return false; + + minReserveIdx = Math.max(minReserveIdx, absIdx); + + return true; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedReadFileInput.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedReadFileInput.java index 6bb47863f4afd..13a905fd99234 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedReadFileInput.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedReadFileInput.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence.wal.io; +import java.io.FileNotFoundException; import java.io.IOException; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.wal.ByteBufferExpander; @@ -69,19 +70,29 @@ final class LockedReadFileInput extends SimpleFileInput {
if (available >= requested) return; - boolean readArchive = segmentAware.checkCanReadArchiveOrReserveWorkSegment(segmentId); + // Segment deletion protection. + if (!segmentAware.reserve(segmentId)) + throw new FileNotFoundException("Segment does not exist: " + segmentId); + try { - if (readArchive && !isLastReadFromArchive) { - isLastReadFromArchive = true; + // Protection against transferring a segment to the archive by #archiver. + boolean readArchive = !segmentAware.lock(segmentId); + try { + if (readArchive && !isLastReadFromArchive) { + isLastReadFromArchive = true; - refreshIO(); - } + refreshIO(); + } - super.ensure(requested); + super.ensure(requested); + } + finally { + if (!readArchive) + segmentAware.unlock(segmentId); + } } finally { - if (!readArchive) - segmentAware.releaseWorkSegment(segmentId); + segmentAware.release(segmentId); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedSegmentFileInputFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedSegmentFileInputFactory.java index 909f912659c5e..6e38b70393e0c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedSegmentFileInputFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/io/LockedSegmentFileInputFactory.java @@ -63,7 +63,7 @@ public LockedSegmentFileInputFactory( id -> { FileDescriptor segment = segmentRouter.findSegment(id); - return segment.toIO(fileIOFactory); + return segment.toReadOnlyIO(fileIOFactory); } ); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java index dd06244d4f71f..68fc432483eda 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java @@ -332,7 +332,7 @@ private FileDescriptor readFileDescriptor(File file, FileIOFactory ioFactory) { FileDescriptor ds = new FileDescriptor(file); try ( - SegmentIO fileIO = ds.toIO(ioFactory); + SegmentIO fileIO = ds.toReadOnlyIO(ioFactory); ByteBufferExpander buf = new ByteBufferExpander(HEADER_RECORD_SIZE, ByteOrder.nativeOrder()) ) { final DataInput in = segmentFileInputFactory.createFileInput(fileIO, buf); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index d5a9ed05f5956..e0e2092d770f5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -54,6 +54,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.IgniteDefragmentation; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; @@ -490,6 +491,11 @@ protected IgniteConfiguration prepareIgniteConfiguration() { return null; } + /** {@inheritDoc} */ + @Override 
public IgniteDefragmentation defragmentation() { + return null; + } + /** {@inheritDoc} */ @Override public WorkersRegistry workersRegistry() { return null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java index 912aecd348ae9..b2f9975f58111 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java @@ -326,7 +326,7 @@ private boolean checkBounds(long idx) { SegmentHeader segmentHeader; while (true) { try { - fileIO = fd.toIO(ioFactory); + fileIO = fd.toReadOnlyIO(ioFactory); segmentHeader = readSegmentHeader(fileIO, FILE_INPUT_FACTORY); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageImpl.java index 4ffd0caaa7396..880f6454eb610 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/metastorage/persistence/DistributedMetaStorageImpl.java @@ -28,6 +28,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiConsumer; import java.util.function.Predicate; @@ -41,6 +42,7 @@ import org.apache.ignite.internal.GridKernalContext; import 
org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; import org.apache.ignite.internal.managers.discovery.DiscoveryLocalJoinData; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; @@ -59,12 +61,14 @@ import org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.marshaller.jdk.JdkMarshaller; import org.apache.ignite.spi.IgniteNodeValidationResult; +import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.discovery.DiscoveryDataBag; import org.apache.ignite.spi.discovery.DiscoveryDataBag.GridDiscoveryData; import org.apache.ignite.spi.discovery.DiscoveryDataBag.JoiningNodeDiscoveryData; @@ -175,6 +179,12 @@ public class DistributedMetaStorageImpl extends GridProcessorAdapter */ private final ConcurrentMap> updateFuts = new ConcurrentHashMap<>(); + /** */ + private final ReadWriteLock updateFutsStopLock = new ReentrantReadWriteLock(); + + /** */ + private boolean stopped; + /** * Lock to access/update data and component's state. 
*/ @@ -287,7 +297,7 @@ public DistributedMetaStorageImpl(GridKernalContext ctx) { finally { lock.writeLock().unlock(); - cancelUpdateFutures(); + cancelUpdateFutures(nodeStoppingException(), true); } } @@ -914,7 +924,7 @@ private String validatePayload(DistributedMetaStorageJoiningNodeData joiningData ver = INITIAL_VERSION; - cancelUpdateFutures(); + cancelUpdateFutures(new IgniteCheckedException("Client was disconnected during the operation."), false); } finally { lock.writeLock().unlock(); @@ -924,13 +934,28 @@ private String validatePayload(DistributedMetaStorageJoiningNodeData joiningData /** * Cancel all waiting futures and clear the map. */ - private void cancelUpdateFutures() { - for (GridFutureAdapter fut : updateFuts.values()) - fut.onDone(new IgniteCheckedException("Client was disconnected during the operation.")); + private void cancelUpdateFutures(Exception e, boolean stop) { + updateFutsStopLock.writeLock().lock(); + + try { + stopped = stop; + + for (GridFutureAdapter fut : updateFuts.values()) + fut.onDone(e); - updateFuts.clear(); + updateFuts.clear(); + } + finally { + updateFutsStopLock.writeLock().unlock(); + } + } + + /** */ + private static NodeStoppingException nodeStoppingException() { + return new NodeStoppingException("Node is stopping."); } + /** {@inheritDoc} */ @Override public IgniteInternalFuture onReconnected(boolean clusterRestarted) { assert isClient; @@ -1033,14 +1058,12 @@ else if (!isClient && ver.id() > 0) { * @throws IgniteCheckedException If there was an error while sending discovery message. 
*/ private GridFutureAdapter startWrite(String key, byte[] valBytes) throws IgniteCheckedException { - if (!isSupported(ctx)) - throw new IgniteCheckedException(NOT_SUPPORTED_MSG); - UUID reqId = UUID.randomUUID(); - GridFutureAdapter fut = new GridFutureAdapter<>(); + GridFutureAdapter fut = prepareWriteFuture(key, reqId); - updateFuts.put(reqId, fut); + if (fut.isDone()) + return fut; DiscoveryCustomMessage msg = new DistributedMetaStorageUpdateMessage(reqId, key, valBytes); @@ -1054,14 +1077,12 @@ private GridFutureAdapter startWrite(String key, byte[] valBytes) throws Igni */ private GridFutureAdapter startCas(String key, byte[] expValBytes, byte[] newValBytes) throws IgniteCheckedException { - if (!isSupported(ctx)) - throw new IgniteCheckedException(NOT_SUPPORTED_MSG); - UUID reqId = UUID.randomUUID(); - GridFutureAdapter fut = new GridFutureAdapter<>(); + GridFutureAdapter fut = prepareWriteFuture(key, reqId); - updateFuts.put(reqId, fut); + if (fut.isDone()) + return fut; DiscoveryCustomMessage msg = new DistributedMetaStorageCasMessage(reqId, key, expValBytes, newValBytes); @@ -1070,6 +1091,58 @@ private GridFutureAdapter startCas(String key, byte[] expValBytes, byte return fut; } + /** + * This method will perform some preliminary checks before starting write or cas operation. + * It also updates {@link #updateFuts} in case if everything's ok. + * + * Tricky part is exception handling from "isSupported" method. It can be thrown by + * {@code ZookeeperDiscoveryImpl#checkState()} method, but we can't just leave it as is. + * There are components that rely on distributed metastorage throwing {@link NodeStoppingException}. + * + * @return Future that must be returned immediately or {@code null}. + * @throws IgniteCheckedException If cluster can't perform this update. 
+ */ + private GridFutureAdapter prepareWriteFuture(String key, UUID reqId) throws IgniteCheckedException { + boolean supported; + + try { + supported = isSupported(ctx); + } + catch (Exception e) { + if (X.hasCause(e, IgniteSpiException.class) && e.getMessage() != null && e.getMessage().contains("Node stopped.")) { + GridFutureAdapter fut = new GridFutureAdapter<>(); + + fut.onDone(nodeStoppingException()); + + return fut; + } + + throw e; + } + + if (!supported) + throw new IgniteCheckedException(NOT_SUPPORTED_MSG); + + GridFutureAdapter fut = new GridFutureAdapter<>(); + + updateFutsStopLock.readLock().lock(); + + try { + if (stopped) { + fut.onDone(nodeStoppingException()); + + return fut; + } + + updateFuts.put(reqId, fut); + } + finally { + updateFutsStopLock.readLock().unlock(); + } + + return fut; + } + /** * Invoked when {@link DistributedMetaStorageUpdateMessage} received. Attempts to store received data (depends on * current {@link #bridge} value). Invokes failure handler with critical error if attempt failed for some reason. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java index fddf7ff91fab2..267b2a11ccab2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/metric/GridMetricManager.java @@ -439,8 +439,11 @@ else if (m instanceof HistogramMetric) opsFut.markInitialized(); opsFut.get(); } + catch (NodeStoppingException ignored) { + // No-op. 
+ } catch (IgniteCheckedException e) { - throw new IgniteException(e); + log.error("Failed to remove metrics configuration.", e); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java index e02b73d3e1103..fbe3218014e31 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java @@ -49,6 +49,7 @@ import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.cache.QueryIndexType; import org.apache.ignite.cache.affinity.AffinityFunction; +import org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cache.eviction.EvictionPolicy; import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy; @@ -452,6 +453,18 @@ public static PlatformAffinityFunction readAffinityFunction(BinaryRawReaderEx in f.setPartitions(partitions); f.setExcludeNeighbors(exclNeighbours); baseFunc = f; + + int attrCnt = in.readInt(); + if (attrCnt > 0) { + String[] attrs = new String[attrCnt]; + + for (int i = 0; i < attrCnt; i++) { + attrs[i] = in.readString(); + } + + f.setAffinityBackupFilter(new ClusterNodeAttributeAffinityBackupFilter(attrs)); + } + break; } default: @@ -492,17 +505,23 @@ private static void writeAffinityFunction(BinaryRawWriter out, AffinityFunction out.writeBoolean(f0.isExcludeNeighbors()); out.writeByte((byte) 0); // override flags out.writeObject(null); // user func + + writeAffinityBackupFilter(out, f0.getAffinityBackupFilter()); } else if (f instanceof PlatformAffinityFunction) { PlatformAffinityFunction f0 = (PlatformAffinityFunction) f; AffinityFunction baseFunc = 
f0.getBaseFunc(); if (baseFunc instanceof RendezvousAffinityFunction) { + RendezvousAffinityFunction rendezvous = (RendezvousAffinityFunction) baseFunc; + out.writeByte((byte) 2); out.writeInt(f0.partitions()); - out.writeBoolean(((RendezvousAffinityFunction) baseFunc).isExcludeNeighbors()); + out.writeBoolean(rendezvous.isExcludeNeighbors()); out.writeByte(f0.getOverrideFlags()); out.writeObject(f0.getUserFunc()); + + writeAffinityBackupFilter(out, rendezvous.getAffinityBackupFilter()); } else { out.writeByte((byte) 3); @@ -516,6 +535,26 @@ else if (f instanceof PlatformAffinityFunction) { out.writeByte((byte)0); } + /** + * Writes affinity backup filter. + * + * @param out Stream. + * @param filter Filter. + */ + private static void writeAffinityBackupFilter(BinaryRawWriter out, Object filter) { + if (filter instanceof ClusterNodeAttributeAffinityBackupFilter) { + ClusterNodeAttributeAffinityBackupFilter backupFilter = (ClusterNodeAttributeAffinityBackupFilter) filter; + + String[] attrs = backupFilter.getAttributeNames(); + out.writeInt(attrs.length); + + for (String attr : attrs) + out.writeString(attr); + } + else + out.writeInt(-1); + } + /** * Writes the eviction policy. * @param out Stream. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java index 2dc9910ab5709..3cf5c38021780 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java @@ -10797,7 +10797,8 @@ else if (regCfg.getMaxSize() < 8 * GB) * @return User-set max WAL archive size of triple size of the maximum checkpoint buffer. 
*/ public static long adjustedWalHistorySize(DataStorageConfiguration dsCfg, @Nullable IgniteLogger log) { - if (dsCfg.getMaxWalArchiveSize() != DataStorageConfiguration.DFLT_WAL_ARCHIVE_MAX_SIZE) + if (dsCfg.getMaxWalArchiveSize() != DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE && + dsCfg.getMaxWalArchiveSize() != DataStorageConfiguration.DFLT_WAL_ARCHIVE_MAX_SIZE) return dsCfg.getMaxWalArchiveSize(); // Find out the maximum checkpoint buffer size. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTask.java index 14cea626e7724..88fde8b5af6f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTask.java @@ -17,27 +17,17 @@ package org.apache.ignite.internal.visor.defragmentation; -import java.util.Collections; import java.util.List; -import java.util.Optional; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeJobResult; -import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.IgniteDefragmentation; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.processors.task.GridVisorManagementTask; import org.apache.ignite.internal.visor.VisorJob; import org.apache.ignite.internal.visor.VisorMultiNodeTask; -import org.apache.ignite.maintenance.MaintenanceAction; -import 
org.apache.ignite.maintenance.MaintenanceRegistry; -import org.apache.ignite.maintenance.MaintenanceTask; import org.jetbrains.annotations.Nullable; -import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.CachePartitionDefragmentationManager.DEFRAGMENTATION_MNTC_TASK_NAME; -import static org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters.toStore; - /** */ @GridInternal @GridVisorManagementTask @@ -120,91 +110,71 @@ protected VisorDefragmentationJob(@Nullable VisorDefragmentationTaskArg arg, boo /** */ private VisorDefragmentationTaskResult runSchedule(VisorDefragmentationTaskArg arg) { - MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); + final IgniteDefragmentation defragmentation = ignite.context().defragmentation(); - MaintenanceTask oldTask; + final IgniteDefragmentation.ScheduleResult scheduleResult; try { - List cacheNames = arg.cacheNames(); - - oldTask = mntcReg.registerMaintenanceTask(toStore(cacheNames == null ? Collections.emptyList() : cacheNames)); + scheduleResult = defragmentation.schedule(arg.cacheNames()); } catch (IgniteCheckedException e) { - return new VisorDefragmentationTaskResult(false, "Scheduling failed: " + e.getMessage()); + return new VisorDefragmentationTaskResult(false, e.getMessage()); } - return new VisorDefragmentationTaskResult( - true, - "Scheduling completed successfully." + - (oldTask == null ? "" : " Previously scheduled task has been removed.") - ); + String message; + + switch (scheduleResult) { + case SUCCESS_SUPERSEDED_PREVIOUS: + message = "Scheduling completed successfully. 
Previously scheduled task has been removed."; + break; + case SUCCESS: + default: + message = "Scheduling completed successfully."; + break; + } + + return new VisorDefragmentationTaskResult(true, message); } /** */ private VisorDefragmentationTaskResult runStatus(VisorDefragmentationTaskArg arg) { - MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); - - if (!mntcReg.isMaintenanceMode()) - return new VisorDefragmentationTaskResult(false, "Node is not in maintenance node."); - - IgniteCacheDatabaseSharedManager dbMgr = ignite.context().cache().context().database(); + final IgniteDefragmentation defragmentation = ignite.context().defragmentation(); - assert dbMgr instanceof GridCacheDatabaseSharedManager; - - CachePartitionDefragmentationManager defrgMgr = ((GridCacheDatabaseSharedManager)dbMgr) - .defragmentationManager(); - - if (defrgMgr == null) - return new VisorDefragmentationTaskResult(true, "There's no active defragmentation process on the node."); - - return new VisorDefragmentationTaskResult(true, defrgMgr.status()); + try { + return new VisorDefragmentationTaskResult(true, defragmentation.status().toString()); + } catch (IgniteCheckedException e) { + return new VisorDefragmentationTaskResult(false, e.getMessage()); + } } /** */ private VisorDefragmentationTaskResult runCancel(VisorDefragmentationTaskArg arg) { - assert arg.cacheNames() == null : "Cancelling specific caches is not yet implemented"; - - MaintenanceRegistry mntcReg = ignite.context().maintenanceRegistry(); - - if (!mntcReg.isMaintenanceMode()) { - boolean deleted = mntcReg.unregisterMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME); + final IgniteDefragmentation defragmentation = ignite.context().defragmentation(); - String msg = deleted - ? "Scheduled defragmentation task cancelled successfully." 
- : "Scheduled defragmentation task is not found."; - - return new VisorDefragmentationTaskResult(true, msg); - } - else { - List> actions; - - try { - actions = mntcReg.actionsForMaintenanceTask(DEFRAGMENTATION_MNTC_TASK_NAME); - } - catch (IgniteException e) { - return new VisorDefragmentationTaskResult(true, "Defragmentation is already completed or has been cancelled previously."); + try { + final IgniteDefragmentation.CancelResult cancelResult = defragmentation.cancel(); + + String message; + + switch (cancelResult) { + case SCHEDULED_NOT_FOUND: + message = "Scheduled defragmentation task is not found."; + break; + case CANCELLED: + message = "Defragmentation cancelled successfully."; + break; + case COMPLETED_OR_CANCELLED: + message = "Defragmentation is already completed or has been cancelled previously."; + break; + case CANCELLED_SCHEDULED: + default: + message = "Scheduled defragmentation task cancelled successfully."; + break; } - Optional> stopAct = actions.stream().filter(a -> "stop".equals(a.name())).findAny(); - - assert stopAct.isPresent(); - - try { - Object res = stopAct.get().execute(); - - assert res instanceof Boolean; - - boolean cancelled = (Boolean)res; - - String msg = cancelled - ? "Defragmentation cancelled successfully." 
- : "Defragmentation is already completed or has been cancelled previously."; - - return new VisorDefragmentationTaskResult(true, msg); - } - catch (Exception e) { - return new VisorDefragmentationTaskResult(false, "Exception occurred: " + e.getMessage()); - } + return new VisorDefragmentationTaskResult(true, message); + } catch (IgniteCheckedException e) { + return new VisorDefragmentationTaskResult(false, e.getMessage()); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTaskArg.java index 1b1c8b12ba023..9e6ec53f5e48b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTaskArg.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/defragmentation/VisorDefragmentationTaskArg.java @@ -33,9 +33,6 @@ public class VisorDefragmentationTaskArg extends IgniteDataTransferObject { /** */ private VisorDefragmentationOperation operation; - /** */ - private List nodeIds; - /** */ private List cacheNames; @@ -47,12 +44,10 @@ public VisorDefragmentationTaskArg() { /** */ public VisorDefragmentationTaskArg( VisorDefragmentationOperation operation, - List nodeIds, List cacheNames ) { this.operation = operation; - this.nodeIds = nodeIds; this.cacheNames = cacheNames; } @@ -61,11 +56,6 @@ public VisorDefragmentationOperation operation() { return operation; } - /** */ - public List nodeIds() { - return nodeIds; - } - /** */ public List cacheNames() { return cacheNames; @@ -75,8 +65,6 @@ public List cacheNames() { @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeEnum(out, operation); - U.writeCollection(out, nodeIds); - U.writeCollection(out, cacheNames); } @@ -84,8 +72,6 @@ public List cacheNames() { @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, 
ClassNotFoundException { operation = U.readEnum(in, VisorDefragmentationOperation.class); - nodeIds = U.readList(in); - cacheNames = U.readList(in); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/misc/VisorWalTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/misc/VisorWalTask.java index 19b3e921fdf83..d535751c36f9d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/misc/VisorWalTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/misc/VisorWalTask.java @@ -237,7 +237,7 @@ Collection deleteUnusedWalSegments( dbMgr.onWalTruncated(lowBoundForTruncate); - int num = wal.truncate(null, lowBoundForTruncate); + int num = wal.truncate(lowBoundForTruncate); if (walFiles != null) { sortWalFiles(walFiles); diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/DefragmentationMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/DefragmentationMXBean.java new file mode 100644 index 0000000000000..22a5e2de9c39a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/DefragmentationMXBean.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.mxbean; + +/** + * JMX bean for defragmentation manager. + */ +@MXBeanDescription("MBean that provides access for defragmentation features.") +public interface DefragmentationMXBean { + /** + * Schedule defragmentation for given caches. + * + * @param cacheNames Names of caches to run defragmentation on, comma separated. + * @return {@code true} if defragmentation is scheduled, {@code false} otherwise. + */ + @MXBeanDescription("Schedule defragmentation.") + public boolean schedule(@MXBeanParameter(name = "cacheNames", description = "Names of caches to run defragmentation on.") String cacheNames); + + /** + * Cancel defragmentation. + * + * @return {@code true} if defragmentation was canceled, {@code false} otherwise. + */ + @MXBeanDescription("Cancel current defragmentation.") + public boolean cancel(); + + /** + * Get defragmentation status. + * + * @return {@code true} if defragmentation is in progress right now. + */ + @MXBeanDescription("Defragmentation in progress.") + public boolean inProgress(); + + /** + * Get count of processed partitions. + * + * @return Count of processed partitions. + */ + @MXBeanDescription("Processed partitions.") + public int processedPartitions(); + + /** + * Get total count of partitions. + * + * @return Total count of partitions. + */ + @MXBeanDescription("Total partitions.") + public int totalPartitions(); + + /** + * Get defragmentation's start time. + * + * @return Defragmentation's start time.
+ */ + @MXBeanDescription("Start time.") + public long startTime(); +} diff --git a/modules/core/src/test/java/org/apache/ignite/TestStorageUtils.java b/modules/core/src/test/java/org/apache/ignite/TestStorageUtils.java new file mode 100644 index 0000000000000..17ff24197d0c8 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/TestStorageUtils.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite; + +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheEntry; +import org.apache.ignite.internal.pagemem.wal.record.DataEntry; +import org.apache.ignite.internal.processors.cache.CacheObjectImpl; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.GridCacheOperation; +import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Test methods for storage manipulation. + */ +public class TestStorageUtils { + /** + * Corrupts data entry. + * + * @param ctx Context. + * @param key Key. + * @param breakCntr Break counter. + * @param breakData Break data. + * @param ver GridCacheVersion to use. + * @param brokenValPostfix Postfix to add to value if breakData flag is set to true. + */ + public static void corruptDataEntry( + GridCacheContext ctx, + Object key, + boolean breakCntr, + boolean breakData, + GridCacheVersion ver, + String brokenValPostfix + ) { + int partId = ctx.affinity().partition(key); + + try { + long updateCntr = ctx.topology().localPartition(partId).updateCounter(); + + CacheEntry e = ctx.cache().keepBinary().getEntry(key); + + Object valToPut = e.getValue(); + + KeyCacheObject keyCacheObj = e.getKey() instanceof BinaryObject ? 
+ (KeyCacheObject)e.getKey() : + new KeyCacheObjectImpl(e.getKey(), null, partId); + + if (breakCntr) + updateCntr++; + + if (breakData) + valToPut = e.getValue().toString() + brokenValPostfix; + + // Create data entry + + DataEntry dataEntry = new DataEntry( + ctx.cacheId(), + keyCacheObj, + new CacheObjectImpl(valToPut, null), + GridCacheOperation.UPDATE, + new GridCacheVersion(), + ver, + 0L, + partId, + updateCntr + ); + + IgniteCacheDatabaseSharedManager db = ctx.shared().database(); + + db.checkpointReadLock(); + + try { + U.invoke(GridCacheDatabaseSharedManager.class, db, "applyUpdate", ctx, dataEntry, + false); + } + finally { + db.checkpointReadUnlock(); + } + } + catch (IgniteCheckedException e) { + e.printStackTrace(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/cache/NoUnnecessaryRebalancesTest.java b/modules/core/src/test/java/org/apache/ignite/cache/NoUnnecessaryRebalancesTest.java new file mode 100644 index 0000000000000..a972c6d0c557e --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/cache/NoUnnecessaryRebalancesTest.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cache; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteException; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.TestRecordingCommunicationSpi; +import org.apache.ignite.internal.managers.communication.GridIoMessage; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessage; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.spi.IgniteSpiException; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Assert; +import org.junit.Test; + +import static org.apache.ignite.TestStorageUtils.corruptDataEntry; + +/** + * Tests check that unnecessary rebalances doesn't happen + */ +public class NoUnnecessaryRebalancesTest extends GridCommonAbstractTest { + /** */ + private static final String CACHE_NAME = "testCache"; + + /** */ + private static final int nodeCount = 3; + + /** + * @return Grid test configuration. + * @throws Exception If failed. 
+ */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setCommunicationSpi(new SpecialSpi()); + + cfg.setDataStorageConfiguration(new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setPersistenceEnabled(true).setMaxSize(200 * 1024 * 1024) + )); + + return cfg; + } + + /** + * Test check that cache creation doesn't invoke rebalance on cache in other cache group + * @throws Exception If failed. + */ + @Test + public void testNoRebalancesOnCacheCreation() throws Exception { + startGrids(nodeCount); + + Ignite g0 = grid(0); + + g0.cluster().state(ClusterState.ACTIVE); + + g0.createCache(getCacheConfiguration(0)); + + awaitPartitionMapExchange(); + + IgniteCache cache0 = g0.cache(CACHE_NAME + 0); + + for (int i = 0; i < 100; i++) + cache0.put(i, i); + + awaitPartitionMapExchange(); + + GridCacheContext cacheCtx0 = grid(0).cachex(CACHE_NAME + 0).context(); + + corruptDataEntry(cacheCtx0, 1, true, false, new GridCacheVersion(0, 0, 0), "broken"); + + g0.createCache(getCacheConfiguration(1)); + + awaitPartitionMapExchange(true, true, null); + + Assert.assertFalse(SpecialSpi.rebGrpIds.contains(CU.cacheId(CACHE_NAME + 0))); + } + + /** */ + private CacheConfiguration getCacheConfiguration(int idx) { + return new CacheConfiguration<>(CACHE_NAME + idx) + .setBackups(2) + .setAffinity(new RendezvousAffinityFunction().setPartitions(8)); + } + + /** + * Wrapper of communication spi to detect on which cache groups rebalances were happened. + */ + public static class SpecialSpi extends TestRecordingCommunicationSpi { + /** Cache groups on which rebalances were happened */ + public static final Set rebGrpIds = new HashSet<>(); + + /** Lock object. 
*/ + private static final Object mux = new Object(); + + /** */ + public static Set allRebalances() { + synchronized (mux) { + return Collections.unmodifiableSet(rebGrpIds); + } + } + + /** {@inheritDoc} */ + @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure ackC) throws IgniteSpiException { + if (((GridIoMessage)msg).message() instanceof GridDhtPartitionSupplyMessage) { + GridDhtPartitionSupplyMessage supplyMsg = (GridDhtPartitionSupplyMessage) ((GridIoMessage)msg).message(); + + synchronized (mux) { + rebGrpIds.add(supplyMsg.groupId()); + } + } + + super.sendMessage(node, msg, ackC); + } + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java index 4ce9bb4e8d5cc..25037d31fb9ec 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDefragmentationTest.java @@ -80,7 +80,7 @@ public class IgnitePdsDefragmentationTest extends GridCommonAbstractTest { public static final int PARTS = 5; /** */ - public static final int ADDED_KEYS_COUNT = 150; + public static final int ADDED_KEYS_COUNT = 1500; /** */ protected static final String GRP_NAME = "group"; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java index 65dd7766cdd86..c18f06243f28d 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java @@ -19,6 +19,7 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterState; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; @@ -28,44 +29,19 @@ import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer; +import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.WithSystemProperty; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.junit.Test; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE; +import static org.apache.ignite.testframework.GridTestUtils.waitForCondition; /** * Test correctness of truncating unused WAL segments. 
*/ @WithSystemProperty(key = IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, value = "2") public class IgnitePdsReserveWalSegmentsTest extends GridCommonAbstractTest { - /** {@inheritDoc} */ - @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { - IgniteConfiguration cfg = super.getConfiguration(gridName); - - cfg.setConsistentId(gridName); - - CacheConfiguration ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME); - - ccfg.setAffinity(new RendezvousAffinityFunction(false, 32)); - - cfg.setCacheConfiguration(ccfg); - - DataStorageConfiguration dbCfg = new DataStorageConfiguration(); - - cfg.setDataStorageConfiguration(dbCfg); - - dbCfg.setWalSegmentSize(1024 * 1024) - .setMaxWalArchiveSize(Long.MAX_VALUE) - .setWalSegments(10) - .setWalMode(WALMode.LOG_ONLY) - .setDefaultDataRegionConfiguration(new DataRegionConfiguration() - .setMaxSize(100 * 1024 * 1024) - .setPersistenceEnabled(true)); - - return cfg; - } - /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { stopAllGrids(); @@ -80,6 +56,28 @@ public class IgnitePdsReserveWalSegmentsTest extends GridCommonAbstractTest { cleanPersistenceDir(); } + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName) + .setConsistentId(igniteInstanceName) + .setCacheConfiguration( + new CacheConfiguration<>(DEFAULT_CACHE_NAME) + .setAffinity(new RendezvousAffinityFunction(false, 32)) + ).setDataStorageConfiguration( + new DataStorageConfiguration() + .setCheckpointFrequency(Long.MAX_VALUE) + .setWalMode(WALMode.LOG_ONLY) + .setMaxWalArchiveSize(DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE) + .setWalSegmentSize(1024 * 1024) + .setWalSegments(10) + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true) + ) + ); + } + /** * Tests that range reserved method return correct 
number of reserved WAL segments. * @@ -87,18 +85,17 @@ public class IgnitePdsReserveWalSegmentsTest extends GridCommonAbstractTest { */ @Test public void testWalManagerRangeReservation() throws Exception { - IgniteEx ig0 = prepareGrid(4); + IgniteEx n = prepareGrid(2); - GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ig0.context().cache().context() - .database(); + IgniteWriteAheadLogManager wal = n.context().cache().context().wal(); - IgniteWriteAheadLogManager wal = ig0.context().cache().context().wal(); + assertNotNull(wal); - long resIdx = getReservedWalSegmentIndex(dbMgr); + long resIdx = getReservedWalSegmentIndex(wal); assertTrue("Expected that at least resIdx greater than 0, real is " + resIdx, resIdx > 0); - WALPointer lowPtr = dbMgr.checkpointHistory().firstCheckpointPointer(); + WALPointer lowPtr = lastCheckpointPointer(n); assertTrue("Expected that dbMbr returns valid resIdx", lowPtr.index() == resIdx); @@ -117,31 +114,72 @@ public void testWalManagerRangeReservation() throws Exception { */ @Test public void testWalDoesNotTruncatedWhenSegmentReserved() throws Exception { - IgniteEx ig0 = prepareGrid(4); + IgniteEx n = prepareGrid(2); - GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ig0.context().cache().context() - .database(); + IgniteWriteAheadLogManager wal = n.context().cache().context().wal(); - IgniteWriteAheadLogManager wal = ig0.context().cache().context().wal(); + assertNotNull(wal); - long resIdx = getReservedWalSegmentIndex(dbMgr); + long resIdx = getReservedWalSegmentIndex(wal); assertTrue("Expected that at least resIdx greater than 0, real is " + resIdx, resIdx > 0); - WALPointer lowPtr = dbMgr.checkpointHistory().firstCheckpointPointer(); + WALPointer lowPtr = lastCheckpointPointer(n); assertTrue("Expected that dbMbr returns valid resIdx", lowPtr.index() == resIdx); // Reserve previous WAL segment. 
wal.reserve(new WALPointer(resIdx - 1, 0, 0)); - int numDel = wal.truncate(null, lowPtr); + int numDel = wal.truncate(lowPtr); int expNumDel = (int)resIdx - 1; assertTrue("Expected del segments is " + expNumDel + ", real is " + numDel, expNumDel == numDel); } + /** + * Checking that there will be no truncation of segments required for binary recovery. + * + * @throws Exception If failed. + */ + @Test + public void testNotTruncateSegmentsForBinaryRecovery() throws Exception { + IgniteEx n = prepareGrid(1); + + IgniteWriteAheadLogManager wal = n.context().cache().context().wal(); + + assertNotNull(wal); + + long resIdx = getReservedWalSegmentIndex(wal); + assertTrue(resIdx > 3); + + WALPointer lastCheckpointPtr = lastCheckpointPointer(n); + assertEquals(lastCheckpointPtr.index(), resIdx); + + wal.notchLastCheckpointPtr(new WALPointer(1, 0, 0)); + + if (compactionEnabled(n)) + assertTrue(waitForCondition(() -> wal.lastCompactedSegment() >= 1, 10_000)); + + int truncated = wal.truncate(lastCheckpointPtr); + assertTrue("truncated: " + truncated, truncated >= 1); + + truncated = wal.truncate(lastCheckpointPtr); + assertEquals(0, truncated); + + wal.notchLastCheckpointPtr(new WALPointer(2, 0, 0)); + + if (compactionEnabled(n)) + assertTrue(waitForCondition(() -> wal.lastCompactedSegment() >= 2, 10_000)); + + truncated = wal.truncate(lastCheckpointPtr); + assertTrue("truncated: " + truncated, truncated >= 1); + + truncated = wal.truncate(lastCheckpointPtr); + assertEquals(0, truncated); + } + /** * Starts grid and populates test data. 
* @@ -152,7 +190,8 @@ public void testWalDoesNotTruncatedWhenSegmentReserved() throws Exception { private IgniteEx prepareGrid(int cnt) throws Exception { IgniteEx ig0 = startGrids(cnt); - ig0.cluster().active(true); + ig0.cluster().state(ClusterState.ACTIVE); + awaitPartitionMapExchange(); IgniteCache cache = ig0.cache(DEFAULT_CACHE_NAME); @@ -167,11 +206,32 @@ private IgniteEx prepareGrid(int cnt) throws Exception { } /** - * Get index of reserved WAL segment by checkpointer. + * Get index of reserved WAL segment by checkpoint. * * @param dbMgr Database shared manager. */ - private long getReservedWalSegmentIndex(GridCacheDatabaseSharedManager dbMgr) { - return dbMgr.checkpointHistory().firstCheckpointPointer().index(); + private long getReservedWalSegmentIndex(IgniteWriteAheadLogManager dbMgr) { + return ((WALPointer)GridTestUtils.getFieldValueHierarchy(dbMgr, "lastCheckpointPtr")).index(); + } + + /** + * Getting WAL pointer last checkpoint. + * + * @param n Node. + * @return WAL pointer last checkpoint. + */ + private WALPointer lastCheckpointPointer(IgniteEx n) { + return ((GridCacheDatabaseSharedManager)n.context().cache().context().database()) + .checkpointHistory().lastCheckpoint().checkpointMark(); + } + + /** + * Checking that wal compaction enabled. + * + * @param n Node. + * @return {@code True} if enabled. 
+ */ + private boolean compactionEnabled(IgniteEx n) { + return n.configuration().getDataStorageConfiguration().isWalCompactionEnabled(); } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsStartWIthEmptyArchive.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsStartWIthEmptyArchive.java index 78e6caed1f37c..e55e036304ee5 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsStartWIthEmptyArchive.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsStartWIthEmptyArchive.java @@ -68,7 +68,7 @@ public class IgnitePdsStartWIthEmptyArchive extends GridCommonAbstractTest { cfg.setDataStorageConfiguration( new DataStorageConfiguration() // Checkpoint should not remove any WAL archive files. - .setMaxWalArchiveSize(Long.MAX_VALUE) + .setMaxWalArchiveSize(DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE) .setDefaultDataRegionConfiguration( new DataRegionConfiguration() .setPersistenceEnabled(true) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java index f9b029e3a685c..8d65954cbeaa8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java @@ -19,7 +19,6 @@ import java.io.File; import java.nio.channels.Channel; -import java.nio.file.Paths; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; @@ -75,30 +74,30 @@ public class IgniteWalIteratorSwitchSegmentTest 
extends GridCommonAbstractTest { /** Segment file size. */ private static final int SEGMENT_SIZE = 1024 * 1024; + /** Node dir. */ + private static final String NODE_DIR = "NODE"; + /** WAL segment file sub directory. */ - private static final String WORK_SUB_DIR = "/NODE/wal"; + private static final String WORK_SUB_DIR = String.join(File.separator, "", NODE_DIR, "wal"); /** WAL archive segment file sub directory. */ - private static final String ARCHIVE_SUB_DIR = "/NODE/walArchive"; + private static final String ARCHIVE_SUB_DIR = String.join(File.separator, "", NODE_DIR, "walArchive"); /** Serializer versions for check. */ - private int[] checkSerializerVers = new int[] { - 1, - 2 - }; + private final int[] checkSerializerVers = new int[] {1, 2}; /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { super.beforeTest(); - U.delete(Paths.get(U.defaultWorkDirectory())); + deleteNodeDir(); } /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { super.afterTest(); - U.delete(Paths.get(U.defaultWorkDirectory())); + deleteNodeDir(); } /** @@ -113,6 +112,40 @@ public void testCheckSerializer() throws Exception { } } + /** + * Test for check invariant, size of SWITCH_SEGMENT_RECORD should be 1 byte. + * + * @throws Exception If some thing failed. + */ + @Test + public void testInvariantSwitchSegment() throws Exception { + for (int serVer : checkSerializerVers) { + try { + checkInvariantSwitchSegment(serVer); + } + finally { + deleteNodeDir(); + } + } + } + + /** + * Test for check switch segment from work dir to archive dir during iteration. + * + * @throws Exception If some thing failed. + */ + @Test + public void testSwitchReadingSegmentFromWorkToArchive() throws Exception { + for (int serVer : checkSerializerVers) { + try { + checkSwitchReadingSegmentDuringIteration(serVer); + } + finally { + deleteNodeDir(); + } + } + } + /** * @param serVer WAL serializer version. * @throws Exception If some thing failed. 
@@ -161,40 +194,6 @@ private void checkInvariantSwitchSegmentSize(int serVer) throws Exception { Assert.assertEquals(1, recordSize); } - /** - * Test for check invariant, size of SWITCH_SEGMENT_RECORD should be 1 byte. - * - * @throws Exception If some thing failed. - */ - @Test - public void testInvariantSwitchSegment() throws Exception { - for (int serVer : checkSerializerVers) { - try { - checkInvariantSwitchSegment(serVer); - } - finally { - U.delete(Paths.get(U.defaultWorkDirectory())); - } - } - } - - /** - * Test for check switch segment from work dir to archive dir during iteration. - * - * @throws Exception If some thing failed. - */ - @Test - public void testSwitchReadingSegmentFromWorkToArchive() throws Exception { - for (int serVer : checkSerializerVers) { - try { - checkSwitchReadingSegmentDuringIteration(serVer); - } - finally { - U.delete(Paths.get(U.defaultWorkDirectory())); - } - } - } - /** * @param serVer WAL serializer version. * @throws Exception If some thing failed. @@ -270,8 +269,10 @@ private void checkInvariantSwitchSegment(int serVer) throws Exception { walMgr.flush(null, true); + SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware"); + // Await archiver move segment to WAL archive. - Thread.sleep(5000); + waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5_000); // If switchSegmentRecordSize more that 1, it mean that invariant is broke. // Filling tail some garbage. Simulate tail garbage on rotate segment in WAL work directory. @@ -300,7 +301,7 @@ private void checkInvariantSwitchSegment(int serVer) throws Exception { seg0.close(); } - int expectedRecords = recordsToWrite; + int expRecords = recordsToWrite; int actualRecords = 0; // Check that switch segment works as expected and all record is reachable. 
@@ -315,7 +316,7 @@ private void checkInvariantSwitchSegment(int serVer) throws Exception { } } - Assert.assertEquals("Not all records read during iteration.", expectedRecords, actualRecords); + Assert.assertEquals("Not all records read during iteration.", expRecords, actualRecords); } /** @@ -340,75 +341,72 @@ private void checkSwitchReadingSegmentDuringIteration(int serVer) throws Excepti SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware"); - //guard from archivation before iterator would be created. - segmentAware.checkCanReadArchiveOrReserveWorkSegment(0); + // Guard from archiving before iterator would be created. + assertTrue(segmentAware.lock(0)); for (int i = 0; i < recordsToWrite; i++) walMgr.log(new MetastoreDataRecord(rec.key(), rec.value())); walMgr.flush(null, true); - int expectedRecords = recordsToWrite; AtomicInteger actualRecords = new AtomicInteger(0); AtomicReference startedSegmentPath = new AtomicReference<>(); AtomicReference finishedSegmentPath = new AtomicReference<>(); - CountDownLatch startedIteratorLatch = new CountDownLatch(1); + CountDownLatch startedIterLatch = new CountDownLatch(1); CountDownLatch finishedArchivedLatch = new CountDownLatch(1); - IgniteInternalFuture future = GridTestUtils.runAsync( - () -> { - // Check that switch segment works as expected and all record is reachable. - try (WALIterator it = walMgr.replay(null)) { - Object handle = getFieldValueHierarchy(it, "currWalSegment"); - FileInput in = getFieldValueHierarchy(handle, "in"); - Object delegate = getFieldValueHierarchy(in.io(), "delegate"); - Channel ch = getFieldValueHierarchy(delegate, "ch"); - String path = getFieldValueHierarchy(ch, "path"); + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> { + // Check that switch segment works as expected and all record is reachable. 
+ try (WALIterator it = walMgr.replay(null)) { + Object handle = getFieldValueHierarchy(it, "currWalSegment"); + FileInput in = getFieldValueHierarchy(handle, "in"); + Object delegate = getFieldValueHierarchy(in.io(), "delegate"); + Channel ch = getFieldValueHierarchy(delegate, "ch"); + String path = getFieldValueHierarchy(ch, "path"); - startedSegmentPath.set(path); + startedSegmentPath.set(path); - startedIteratorLatch.countDown(); + startedIterLatch.countDown(); - while (it.hasNext()) { - IgniteBiTuple tup = it.next(); + while (it.hasNext()) { + IgniteBiTuple tup = it.next(); - WALRecord rec0 = tup.get2(); + WALRecord rec0 = tup.get2(); - if (rec0.type() == METASTORE_DATA_RECORD) - actualRecords.incrementAndGet(); + if (rec0.type() == METASTORE_DATA_RECORD) + actualRecords.incrementAndGet(); - finishedArchivedLatch.await(); - } - - in = getFieldValueHierarchy(handle, "in"); - delegate = getFieldValueHierarchy(in.io(), "delegate"); - ch = getFieldValueHierarchy(delegate, "ch"); - path = getFieldValueHierarchy(ch, "path"); - - finishedSegmentPath.set(path); + finishedArchivedLatch.await(); } - return null; + in = getFieldValueHierarchy(handle, "in"); + delegate = getFieldValueHierarchy(in.io(), "delegate"); + ch = getFieldValueHierarchy(delegate, "ch"); + path = getFieldValueHierarchy(ch, "path"); + + finishedSegmentPath.set(path); } - ); - startedIteratorLatch.await(); + return null; + }); + + startedIterLatch.await(); - segmentAware.releaseWorkSegment(0); + segmentAware.unlock(0); waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5000); finishedArchivedLatch.countDown(); - future.get(); + fut.get(); //should started iteration from work directory but finish from archive directory. 
- assertEquals(workDir + WORK_SUB_DIR + "/0000000000000000.wal", startedSegmentPath.get()); - assertEquals(workDir + ARCHIVE_SUB_DIR + "/0000000000000000.wal", finishedSegmentPath.get()); + assertEquals(workDir + WORK_SUB_DIR + File.separator + "0000000000000000.wal", startedSegmentPath.get()); + assertEquals(workDir + ARCHIVE_SUB_DIR + File.separator + "0000000000000000.wal", finishedSegmentPath.get()); - Assert.assertEquals("Not all records read during iteration.", expectedRecords, actualRecords.get()); + Assert.assertEquals("Not all records read during iteration.", recordsToWrite, actualRecords.get()); } /*** @@ -493,4 +491,11 @@ private T2 initiate( return new T2<>(walMgr, recordSerializer); } + + /** + * Delete node dir. + */ + private void deleteNodeDir() throws Exception { + U.delete(U.resolveWorkDirectory(U.defaultWorkDirectory(), NODE_DIR, false)); + } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java index 044c1732682b6..9f3eaaf7acb25 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java @@ -17,7 +17,6 @@ package org.apache.ignite.internal.processors.cache.persistence.db.wal; -import java.io.File; import java.util.function.Consumer; import java.util.stream.Stream; import org.apache.ignite.Ignite; @@ -195,42 +194,6 @@ public void testCheckpointStarted_WhenWalHasTooBigSizeWithoutCheckpoint() throws assertEquals("too big size of WAL without checkpoint", checkpointReason); } - /** - * Test for check deprecated removing checkpoint by deprecated walHistorySize parameter - * - * @deprecated Test old removing process depends on 
WalHistorySize. - */ - @Test - public void testCheckpointHistoryRemovingByWalHistorySize() throws Exception { - //given: configured grid with wal history size = 10 - int walHistorySize = 10; - - Ignite ignite = startGrid(dbCfg -> { - dbCfg.setWalHistorySize(walHistorySize); - }); - - GridCacheDatabaseSharedManager dbMgr = gridDatabase(ignite); - - IgniteCache cache = ignite.getOrCreateCache(cacheConfiguration()); - - //when: put to cache and do checkpoint - int testNumberOfCheckpoint = walHistorySize * 2; - - for (int i = 0; i < testNumberOfCheckpoint; i++) { - cache.put(i, i); - //and: wait for checkpoint finished - forceCheckpoint(); - } - - //then: number of checkpoints less or equal than walHistorySize - CheckpointHistory hist = dbMgr.checkpointHistory(); - assertTrue(hist.checkpoints().size() == walHistorySize); - - File[] cpFiles = dbMgr.checkpointDirectory().listFiles(); - - assertTrue(cpFiles.length <= (walHistorySize * 2 + 1));// starts & ends + node_start - } - /** * Correct delete checkpoint history from memory depends on IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE. WAL files * doesn't delete because deleting was disabled. @@ -240,7 +203,7 @@ public void testCheckpointHistoryRemovingByWalHistorySize() throws Exception { public void testCorrectDeletedCheckpointHistoryButKeepWalFiles() throws Exception { //given: configured grid with disabled WAL removing. 
Ignite ignite = startGrid(dbCfg -> { - dbCfg.setMaxWalArchiveSize(Long.MAX_VALUE); + dbCfg.setMaxWalArchiveSize(DataStorageConfiguration.UNLIMITED_WAL_ARCHIVE); }); GridCacheDatabaseSharedManager dbMgr = gridDatabase(ignite); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationMXBeanTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationMXBeanTest.java new file mode 100644 index 0000000000000..f1e5c77cb64f0 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/defragmentation/DefragmentationMXBeanTest.java @@ -0,0 +1,322 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.defragmentation; + +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.function.UnaryOperator; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cluster.ClusterState; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteKernal; +import org.apache.ignite.internal.IgnitionEx; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.defragmentation.maintenance.DefragmentationParameters; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.maintenance.MaintenanceTask; +import org.apache.ignite.mxbean.DefragmentationMXBean; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +import static org.apache.ignite.cluster.ClusterState.ACTIVE; + +/** + * Tests for defragmentation JMX bean. 
+ */ +public class DefragmentationMXBeanTest extends GridCommonAbstractTest { + /** */ + private static CountDownLatch blockCdl; + + /** */ + private static CountDownLatch waitCdl; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + stopAllGrids(); + + cleanPersistenceDir(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + final DataStorageConfiguration dsCfg = new DataStorageConfiguration(); + + dsCfg.setWalSegmentSize(512 * 1024).setWalSegments(3); + dsCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(50L * 1024 * 1024).setPersistenceEnabled(true) + ); + + return cfg.setDataStorageConfiguration(dsCfg); + } + + /** + * Test that defragmentation won't be scheduled second time, if previously scheduled via maintenance registry. + * Description: + * 1. Start two nodes. + * 2. Register defragmentation maintenance task on the first node. + * 3. Restart node. + * 3. Scheduling of the defragmentation on the first node via JMX bean should fail. + * @throws Exception If failed. + */ + @Test + public void testDefragmentationSchedule() throws Exception { + Ignite ignite = startGrids(2); + + ignite.cluster().state(ACTIVE); + + DefragmentationMXBean mxBean = defragmentationMXBean(ignite.name()); + + assertTrue(mxBean.schedule("")); + + MaintenanceTask mntcTask = DefragmentationParameters.toStore(Collections.emptyList()); + + assertNotNull(grid(0).context().maintenanceRegistry().registerMaintenanceTask(mntcTask)); + assertNull(grid(1).context().maintenanceRegistry().registerMaintenanceTask(mntcTask)); + + stopGrid(0); + startGrid(0); + + // node is already in defragmentation mode, hence scheduling is not possible + assertFalse(mxBean.schedule("")); + } + + /** + * Test that defragmentation can be successfuly cancelled via JMX bean. 
+ * @throws Exception If failed. + */ + @Test + public void testDefragmentationCancel() throws Exception { + Ignite ignite = startGrids(2); + + ignite.cluster().state(ACTIVE); + + DefragmentationMXBean mxBean = defragmentationMXBean(ignite.name()); + + mxBean.schedule(""); + + assertTrue(mxBean.cancel()); + + // subsequent cancel call should be successful + assertTrue(mxBean.cancel()); + } + + /** + * Test that ongong defragmentation can be stopped via JMX bean. + * Description: + * 1. Start one node. + * 2. Put a load of a data on it. + * 3. Schedule defragmentation. + * 4. Make IO factory slow down after 128 partitions are processed, so we have time to stop the defragmentation. + * 5. Stop the defragmentation. + * @throws Exception If failed. + */ + @Test + public void testDefragmentationCancelInProgress() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + IgniteCache cache = ig.getOrCreateCache(DEFAULT_CACHE_NAME); + + for (int i = 0; i < 1024; i++) + cache.put(i, i); + + forceCheckpoint(ig); + + DefragmentationMXBean mxBean = defragmentationMXBean(ig.name()); + + mxBean.schedule(""); + + stopGrid(0); + + blockCdl = new CountDownLatch(128); + + UnaryOperator cfgOp = cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + + FileIOFactory delegate = dsCfg.getFileIOFactory(); + + dsCfg.setFileIOFactory((file, modes) -> { + if (file.getName().contains("dfrg")) { + if (blockCdl.getCount() == 0) { + try { + // Slow down defragmentation process. + // This'll be enough for the test since we have, like, 900 partitions left. + Thread.sleep(100); + } + catch (InterruptedException ignore) { + // No-op. + } + } + else + blockCdl.countDown(); + } + + return delegate.create(file, modes); + }); + + return cfg; + }; + + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> { + try { + startGrid(0, cfgOp); + } + catch (Exception e) { + // No-op. 
+ throw new RuntimeException(e); + } + }); + + blockCdl.await(); + + mxBean = defragmentationMXBean(ig.name()); + + assertTrue(mxBean.cancel()); + + fut.get(); + + assertTrue(mxBean.cancel()); + } + + /** + * Test that JMX bean provides correct defragmentation status. + * Description: + * 1. Start one node, + * 2. Put a load of data on it. + * 3. Schedule defragmentation. + * 4. Completely stop defragmentation when 128 partitions processed. + * 5. Check defragmentation status. + * 6. Continue defragmentation and wait for it to end. + * 7. Check defragmentation finished. + * @throws Exception If failed. + */ + @Test + public void testDefragmentationStatus() throws Exception { + IgniteEx ig = startGrid(0); + + ig.cluster().state(ClusterState.ACTIVE); + + ig.getOrCreateCache(DEFAULT_CACHE_NAME + "1"); + + IgniteCache cache = ig.getOrCreateCache(DEFAULT_CACHE_NAME + "2"); + + ig.getOrCreateCache(DEFAULT_CACHE_NAME + "3"); + + for (int i = 0; i < 1024; i++) + cache.put(i, i); + + forceCheckpoint(ig); + + DefragmentationMXBean mxBean = defragmentationMXBean(ig.name()); + + mxBean.schedule(""); + + stopGrid(0); + + blockCdl = new CountDownLatch(128); + waitCdl = new CountDownLatch(1); + + UnaryOperator cfgOp = cfg -> { + DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); + + FileIOFactory delegate = dsCfg.getFileIOFactory(); + + dsCfg.setFileIOFactory((file, modes) -> { + if (file.getName().contains("dfrg")) { + if (blockCdl.getCount() == 0) { + try { + waitCdl.await(); + } + catch (InterruptedException ignore) { + // No-op. + } + } + else + blockCdl.countDown(); + } + + return delegate.create(file, modes); + }); + + return cfg; + }; + + IgniteInternalFuture fut = GridTestUtils.runAsync(() -> { + try { + startGrid(0, cfgOp); + } + catch (Exception e) { + // No-op. 
+ throw new RuntimeException(e); + } + }); + + blockCdl.await(); + + mxBean = defragmentationMXBean(ig.name()); + + final IgniteKernal gridx = IgnitionEx.gridx(ig.name()); + final IgniteDefragmentation defragmentation = gridx.context().defragmentation(); + final IgniteDefragmentation.DefragmentationStatus status1 = defragmentation.status(); + + assertEquals(status1.getStartTs(), mxBean.startTime()); + + assertTrue(mxBean.inProgress()); + assertEquals(126, mxBean.processedPartitions()); + final int totalPartitions = status1.getTotalPartitions(); + assertEquals(totalPartitions, mxBean.totalPartitions()); + + waitCdl.countDown(); + + fut.get(); + + ((GridCacheDatabaseSharedManager) grid(0).context().cache().context().database()) + .defragmentationManager() + .completionFuture() + .get(); + + assertFalse(mxBean.inProgress()); + assertEquals(totalPartitions, mxBean.processedPartitions()); + } + + /** + * Get defragmentation JMX bean. + * @param name Ignite instance name. + * @return Defragmentation JMX bean. 
+ */ + private DefragmentationMXBean defragmentationMXBean(String name) { + return getMxBean( + name, + "Defragmentation", + DefragmentationMXBeanImpl.class, + DefragmentationMXBean.class + ); + } + +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java index 2e78ad03c4b4a..10634cc6b85dc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java @@ -95,7 +95,7 @@ public class NoOpWALManager implements IgniteWriteAheadLogManager { } /** {@inheritDoc} */ - @Override public int truncate(WALPointer low, WALPointer high) { + @Override public int truncate(@Nullable WALPointer high) { return 0; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAwareTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAwareTest.java index 0bd9fcb853e40..60663ef6e1f42 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAwareTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/aware/SegmentAwareTest.java @@ -65,9 +65,8 @@ public void testAvoidDeadlockArchiverAndLockStorage() throws IgniteCheckedExcept int i = iterationCnt; while (i-- > 0) { - aware.lockWorkSegment(segmentToHandle); - - aware.releaseWorkSegment(segmentToHandle); + if (aware.lock(segmentToHandle)) + aware.unlock(segmentToHandle); } }); @@ -388,12 +387,12 @@ public void testMarkAsMovedToArchive_WhenReleaseLockedSegment() throws IgniteChe //given: thread which awaited segment. 
SegmentAware aware = new SegmentAware(10, false); - aware.checkCanReadArchiveOrReserveWorkSegment(5); + assertTrue(aware.lock(5)); IgniteInternalFuture future = awaitThread(() -> aware.markAsMovedToArchive(5)); //when: release exact expected work segment. - aware.releaseWorkSegment(5); + aware.unlock(5); //then: waiting should finish immediately. future.get(20); @@ -406,7 +405,8 @@ public void testMarkAsMovedToArchive_WhenReleaseLockedSegment() throws IgniteChe public void testMarkAsMovedToArchive_WhenInterruptWasCall() throws IgniteCheckedException, InterruptedException { //given: thread which awaited segment. SegmentAware aware = new SegmentAware(10, false); - aware.checkCanReadArchiveOrReserveWorkSegment(5); + + assertTrue(aware.lock(5)); IgniteInternalFuture future = awaitThread(() -> aware.markAsMovedToArchive(5)); @@ -521,6 +521,10 @@ public void testReserveCorrectly() { //given: thread which awaited segment. SegmentAware aware = new SegmentAware(10, false); + // Set limits. + aware.curAbsWalIdx(10); + aware.minReserveIndex(0); + //when: reserve one segment twice and one segment once. aware.reserve(5); aware.reserve(5); @@ -586,8 +590,8 @@ public void testReserveWorkSegmentCorrectly() { SegmentAware aware = new SegmentAware(10, false); //when: lock one segment twice. - aware.checkCanReadArchiveOrReserveWorkSegment(5); - aware.checkCanReadArchiveOrReserveWorkSegment(5); + assertTrue(aware.lock(5)); + assertTrue(aware.lock(5)); //then: exact one segment should locked. assertTrue(aware.locked(5)); @@ -595,7 +599,7 @@ public void testReserveWorkSegmentCorrectly() { assertFalse(aware.locked(4)); //when: release segment once. - aware.releaseWorkSegment(5); + aware.unlock(5); //then: nothing to change, segment still locked. assertTrue(aware.locked(5)); @@ -603,7 +607,7 @@ public void testReserveWorkSegmentCorrectly() { assertFalse(aware.locked(4)); //when: release segment. - aware.releaseWorkSegment(5); + aware.unlock(5); //then: all segments should be unlocked. 
assertFalse(aware.locked(5)); @@ -619,10 +623,9 @@ public void testAssertFail_WhenReleaseUnreservedWorkSegment() { //given: thread which awaited segment. SegmentAware aware = new SegmentAware(10, false); - aware.checkCanReadArchiveOrReserveWorkSegment(5); + assertTrue(aware.lock(5)); try { - - aware.releaseWorkSegment(7); + aware.unlock(7); } catch (AssertionError e) { return; @@ -631,6 +634,50 @@ public void testAssertFail_WhenReleaseUnreservedWorkSegment() { fail("Should fail with AssertError because this segment have not reserved"); } + /** + * Check that the reservation border is working correctly. + */ + @Test + public void testReservationBorder() { + SegmentAware aware = new SegmentAware(10, false); + + assertTrue(aware.reserve(0)); + assertTrue(aware.reserve(1)); + + assertFalse(aware.minReserveIndex(0)); + assertFalse(aware.minReserveIndex(1)); + + aware.release(0); + + assertTrue(aware.minReserveIndex(0)); + assertFalse(aware.minReserveIndex(1)); + + assertFalse(aware.reserve(0)); + assertTrue(aware.reserve(1)); + } + + /** + * Check that the lock border is working correctly. + */ + @Test + public void testLockBorder() { + SegmentAware aware = new SegmentAware(10, false); + + assertTrue(aware.lock(0)); + assertTrue(aware.lock(1)); + + assertFalse(aware.minLockIndex(0)); + assertFalse(aware.minLockIndex(1)); + + aware.unlock(0); + + assertTrue(aware.minLockIndex(0)); + assertFalse(aware.minLockIndex(1)); + + assertFalse(aware.lock(0)); + assertTrue(aware.lock(1)); + } + /** * Assert that future is still not finished. 
* diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java index 26ff5a77963bd..630d643c00aec 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java @@ -20,6 +20,9 @@ import java.io.File; import java.io.IOException; import java.nio.file.OpenOption; +import java.nio.file.StandardOpenOption; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; @@ -261,6 +264,8 @@ private String getArchiveWalDirPath(Ignite ignite) throws IgniteCheckedException private static class CountedFileIOFactory extends RandomAccessFileIOFactory { /** {@inheritDoc} */ @Override public FileIO create(File file, OpenOption... 
modes) throws IOException { + assertEquals(Collections.singletonList(StandardOpenOption.READ), Arrays.asList(modes)); + return new CountedFileIO(file, modes); } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/metastorage/DistributedMetaStorageTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/metastorage/DistributedMetaStorageTest.java index 763a806c3a8e2..298e20213b9c2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/metastorage/DistributedMetaStorageTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/metastorage/DistributedMetaStorageTest.java @@ -22,6 +22,7 @@ import java.util.Comparator; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.configuration.DataRegionConfiguration; @@ -31,7 +32,9 @@ import org.apache.ignite.failure.StopNodeFailureHandler; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.processors.metastorage.persistence.DistributedMetaStorageImpl; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.DiscoverySpi; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -133,6 +136,19 @@ public void testSingleNode() throws Exception { metastorage.remove("key"); assertNull(metastorage.read("key")); + + stopGrid(0); + + try { + metastorage.writeAsync("key", "value").get(10, TimeUnit.SECONDS); + + fail("Exception is expected"); + } + catch (Exception e) { + assertTrue(X.hasCause(e, NodeStoppingException.class)); + + assertTrue(e.getMessage().contains("Node is stopping.")); + } } /** diff --git 
a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java index d63439587b536..392c2b9d65234 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.ignite.cache.BreakRebalanceChainTest; import org.apache.ignite.cache.CircledRebalanceTest; +import org.apache.ignite.cache.NoUnnecessaryRebalancesTest; import org.apache.ignite.cache.NotOptimizedRebalanceTest; import org.apache.ignite.cache.RebalanceAfterResettingLostPartitionTest; import org.apache.ignite.cache.RebalanceCancellationTest; @@ -115,6 +116,7 @@ public static List> suite(Collection ignoredTests) { GridTestUtils.addTestIfNeeded(suite, OffHeapLockStackTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, HistoricalReservationTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, CircledRebalanceTest.class, ignoredTests); + GridTestUtils.addTestIfNeeded(suite, NoUnnecessaryRebalancesTest.class, ignoredTests); GridTestUtils.addTestIfNeeded(suite, IgnitePdsCacheEntriesExpirationTest.class, ignoredTests); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java index 19d15c4c803ed..050d3819589c2 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/defragmentation/IndexingDefragmentation.java @@ -40,7 +40,6 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIoResolver; -import org.apache.ignite.internal.processors.cache.persistence.tree.util.InsertLast; import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; import org.apache.ignite.internal.processors.query.h2.database.H2Tree; @@ -158,6 +157,8 @@ public void defragment( for (int i = 0; i < segments; i++) { H2Tree tree = oldH2Idx.treeForRead(i); + newIdx.treeForRead(i).enableSequentialWriteMode(); + treeIterator.iterate(tree, oldCachePageMem, (theTree, io, pageAddr, idx) -> { cancellationChecker.run(); @@ -396,7 +397,7 @@ public BPlusLeafIoDelegate(IO io) { /** * H2CacheRow with stored index values */ - private static class H2CacheRowWithIndex extends H2CacheRow implements InsertLast { + private static class H2CacheRowWithIndex extends H2CacheRow { /** List of index values. 
*/ private final List values; @@ -406,6 +407,7 @@ public H2CacheRowWithIndex(GridH2RowDescriptor desc, CacheDataRow row, List + + + + + 4.0.0 + + + 0.24 + + + + ignite-parent + org.apache.ignite + 1 + ../../../parent + + + ignite-ml-catboost-model-parser + 2.10.0-SNAPSHOT + http://ignite.apache.org + + + + + maven-dependency-plugin + + + copy-libs + package + + copy-dependencies + + + false + + fastutil,h2,ignite-shmem,annotations,arpack_combined_all, + cache-api,commons-math3,commons-rng-client-api,commons-rng-core, + commons-rng-simple,core,ignite-ml + + + + + + + + + + + org.apache.ignite + ignite-ml + ${project.version} + + + + ai.catboost + catboost-prediction + ${catboost.version} + + + + org.apache.ignite + ignite-tools + ${project.version} + test + + + + diff --git a/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostClassificationModel.java b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostClassificationModel.java new file mode 100644 index 0000000000000..674cb1fcbb1fe --- /dev/null +++ b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostClassificationModel.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.catboost; + +import ai.catboost.CatBoostError; +import ai.catboost.CatBoostModel; +import org.apache.ignite.ml.inference.Model; +import org.apache.ignite.ml.math.primitives.vector.NamedVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * CatboostClassificationModel imported and wrapped to be compatible with Apache Ignite infrastructure. + */ +public class CatboostClassificationModel implements Model { + /** Logger. */ + private static final Logger logger = LoggerFactory.getLogger(CatboostClassificationModel.class); + + /** Catboost model. */ + private final CatBoostModel model; + + /** + * Constructs a new instance of Catboost model wrapper. + * + * @param model Catboost Model + */ + public CatboostClassificationModel(CatBoostModel model) { + this.model = model; + } + + /** {@inheritDoc} */ + @Override public Double predict(NamedVector input) { + float[] floatInput = new float[input.size()]; + int index = 0; + for (String key: model.getFeatureNames()) { + floatInput[index] = (float) input.get(key); + index++; + } + + try { + double predict = model.predict(floatInput, model.getFeatureNames()) + .get(0, 0); + // use formula based on https://github.com/catboost/benchmarks/blob/61d62512f751325a14dd885bb71f8c2dabf7e24b/quality_benchmarks/catboost_experiment.py#L77 + return Math.pow(1 + Math.exp(-predict), -1); + } catch (CatBoostError e) { + throw new RuntimeException(e.getMessage()); + } + } + + /** {@inheritDoc} */ + @Override public void close() { + try { + model.close(); + } catch (CatBoostError e) { + logger.error(e.getMessage()); + } + } +} diff --git a/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostClassificationModelParser.java b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostClassificationModelParser.java new file mode 
100644 index 0000000000000..f929f6f549fb4 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostClassificationModelParser.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.catboost; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import ai.catboost.CatBoostError; +import ai.catboost.CatBoostModel; +import org.apache.ignite.ml.inference.parser.ModelParser; +import org.apache.ignite.ml.math.primitives.vector.NamedVector; + +/** + * Catboost Classification model parser. 
+ */ +public class CatboostClassificationModelParser implements + ModelParser { + /** */ + private static final long serialVersionUID = -8425510352746936163L; + + /** {@inheritDoc} */ + @Override public CatboostClassificationModel parse(byte[] mdl) { + try (InputStream inputStream = new ByteArrayInputStream(mdl)) { + return new CatboostClassificationModel(CatBoostModel.loadModel(inputStream)); + } catch (IOException | CatBoostError e) { + throw new RuntimeException("Failed to parse model " + e.getMessage(), e); + } + } +} diff --git a/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostRegressionModel.java b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostRegressionModel.java new file mode 100644 index 0000000000000..bc9d7b6d3904a --- /dev/null +++ b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostRegressionModel.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.catboost; + +import ai.catboost.CatBoostError; +import ai.catboost.CatBoostModel; +import org.apache.ignite.ml.inference.Model; +import org.apache.ignite.ml.math.primitives.vector.NamedVector; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * CatboostRegressionModel imported and wrapped to be compatible with Apache Ignite infrastructure. + */ +public class CatboostRegressionModel implements Model { + /** Logger. */ + private static final Logger logger = LoggerFactory.getLogger(CatboostRegressionModel.class); + + /** Catboost model. */ + private final CatBoostModel model; + + /** + * Constructs a new instance of Catboost model wrapper. + * + * @param model Catboost Model + */ + public CatboostRegressionModel(CatBoostModel model) { + this.model = model; + } + + /** {@inheritDoc} */ + @Override public Double predict(NamedVector input) { + float[] floatInput = new float[input.size()]; + int index = 0; + for (String key: model.getFeatureNames()) { + floatInput[index] = (float) input.get(key); + index++; + } + + try { + return model.predict(floatInput, model.getFeatureNames()) + .get(0, 0); + } catch (CatBoostError e) { + throw new RuntimeException(e.getMessage()); + } + } + + /** {@inheritDoc} */ + @Override public void close() { + try { + model.close(); + } catch (CatBoostError e) { + logger.error(e.getMessage()); + } + } +} diff --git a/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostRegressionModelParser.java b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostRegressionModelParser.java new file mode 100644 index 0000000000000..afa9ef1bcd4fb --- /dev/null +++ b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/CatboostRegressionModelParser.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.catboost; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import ai.catboost.CatBoostError; +import ai.catboost.CatBoostModel; +import org.apache.ignite.ml.inference.parser.ModelParser; +import org.apache.ignite.ml.math.primitives.vector.NamedVector; + +/** + * Catboost Regression model parser. 
+ */ +public class CatboostRegressionModelParser implements + ModelParser { + /** */ + private static final long serialVersionUID = -8425510352746936163L; + + /** {@inheritDoc} */ + @Override public CatboostRegressionModel parse(byte[] mdl) { + try (InputStream inputStream = new ByteArrayInputStream(mdl)) { + return new CatboostRegressionModel(CatBoostModel.loadModel(inputStream)); + } catch (IOException | CatBoostError e) { + throw new RuntimeException("Failed to parse model " + e.getMessage(), e); + } + } +} diff --git a/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/package-info.java b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/package-info.java new file mode 100644 index 0000000000000..bd7ddc775c9fb --- /dev/null +++ b/modules/ml/catboost-model-parser/src/main/java/org/apache/ignite/ml/catboost/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * + * Base package for CatBoost model parser, corresponding DTOs and util classes. 
+ */ + +package org.apache.ignite.ml.catboost; diff --git a/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/IgniteMLCatboostTestSuite.java b/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/IgniteMLCatboostTestSuite.java new file mode 100644 index 0000000000000..5d752952c88e0 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/IgniteMLCatboostTestSuite.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.catboost; + +import org.apache.ignite.ml.catboost.parser.CatboostClassificationModelParserTest; +import org.apache.ignite.ml.catboost.parser.CatboostRegressionModelParserTest; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +/** Test suite for all module tests. */ +@RunWith(Suite.class) +@Suite.SuiteClasses({ + CatboostClassificationModelParserTest.class, + CatboostRegressionModelParserTest.class +}) +public class IgniteMLCatboostTestSuite { + // No-op. 
+} diff --git a/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/parser/CatboostClassificationModelParserTest.java b/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/parser/CatboostClassificationModelParserTest.java new file mode 100644 index 0000000000000..93bfdd2b490bc --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/parser/CatboostClassificationModelParserTest.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.catboost.parser; + +import java.net.URL; +import java.util.HashMap; + +import org.apache.ignite.ml.catboost.CatboostClassificationModel; +import org.apache.ignite.ml.catboost.CatboostClassificationModelParser; +import org.apache.ignite.ml.inference.builder.SingleModelBuilder; +import org.apache.ignite.ml.inference.builder.SyncModelBuilder; +import org.apache.ignite.ml.inference.reader.FileSystemModelReader; +import org.apache.ignite.ml.inference.reader.ModelReader; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/** + * Tests for {@link CatboostClassificationModelParser}. + */ +public class CatboostClassificationModelParserTest { + /** Test model resource name. */ + private static final String TEST_MODEL_RESOURCE = "models/model_clf.cbm"; + + /** Parser. */ + private final CatboostClassificationModelParser parser = new CatboostClassificationModelParser(); + + /** Model builder. */ + private final SyncModelBuilder mdlBuilder = new SingleModelBuilder(); + + /** End-to-end test for {@code parse()} and {@code predict()} methods. 
*/ + @Test + public void testParseAndPredict() { + URL url = CatboostClassificationModelParserTest.class.getClassLoader().getResource(TEST_MODEL_RESOURCE); + if (url == null) + throw new IllegalStateException("File not found [resource_name=" + TEST_MODEL_RESOURCE + "]"); + + ModelReader reader = new FileSystemModelReader(url.getPath()); + + try ( + CatboostClassificationModel mdl = mdlBuilder.build(reader, parser)) { + HashMap input = new HashMap<>(); + input.put("ACTION", 1.0); + input.put("RESOURCE", 39353.0); + input.put("MGR_ID", 85475.0); + input.put("ROLE_ROLLUP_1", 117961.0); + input.put("ROLE_ROLLUP_2", 118300.0); + input.put("ROLE_DEPTNAME", 123472.0); + input.put("ROLE_TITLE", 117905.0); + input.put("ROLE_FAMILY_DESC", 117906.0); + input.put("ROLE_FAMILY", 290919.0); + input.put("ROLE_CODE", 117908.0); + double prediction = mdl.predict(VectorUtils.of(input)); + + assertEquals(0.9928904609329371, prediction, 1e-5); + } + } +} diff --git a/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/parser/CatboostRegressionModelParserTest.java b/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/parser/CatboostRegressionModelParserTest.java new file mode 100644 index 0000000000000..a4482ecef8072 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/java/org/apache/ignite/ml/catboost/parser/CatboostRegressionModelParserTest.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.catboost.parser; + +import java.net.URL; +import java.util.HashMap; + +import org.apache.ignite.ml.catboost.CatboostRegressionModel; +import org.apache.ignite.ml.catboost.CatboostRegressionModelParser; +import org.apache.ignite.ml.inference.builder.SingleModelBuilder; +import org.apache.ignite.ml.inference.builder.SyncModelBuilder; +import org.apache.ignite.ml.inference.reader.FileSystemModelReader; +import org.apache.ignite.ml.inference.reader.ModelReader; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +/** + * Tests for {@link CatboostRegressionModelParser}. + */ +public class CatboostRegressionModelParserTest { + /** Test model resource name. */ + private static final String TEST_MODEL_RESOURCE = "models/model_reg.cbm"; + + /** Parser. */ + private final CatboostRegressionModelParser parser = new CatboostRegressionModelParser(); + + /** Model builder. */ + private final SyncModelBuilder mdlBuilder = new SingleModelBuilder(); + + /** End-to-end test for {@code parse()} and {@code predict()} methods. 
*/ + @Test + public void testParseAndPredict() { + URL url = CatboostRegressionModelParserTest.class.getClassLoader().getResource(TEST_MODEL_RESOURCE); + if (url == null) + throw new IllegalStateException("File not found [resource_name=" + TEST_MODEL_RESOURCE + "]"); + + ModelReader reader = new FileSystemModelReader(url.getPath()); + + try (CatboostRegressionModel mdl = mdlBuilder.build(reader, parser)) { + HashMap input = new HashMap<>(); + input.put("f_0", 0.02731d); + input.put("f_1", 0.0d); + input.put("f_2", 7.07d); + input.put("f_3", 0d); + input.put("f_4", 0.469d); + input.put("f_5", 6.421d); + input.put("f_6", 78.9d); + input.put("f_7", 4.9671d); + input.put("f_8", 2d); + input.put("f_9", 242.0d); + input.put("f_10", 17.8d); + input.put("f_11", 396.9d); + input.put("f_12", 9.14d); + double prediction = mdl.predict(VectorUtils.of(input)); + + assertEquals(21.164552741740483, prediction, 1e-5); + } + } +} diff --git a/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge-sample-expected-results.csv b/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge-sample-expected-results.csv new file mode 100644 index 0000000000000..b5c34f5fcaa01 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge-sample-expected-results.csv @@ -0,0 +1,4 @@ +0.9928904609329371 +0.9963369818846654 +0.9775200762137463 +0.9491935983699706 diff --git a/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge-sample.csv b/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge-sample.csv new file mode 100644 index 0000000000000..2ef456830b0b6 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge-sample.csv @@ -0,0 +1,5 @@ 
+ACTION,RESOURCE,MGR_ID,ROLE_ROLLUP_1,ROLE_ROLLUP_2,ROLE_DEPTNAME,ROLE_TITLE,ROLE_FAMILY_DESC,ROLE_FAMILY,ROLE_CODE +1,39353,85475,117961,118300,123472,117905,117906,290919,117908 +1,17183,1540,117961,118343,123125,118536,118536,308574,118539 +1,36724,14457,118219,118220,117884,117879,267952,19721,117880 +1,36135,5396,117961,118343,119993,118321,240983,290919,118322 diff --git a/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge.csv b/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge.csv new file mode 100644 index 0000000000000..b252c449e0358 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/datasets/amazon-employee-access-challenge.csv @@ -0,0 +1,1000 @@ +ACTION,RESOURCE,MGR_ID,ROLE_ROLLUP_1,ROLE_ROLLUP_2,ROLE_DEPTNAME,ROLE_TITLE,ROLE_FAMILY_DESC,ROLE_FAMILY,ROLE_CODE +1,39353,85475,117961,118300,123472,117905,117906,290919,117908 +1,17183,1540,117961,118343,123125,118536,118536,308574,118539 +1,36724,14457,118219,118220,117884,117879,267952,19721,117880 +1,36135,5396,117961,118343,119993,118321,240983,290919,118322 +1,42680,5905,117929,117930,119569,119323,123932,19793,119325 +0,45333,14561,117951,117952,118008,118568,118568,19721,118570 +1,25993,17227,117961,118343,123476,118980,301534,118295,118982 +1,19666,4209,117961,117969,118910,126820,269034,118638,126822 +1,31246,783,117961,118413,120584,128230,302830,4673,128231 +1,78766,56683,118079,118080,117878,117879,304519,19721,117880 +1,4675,3005,117961,118413,118481,118784,117906,290919,118786 +1,15030,94005,117902,118041,119238,119093,138522,119095,119096 +1,79954,46608,118315,118463,122636,120773,123148,118960,120774 +1,4675,50997,91261,118026,118202,119962,168365,118205,119964 +1,95836,18181,117961,118343,118514,118321,117906,290919,118322 +1,19484,6657,118219,118220,118221,117885,117886,117887,117888 +1,114267,23136,117961,118052,119742,118321,117906,290919,118322 
+1,35197,57715,117961,118446,118701,118702,118703,118704,118705 +1,86316,7002,117961,118343,123125,118278,132715,290919,118279 +1,27785,5636,117961,118413,122007,118321,117906,290919,118322 +1,37427,5220,117961,118300,118458,120006,303717,118424,120008 +1,15672,111936,117961,118300,118783,117905,240983,290919,117908 +1,92885,744,117961,118300,119181,118777,279443,308574,118779 +1,1020,85475,117961,118300,120410,118321,117906,290919,118322 +1,4675,7551,117961,118052,118867,118259,117906,290919,118261 +1,41334,28253,118315,118463,123089,118259,128796,290919,118261 +1,77385,14829,117961,118052,119986,117905,117906,290919,117908 +1,20273,11506,118216,118587,118846,179731,128361,117887,117973 +1,78098,46556,118090,118091,117884,118568,165015,19721,118570 +1,79328,4219,117961,118300,120312,120313,144958,118424,120315 +1,23921,4953,117961,118343,119598,120344,310997,118424,120346 +1,34687,815,117961,118300,123719,117905,117906,290919,117908 +1,43452,169112,117902,118041,119781,118563,121024,270488,118565 +1,33248,4929,117961,118300,118825,118826,226343,118424,118828 +1,78282,7445,117961,118343,122299,118054,121350,117887,118055 +1,17183,794,118752,119070,117945,280788,152940,292795,119082 +1,38658,1912,119134,119135,118042,120097,174445,270488,120099 +1,14354,50368,117926,118266,117884,118568,281735,19721,118570 +1,45019,1080,117961,118327,118378,120952,120953,118453,120954 +1,13878,1541,117961,118225,123173,120812,123174,118638,120814 +1,14570,46805,117929,117930,117920,118568,281735,19721,118570 +0,74310,49521,117961,118300,118301,119849,235245,118638,119851 +1,6977,1398,117961,118300,120722,118784,130735,290919,118786 +1,31613,5899,117961,118327,120318,118777,296252,308574,118779 +1,1020,21127,117961,118052,119408,118777,279443,308574,118779 +1,32270,3887,117961,118343,120347,120348,265969,118295,120350 +1,19629,19645,117961,118413,118481,118784,240983,290919,118786 +1,15702,1938,117961,118300,118066,120560,304465,118643,120562 
+1,113037,5396,117961,118343,119993,120773,118959,118960,120774 +1,20279,17695,117890,117891,117878,117879,117879,19721,117880 +1,80746,16690,117961,118446,119064,122022,131302,119221,122024 +1,80263,36145,117961,118052,120304,307024,311622,118331,118332 +1,73753,70062,117961,118386,118746,117905,117906,290919,117908 +1,39883,7551,117961,118052,118867,117905,172635,290919,117908 +1,25993,7023,117961,117962,119223,118259,118260,290919,118261 +0,78106,50613,117916,118150,118810,118568,159905,19721,118570 +1,33150,1915,117961,118300,119181,118784,117906,290919,118786 +1,34817,5899,117961,118327,120318,118641,240982,118643,118644 +1,28354,3860,117961,118446,120317,118321,117906,290919,118322 +1,33642,13196,117951,117952,117941,117879,117897,19721,117880 +1,26430,56310,118212,118580,117895,117896,117913,117887,117898 +1,28149,50120,91261,118026,119507,118321,117906,290919,118322 +1,40867,6736,117961,117969,6725,122290,268766,6725,122292 +1,20293,273476,117926,118266,117920,118568,310732,19721,118570 +1,36020,2163,118219,118220,120694,118777,130218,308574,118779 +1,60006,16821,117961,118225,120535,118396,269406,118398,118399 +0,35043,14800,117961,117962,118352,118784,117906,290919,118786 +1,17308,4088,117961,118300,118458,118728,223125,118295,118730 +0,15716,18073,118256,118257,118623,118995,286106,292795,118997 +1,39883,55956,118555,118178,119262,117946,119727,292795,117948 +1,42031,88387,118315,118463,118522,119172,121927,118467,119174 +1,27124,2318,117961,118327,118933,117905,117906,290919,117908 +1,35498,18454,117961,118343,119598,125171,257115,118424,125173 +1,79168,58465,118602,118603,117941,117885,119621,117887,117888 +1,2252,782,117961,118413,127522,118784,240983,290919,118786 +1,45652,7338,117961,118225,119924,118321,118448,290919,118322 +1,23921,4145,117961,118300,120026,307024,303717,118331,118332 +1,95247,50690,118269,118270,117878,118568,118568,19721,118570 +1,78844,15645,117961,118052,122392,128903,160695,292795,128905 
+1,19481,10627,118106,118107,119565,179731,155780,117887,117973 +1,18380,44022,117961,117962,122215,127782,130085,290919,127783 +1,37734,58406,117975,117976,117884,117885,117913,117887,117888 +1,3853,17550,117961,118446,118684,118321,117906,290919,118322 +1,278393,7076,117961,118225,120323,119093,136840,119095,119096 +1,35625,6454,117961,118343,118856,117905,240983,290919,117908 +1,35066,17465,91261,118026,118202,118278,118260,290919,118279 +1,3853,5043,117961,118300,118458,120006,310997,118424,120008 +1,41569,16671,117961,118052,118706,118523,310608,118331,118525 +1,25862,46224,117961,118327,118378,120952,143223,118453,120954 +1,75078,45963,117961,118386,118896,122645,309858,119221,122647 +1,1020,1483,117961,117962,118840,118641,306399,118643,118644 +0,22956,3967,117961,118052,118706,118321,117906,290919,118322 +1,20364,2612,117961,118386,123901,117905,117906,290919,117908 +1,28943,7547,117961,118052,118933,118784,213944,290919,118786 +1,75329,17414,118752,119070,118042,118043,151099,270488,118046 +1,41569,70066,91261,118026,118202,117905,117906,290919,117908 +1,4684,50806,117961,118446,119961,118259,118260,290919,118261 +1,77943,4478,117961,118386,118692,118321,117906,290919,118322 +1,38860,15541,118573,118574,118556,280788,127423,292795,119082 +1,74142,2371,117961,117962,118501,120702,308462,118504,120704 +1,58928,17091,117961,118225,118403,118054,287351,117887,118055 +1,79309,17128,117961,118300,119984,118396,300044,118398,118399 +0,36480,15510,118169,118170,118171,130479,185784,119784,130481 +1,16591,44053,117961,117962,117904,119949,123170,290919,119951 +1,77429,14810,117961,118413,120370,118321,117906,290919,118322 +1,80550,5659,117961,118300,124725,118321,117906,290919,118322 +1,25814,1065,117961,118327,121589,118641,306399,118643,118644 +1,15805,4438,117961,118386,121668,118777,279443,308574,118779 +1,4675,59029,118555,118178,118320,118321,117906,290919,118322 +1,56665,4636,117902,117903,119181,120952,130134,118453,120954 
+1,73756,3371,117902,118041,118042,118043,118044,270488,118046 +1,90518,7252,117961,117962,118352,117905,117906,290919,117908 +0,80501,19717,117961,117962,118352,118321,117906,290919,118322 +1,81480,96704,117910,117911,117884,117879,117886,19721,117880 +1,3069,770,117961,118300,119181,118451,130134,118453,118454 +1,79021,5568,121785,121786,118492,132692,174445,270488,132694 +1,33642,50323,117980,117981,117941,118568,281735,19721,118570 +1,23553,20899,117961,118340,117970,179731,179731,117887,117973 +1,42085,5715,91261,118026,119362,119363,307210,118667,119365 +1,38655,17614,117902,118041,118042,118043,118044,270488,118046 +1,25330,19702,118269,118270,117878,117879,117879,19721,117880 +1,1098,6993,117961,118343,118514,117905,240983,290919,117908 +0,23096,29762,117961,118052,120398,122067,130238,118424,122069 +1,34433,50976,117961,118413,119968,117905,117906,290919,117908 +1,78607,7560,117961,118343,119181,118321,117906,290919,118322 +1,79121,225257,120342,120343,119076,118422,124426,118424,118425 +1,42347,98689,117902,117903,132530,117905,117906,290919,117908 +1,34575,4013,117961,118300,118514,118321,240983,290919,118322 +1,19730,4311,119062,119091,118514,118777,279443,308574,118779 +1,103462,15413,117961,118343,120270,311867,130382,118478,118479 +1,39262,51748,118315,118316,225010,118685,279443,308574,118687 +1,16333,111936,117961,118300,118783,117905,240983,290919,117908 +1,15007,770,117961,118300,120722,118451,130134,118453,118454 +1,75787,15387,117887,118178,118575,119849,212678,118638,119851 +1,45790,1215,117961,118327,120171,118863,126488,118398,118865 +1,20349,46254,117961,118327,126310,118641,306399,118643,118644 +1,73752,70062,117961,118386,118746,117905,117906,290919,117908 +1,6964,5432,117961,118327,118391,118641,123881,118643,118644 +1,25231,1216,117902,117903,118507,119529,205305,119006,119531 +1,34633,4264,118887,118888,118979,118980,199144,118295,118982 +1,28472,28163,118315,118316,120361,118321,175154,290919,118322 
+1,24015,14731,117961,118446,118447,119346,302830,4673,119348 +1,75813,75640,118315,118463,119214,118321,117906,290919,118322 +1,74161,2290,117902,117903,118403,128422,122352,118453,128424 +1,30628,7987,117961,118327,120383,118321,149826,290919,118322 +1,4675,17394,117961,118413,122007,118321,117906,290919,118322 +1,45444,124899,117961,118327,120171,118890,182227,118398,118892 +1,3853,5030,117961,118343,120126,120497,223125,118424,120499 +1,32997,2286,117902,117903,277693,117905,117906,290919,117908 +1,31613,7629,117961,118327,120383,118784,213944,290919,118786 +1,42031,28245,118315,118463,123089,117905,130219,290919,117908 +1,6302,50581,118114,118115,117884,117885,117913,117887,117888 +1,79850,4004,117961,118343,118856,117905,117906,290919,117908 +1,79647,5360,117961,118446,122550,118321,117906,290919,118322 +1,7678,15417,117961,119256,119257,126684,130913,292795,126685 +1,4675,49577,117961,118327,118933,118784,200462,290919,118786 +0,77977,3811,120864,121013,118395,118396,269406,118398,118399 +1,44812,5432,117961,118327,118391,118784,117906,290919,118786 +1,80472,120822,119596,119597,120823,120647,311441,118398,120649 +0,14570,23304,117961,118327,124921,137969,271378,118612,137970 +1,43519,6890,117961,118446,120317,117905,117906,290919,117908 +1,75834,8343,119301,119302,119303,117905,120828,290919,117908 +1,98054,36,117961,118413,119968,118321,117906,290919,118322 +1,80222,44038,117902,117903,118910,119346,302830,4673,119348 +1,31299,3118,117961,118300,118458,120789,309123,118424,120791 +1,78327,79402,117983,117984,117878,118568,292195,19721,118570 +1,19722,4511,117961,118300,119181,118321,117906,290919,118322 +1,1937,6216,117961,118327,118328,130857,161026,308574,130858 +1,75901,4945,117961,118300,118360,119849,148676,118638,119851 +1,25993,139593,117902,118041,119781,130479,179826,119784,130481 +0,20312,18044,117961,118386,118522,118321,117906,290919,118322 +1,4675,71189,117961,118386,128823,118259,125206,290919,118261 
+1,40919,38364,118315,118316,118464,130857,156150,308574,130858 +1,74242,7398,117961,118225,118616,117905,240983,290919,117908 +1,80215,217,117961,118413,120370,118321,240983,290919,118322 +1,29058,3651,119062,119091,123125,124152,300136,118424,124154 +1,79092,25557,117961,118300,121951,128422,130134,118453,128424 +1,15913,2061,117961,118327,120383,118641,120319,118643,118644 +1,25098,2003,118441,118442,118378,118054,121603,117887,118055 +1,25312,69981,118595,118596,118706,118172,130384,249618,118175 +1,1151,50117,117961,118327,118507,118863,122008,118398,118865 +1,79092,70264,117961,118343,118437,124435,118362,118363,124436 +1,44644,50997,117961,118052,120539,118321,118448,290919,118322 +1,86292,8715,117961,118052,119986,118321,117906,290919,118322 +1,4675,8563,117961,118300,118783,117905,117906,290919,117908 +1,34580,1398,117961,118300,120722,118784,130735,290919,118786 +0,18913,273476,117926,118266,117920,118568,310732,19721,118570 +1,16321,7014,117961,118446,119961,118278,118260,290919,118279 +1,80780,4272,117961,118343,120666,123045,123107,120518,123047 +0,44976,7691,118555,118178,117945,117946,119114,292795,117948 +1,41235,59000,118181,118182,117941,118568,292195,19721,118570 +1,7543,6257,117961,118343,120722,118784,117906,290919,118786 +1,3853,20560,117876,117877,117878,117879,117879,19721,117880 +1,34396,34375,117961,117969,189629,179731,123099,117887,117973 +1,41643,13854,117961,118052,118821,118278,117906,290919,118279 +1,80057,7084,117961,118386,121961,118318,130278,118205,118319 +1,38964,4341,117961,118343,118957,118321,117906,290919,118322 +1,16619,14731,117961,118446,118447,121594,170665,4673,121596 +1,4675,4362,118212,118213,126955,118685,120316,308574,118687 +1,79228,13400,117983,117984,117878,117879,117879,19721,117880 +1,36480,15541,118573,118574,118556,280788,127423,292795,119082 +1,20279,50625,117916,118150,117884,117885,117913,117887,117888 +1,42873,4590,117961,118343,118395,123067,269406,118398,123068 
+1,37942,11439,117935,117936,117878,117885,117913,117887,117888 +1,36674,14633,118106,118107,117884,117879,117886,19721,117880 +1,25831,55161,118752,119070,118754,119885,118806,118474,119887 +1,35196,16643,119134,119135,118701,119587,284481,118704,119589 +1,39518,7822,117961,118327,118492,118777,174445,308574,118779 +1,41649,4084,117961,118052,118992,118321,117906,290919,118322 +0,7543,4554,117961,118300,118514,118259,118260,290919,118261 +1,75078,75114,118315,118463,118522,307024,294485,118331,118332 +1,16153,79105,119062,119063,124211,120516,123107,120518,120519 +1,33155,1915,117961,118300,119181,118784,117906,290919,118786 +1,7543,23144,117961,118225,118403,120690,120690,290919,120692 +1,42942,5235,120864,121013,121961,118523,246167,118331,118525 +1,13878,54261,117890,117891,117878,118568,123862,19721,118570 +1,20292,50448,117926,118124,117884,117879,117886,19721,117880 +1,42031,2071,117961,118386,118501,118502,130777,118504,118505 +1,6914,5518,117961,118300,118514,118259,132715,290919,118261 +1,75834,21033,117961,118300,123749,118777,279443,308574,118779 +1,39233,5659,117961,118300,124725,117905,117906,290919,117908 +1,40867,87910,91261,118026,119507,118278,118260,290919,118279 +1,25213,1311,117961,117962,118910,118451,123503,118453,118454 +1,55554,1448,117902,117903,120722,117905,117906,290919,117908 +1,75078,3230,117961,118386,121961,120348,249620,118295,120350 +1,31299,1605,117961,118327,128935,149337,205238,118398,149339 +1,79948,4932,117961,118225,119824,123737,147851,118960,123738 +1,78845,81794,117961,118052,119972,122551,159682,118762,122552 +1,38470,72734,118079,118080,117878,117885,118177,117887,117888 +1,45976,16527,91261,118026,118202,118321,117906,290919,118322 +1,74818,4274,117961,118327,118744,120591,183734,119095,120593 +1,70452,13401,117980,117981,118246,179731,141712,117887,117973 +1,40438,17640,117961,118052,118706,118318,120689,118205,118319 +1,20279,72322,117916,118011,117884,117885,117913,117887,117888 
+0,967,2855,118169,118170,119262,117946,125711,292795,117948 +1,36772,51491,118074,23779,117884,117879,117886,19721,117880 +1,43431,120000,117961,118225,119136,120056,255657,118474,120058 +1,4675,69637,119062,119091,118535,117905,117906,290919,117908 +1,25993,4933,117961,118300,118458,307024,301218,118331,118332 +1,25231,1038,118887,118888,118889,118863,307233,118398,118865 +1,6977,5697,117961,118386,118692,118784,117906,290919,118786 +1,99461,1088,117961,118327,119830,117905,117906,290919,117908 +1,42881,1988,117961,118446,16232,117905,117906,290919,117908 +1,80973,15746,117961,118052,119742,118321,118448,290919,118322 +1,17825,13836,119920,119921,142145,128230,302830,4673,128231 +1,43876,58954,118079,118080,117878,117885,118177,117887,117888 +1,74161,7586,117961,118225,118403,118321,117906,290919,118322 +1,24890,3692,117961,118413,126229,121594,207709,4673,121596 +1,6977,70056,117961,118225,118403,118777,279443,308574,118779 +0,107225,15389,117961,118225,122870,119928,129017,118331,119929 +1,44909,4659,117961,118225,120663,118784,147114,290919,118786 +1,74283,25618,117961,118327,118320,118523,310608,118331,118525 +1,34075,221,117961,118413,120370,118321,118448,290919,118322 +1,75078,46608,118315,118463,122636,120773,123148,118960,120774 +1,42093,2594,117961,118300,123472,118259,118260,290919,118261 +1,3057,1398,117961,118300,120722,118321,117906,290919,118322 +1,27416,1549,118084,118085,117920,118129,132803,118131,118132 +1,74347,33266,117961,118300,123719,117905,240983,290919,117908 +0,21751,56199,117961,118052,118881,117905,117906,290919,117908 +1,73562,13845,117961,117962,120526,118784,310608,290919,118786 +1,29304,98229,117975,117976,117884,117879,117886,19721,117880 +1,39353,85475,117961,118300,120410,118784,117906,290919,118786 +1,25744,6225,117961,118343,120722,118784,117906,290919,118786 +1,78816,6216,117961,118052,118328,124886,306399,118643,124888 +1,38714,3692,117961,118413,120370,121594,170665,4673,121596 
+1,38840,7915,117961,118300,118597,118834,127160,118424,118836 +1,39395,49423,117961,118052,123144,118396,269406,118398,118399 +1,1003,7220,117961,118225,120551,117905,186454,290919,117908 +1,34886,4639,117961,118343,118856,118278,117906,290919,118279 +1,4675,13871,117961,118300,118514,118321,240983,290919,118322 +1,42093,70128,118595,118596,81476,118834,153821,118424,118836 +1,17308,4933,117961,118343,118458,120006,311622,118424,120008 +1,16301,217,117961,118413,120370,118321,117906,290919,118322 +1,42976,4713,117961,118052,120398,120344,311360,118424,120346 +1,75272,4918,117961,118300,123055,120344,311360,118424,120346 +0,27416,50601,117916,117917,117941,117879,117886,19721,117880 +1,35376,28163,118315,118316,120361,118321,175154,290919,118322 +1,98033,4549,117961,118225,120050,117905,117906,290919,117908 +1,18072,57715,117961,118446,118701,118702,129679,118704,118705 +1,44592,7081,117961,118225,119924,118685,120316,308574,118687 +1,22481,30533,91261,118026,119507,123648,123107,120518,123650 +1,80473,6454,117961,118343,118856,120773,165589,118960,120774 +1,79121,55119,119062,119091,123125,134095,300136,118474,118475 +1,41594,91342,117961,118327,118933,209874,311622,308574,209875 +1,75338,8280,118212,118580,117895,117896,117897,117887,117898 +1,15922,123020,117961,118300,118514,118321,117906,290919,118322 +1,16333,7021,117961,118300,118783,118784,287626,290919,118786 +1,42972,49323,118990,118991,118992,118321,117906,290919,118322 +1,80202,3000,117961,118225,120551,118321,118448,290919,118322 +0,917,5313,117961,118386,120356,120357,201020,118424,120359 +1,45481,17270,117961,118300,128830,118826,150790,118424,118828 +1,6094,15015,117961,118413,122007,118321,117906,290919,118322 +1,45035,61948,118163,118164,117878,117879,208308,19721,117880 +1,302049,2685,122880,122974,117945,117946,149467,292795,117948 +1,79299,17059,117902,118041,118556,118274,119248,292795,118276 +1,28294,4100,118290,118291,124668,119928,278266,118331,119929 
+1,32270,5178,117961,118343,118660,118826,158101,118424,118828 +1,4684,60121,121785,121786,119922,118784,203564,290919,118786 +1,79092,7212,117961,118300,124725,118784,117906,290919,118786 +1,17308,5039,117961,118343,123125,119849,282370,118638,119851 +1,98464,53208,118602,118603,118810,310825,149047,117887,121395 +1,32270,1334,117961,117962,118910,118054,118368,117887,118055 +1,40189,5210,117961,118343,119598,119433,133686,118424,119435 +1,37793,1923,117902,117903,118783,117905,117906,290919,117908 +1,16997,15945,118290,118291,121617,124799,152247,121620,124801 +1,80674,49453,117961,118343,118660,118912,309291,118424,118914 +1,81478,5504,117961,118300,118514,118685,132642,308574,118687 +1,77178,3225,117961,118327,118929,118321,117906,290919,118322 +1,32270,18236,117961,118052,120304,122297,301475,118331,118899 +1,17795,54684,117961,117962,118910,118777,279443,308574,118779 +1,14570,57816,118219,118220,118810,118811,128802,19793,118813 +1,15064,2363,117961,117962,141383,118368,118368,117887,118486 +1,42919,6222,117961,118052,118746,117905,117906,290919,117908 +1,17308,5043,117961,118300,118458,118523,129545,118331,118525 +1,30999,2942,117961,118343,118700,118784,117906,290919,118786 +1,3853,4716,118290,118291,120026,120344,165069,118424,120346 +1,75834,49384,117961,118343,118609,118728,301534,118295,118730 +1,34536,55941,117961,118327,122109,117905,117906,290919,117908 +0,4675,58719,117961,118386,119954,118318,168365,118205,118319 +0,79092,108732,119062,119091,118684,122989,131688,119006,122991 +1,915,770,117961,118300,120722,118451,142600,118453,118454 +1,27082,141,117961,118225,122273,117905,240983,290919,117908 +1,23921,17261,117961,118343,118833,118054,124356,117887,118055 +1,23194,1398,117961,118300,120722,118321,117906,290919,118322 +1,70086,76523,117983,117984,117878,118568,292195,19721,118570 +1,79092,6454,117961,118343,118856,118321,117906,290919,118322 +1,78833,1334,117961,117962,118910,119949,230830,290919,119951 
+1,15361,27,117961,118413,120370,118321,117906,290919,118322 +1,25993,4918,117961,118300,123055,120344,311360,118424,120346 +0,971,4219,117961,118300,120312,120313,157234,118424,120315 +1,73112,27104,117961,118327,118507,118054,120716,117887,118055 +1,3853,114489,117961,118413,119968,118784,117906,290919,118786 +1,34498,4125,118887,118888,121108,120006,311746,118424,120008 +1,18394,14855,117961,118300,123719,118451,153802,118453,118454 +1,20349,25813,117961,118300,118066,120988,213772,118643,120989 +1,77343,2858,118219,118220,118344,127847,128352,118347,127848 +1,25993,1409,117961,118300,118437,118321,117906,290919,118322 +1,39511,4118,117876,117877,117878,118568,292195,19721,118570 +1,20303,718,117961,118300,119181,118259,126363,290919,118261 +1,34950,91342,117961,118327,118933,307024,311622,118331,118332 +1,79121,4736,117961,118300,118301,307024,204593,118331,118332 +1,74343,52105,118595,118596,81476,118422,170384,118424,118425 +1,33146,1271,117961,118327,118507,118863,127778,118398,118865 +1,3853,49302,117961,118343,118514,118321,240983,290919,118322 +1,80920,3973,118887,118888,118631,118912,309291,118424,118914 +0,77203,15621,117961,119256,120943,126684,126684,292795,126685 +0,16495,26009,117961,118446,119986,118777,279443,308574,118779 +1,38655,4993,117961,118300,118979,118912,309291,118424,118914 +1,107522,25262,117961,118327,118320,123067,137949,118398,123068 +1,4675,23339,117961,118300,118783,118321,118406,290919,118322 +1,34073,217,117961,118413,120370,118321,117906,290919,118322 +1,80685,17694,117961,118343,119598,117905,117906,290919,117908 +1,81261,5496,91261,118026,118202,117905,117906,290919,117908 +1,108223,4474,117961,118446,119064,117905,117906,290919,117908 +1,90526,3889,117961,118386,121883,117905,117906,290919,117908 +1,45089,1475,91261,118026,118450,118641,123881,118643,118644 +1,52683,56007,117980,118076,117912,118568,117886,19721,118570 +1,34026,5456,117961,118225,118403,118321,117906,290919,118322 
+1,42031,92216,118315,118316,120764,118465,175082,118467,118468 +1,34628,2034,117961,118052,118328,299559,118204,118205,163732 +1,13878,12456,117935,117936,117878,117879,117879,19721,117880 +1,80826,5396,117961,118343,119993,120773,118959,118960,120774 +1,33616,8234,117961,118300,118783,117905,117906,290919,117908 +1,917,1030,117961,118343,120722,118361,118362,118363,118364 +1,4675,111936,117961,118300,118783,118784,117906,290919,118786 +1,84071,15746,117961,118052,119742,118321,117906,290919,118322 +1,80160,6482,117961,118052,118821,118321,117906,290919,118322 +1,38482,350,118216,118217,117941,117885,117913,117887,117888 +1,82277,7278,91261,118026,118202,118321,117906,290919,118322 +1,32270,25293,117961,118386,123072,121414,130802,118704,121416 +1,18293,17787,118114,118115,118846,179731,140758,117887,117973 +1,80540,3119,117961,118300,120312,119849,236236,118638,119851 +1,42713,19642,117961,118413,118481,118321,117906,290919,118322 +1,256,6440,118216,118217,117941,118568,281735,19721,118570 +1,33111,3000,117961,118225,120551,117905,240983,290919,117908 +1,38239,51165,117983,117984,117878,118568,120877,19721,118570 +1,36429,77306,119280,119281,117945,280788,280788,292795,119082 +1,3066,770,117961,118300,120722,118747,130134,118453,118749 +1,42085,17058,117961,118300,119488,118728,301534,118295,118730 +1,72191,16243,117961,118327,118933,118841,128747,118643,118843 +1,34958,2061,117961,118327,120383,118641,306399,118643,118644 +1,74627,14349,118976,118977,117895,117879,118250,19721,117880 +1,40204,20382,117961,118300,124942,118784,117906,290919,118786 +1,91620,19760,117961,117962,118352,118321,117906,290919,118322 +1,4685,47,119920,119921,176153,117905,117906,290919,117908 +1,25322,123367,117932,117933,117878,117879,117897,19721,117880 +1,80536,3656,117961,118300,118631,118924,153285,118667,118926 +1,77951,96511,117922,117923,118810,122927,121125,118131,122929 +1,32270,23225,117961,118300,119984,120647,311441,118398,120649 
+1,77295,56137,117961,118327,118862,118396,269406,118398,118399 +1,31441,16972,118887,118888,119987,119849,240725,118638,119851 +1,43876,56596,118079,118080,117878,117879,304519,19721,117880 +1,34924,25262,117961,118327,118320,118685,310608,308574,118687 +1,25990,4263,121785,121786,119142,118321,117906,290919,118322 +1,38713,15612,117902,118041,118556,280788,280788,292795,119082 +1,15915,54248,117961,118327,120559,120618,304465,118643,120619 +1,45990,7705,117887,118178,117945,259173,141966,292795,118943 +1,17308,72287,126918,126919,118042,118043,118043,270488,118046 +1,80193,1325,117961,117969,118910,118054,118054,117887,118055 +0,25279,15390,117961,118300,118301,120344,311360,118424,120346 +0,79562,13876,117961,118386,118404,118841,240982,118643,118843 +1,18418,6732,117961,117969,6725,120527,275449,6725,120529 +0,80874,4589,117961,118327,120383,117905,240983,290919,117908 +1,73756,2685,122880,122974,117945,117946,149467,292795,117948 +1,29697,19956,117961,118327,274241,118777,133772,308574,118779 +1,78185,15683,117961,118300,118957,118777,279443,308574,118779 +1,34156,3692,117961,118413,126229,128230,302830,4673,128231 +1,7678,17059,117902,118041,117945,259173,191237,292795,118943 +1,76545,3558,117902,118041,118042,118043,118044,270488,118046 +1,44641,5320,118290,118291,119598,120344,260942,118424,120346 +1,80368,5140,119062,119091,121710,117905,117906,290919,117908 +1,41594,3225,117961,118327,118929,118685,279443,308574,118687 +1,42085,22458,117961,117962,120677,307024,306404,118331,118332 +1,43056,2610,117961,118327,121979,118321,240983,290919,118322 +1,26437,56310,118212,118580,117895,117896,117913,117887,117898 +1,40997,19286,117961,118225,121716,118636,310589,118638,118639 +1,43431,18185,117961,118225,129617,121414,269394,118704,121416 +1,33111,4542,117961,118225,120551,128230,302830,4673,128231 +1,43280,3281,117961,118225,119238,122849,120324,119095,122850 +1,75834,1912,119134,119135,118042,117896,287998,117887,117898 
+1,102270,111936,117961,118300,118783,117905,117906,290919,117908 +1,34865,3722,117961,118300,120059,119849,124718,118638,119851 +1,23921,4821,117961,118343,119598,118980,304760,118295,118982 +1,23096,17598,117961,118300,118631,135809,176829,118331,135811 +1,44594,2014,117961,117962,117904,120773,127525,118960,120774 +0,39879,55642,118256,118257,117945,118995,118806,292795,118997 +1,74466,7398,117961,118225,119924,118321,117906,290919,118322 +1,4675,4567,117961,118300,118514,118318,168365,118205,118319 +1,22282,19697,117935,117936,117878,117885,117937,117887,117888 +1,45338,2378,117876,117877,117878,247659,163031,19721,247660 +1,41495,7539,117961,118343,119987,118321,117906,290919,118322 +1,39932,2819,119062,119091,120410,118028,310589,117887,118030 +1,42093,5002,117961,118343,119598,118422,300136,118424,118425 +1,16421,4584,117961,118300,120297,117905,117906,290919,117908 +1,107802,4511,117961,118300,119181,118784,117906,290919,118786 +1,41325,14810,117961,118413,120370,117905,117906,290919,117908 +1,28294,7519,91261,118026,118202,118784,124402,290919,118786 +1,29697,49096,118212,159716,118378,120952,159717,118453,120954 +1,15018,133801,119062,119091,118535,117905,117906,290919,117908 +1,74302,55636,117916,118011,117920,119192,139264,119184,119194 +1,20293,547,117926,117927,117884,117885,117913,117887,117888 +1,78882,52056,118212,118213,119734,153957,153958,118667,153959 +0,42019,4362,117961,118343,126955,118777,279443,308574,118779 +1,74310,7579,117961,118300,120722,118685,123004,308574,118687 +1,33993,30527,117961,118225,120551,119928,127692,118331,119929 +1,19722,8074,117961,118300,119181,118321,117906,290919,118322 +1,6977,25607,117961,118343,118856,118259,125889,290919,118261 +1,20292,27891,117926,117927,117920,124313,124314,120134,124315 +1,25964,214,117961,118343,6104,118321,117906,290919,118322 +1,81350,2908,117961,118343,118979,118834,133686,118424,118836 +1,33232,4378,117961,118327,118507,118054,120238,117887,118055 
+1,79363,47104,117890,117891,117878,118568,120877,19721,118570 +1,90436,5244,117961,118343,119598,117905,117906,290919,117908 +1,14960,58476,117929,117930,117920,117885,117913,117887,117888 +1,33149,1350,117961,118052,120096,118321,117906,290919,118322 +1,45974,51740,118752,119070,119121,280788,130913,292795,119082 +0,5173,46526,117961,118300,119984,118396,269406,118398,118399 +1,18605,15406,117961,118225,118403,118054,118054,117887,118055 +1,6977,3032,118212,118213,123201,117905,125206,290919,117908 +1,681,89939,117961,118413,122007,128230,302830,4673,128231 +1,87082,18220,117961,118052,118867,118321,117906,290919,118322 +1,39879,5570,117961,119256,119257,117946,118806,292795,117948 +1,6705,50040,117961,118327,118391,117905,117906,290919,117908 +1,20351,2113,117951,117952,117920,118568,124610,19721,118570 +1,74195,4566,119062,119091,118992,179731,117906,117887,117973 +1,1098,7079,117961,118343,118514,118685,120316,308574,118687 +1,40420,49987,118090,118091,117920,118568,133542,19721,118570 +1,23702,4554,117961,118300,118514,118259,118260,290919,118261 +1,100514,3838,117961,118225,119924,118321,117906,290919,118322 +1,4675,59029,118555,118178,118320,118784,117906,290919,118786 +1,15714,51746,118752,118753,117945,118274,118806,292795,118276 +1,79121,5046,117961,118052,120356,126516,300136,118612,126518 +1,20294,53396,117916,118150,117920,118568,281735,19721,118570 +1,79092,127738,117961,118225,118403,118451,130134,118453,118454 +1,6055,23305,117961,118327,118862,118890,125128,118398,118892 +1,41296,4638,117961,118343,119987,119172,256190,118467,119174 +1,38480,31061,118212,118580,117895,117896,117897,117887,117898 +1,75787,15387,118555,118178,118575,119849,212678,118638,119851 +1,4675,97016,119062,119091,118957,118278,118260,290919,118279 +1,79813,49323,118990,118991,118992,118321,117906,290919,118322 +1,15907,782,117961,118413,277693,117905,117906,290919,117908 +1,18332,13411,118023,118024,118810,122952,305581,19793,122954 
+1,42093,7858,91261,118026,119362,119363,307210,118667,119365 +1,80515,3085,117961,118300,125821,118834,251171,118424,118836 +1,83226,23338,117961,118052,118867,122129,270992,121916,122131 +1,35376,18686,117961,118386,128823,117905,290919,290919,117908 +1,6170,1437,118212,118213,118437,118636,192211,118638,118639 +1,34480,5238,118290,118291,118053,132692,134889,270488,132694 +1,4675,2944,117961,118343,119796,119743,132775,3130,119745 +0,36646,27873,117978,117979,117884,117879,117886,19721,117880 +1,3853,51235,120864,121013,124133,120344,303717,118424,120346 +1,73214,782,117961,118413,127522,118784,124402,290919,118786 +1,19944,15993,117961,117969,121617,131849,149815,121620,131851 +1,38725,7505,117961,118300,119195,117905,117906,290919,117908 +1,20897,7492,117961,118225,117963,118811,286597,19793,118813 +1,34548,25194,117961,118343,123125,117905,117906,290919,117908 +0,23497,52314,117961,118300,118301,118980,306795,118295,118982 +1,23965,5030,117961,118343,120126,119433,133686,118424,119435 +1,1020,25558,118990,118991,118992,307024,294485,118331,118332 +1,16496,7388,117961,118225,119924,118318,168365,118205,118319 +1,44649,49574,117961,118052,118391,120773,118959,118960,120774 +1,80809,49423,117961,118052,123144,118890,203706,118398,118892 +1,34498,18450,117961,118052,118821,117905,117906,290919,117908 +1,1281,13791,117961,117962,19666,118368,129597,117887,118486 +1,17308,4395,117961,118300,118597,118422,150827,118424,118425 +1,26360,59765,117961,118386,121961,118912,309291,118424,118914 +1,78703,15516,117961,118052,120304,118702,132675,118704,118705 +1,31475,96704,117910,117911,117884,117879,117886,19721,117880 +1,117594,197,117961,118413,118414,118784,240983,290919,118786 +1,35068,15399,117983,117984,117878,118396,139898,118398,118399 +1,80756,15542,117961,118300,118395,118396,300044,118398,118399 +1,28105,3281,117961,118225,120323,120591,120592,119095,120593 +1,73756,4728,117961,118343,123125,118685,120316,308574,118687 
+1,91087,2343,119691,119692,118635,122967,311441,119695,122969 +1,20351,2017,117961,118327,121645,118054,159996,117887,118055 +1,3853,217,117961,118413,120370,118321,240983,290919,118322 +1,79285,18047,117902,118041,119195,118028,118368,117887,118030 +1,5545,15398,118169,118170,118575,130479,139169,119784,130481 +1,99881,4994,117961,118300,118514,117905,117906,290919,117908 +1,76446,4620,117961,118300,119181,118784,121873,290919,118786 +1,41378,7683,117887,118178,117945,259173,201292,292795,118943 +1,15064,148198,117961,118300,118631,118054,120238,117887,118055 +0,16033,32795,118256,118257,118623,118995,286106,292795,118997 +1,35896,52972,118212,118213,118507,118863,157370,118398,118865 +1,13878,27873,117978,117979,117884,117879,117886,19721,117880 +1,24885,770,117961,118300,119181,128230,302830,4673,128231 +1,75327,53010,130570,119256,118042,118043,129174,270488,118046 +1,4675,75926,117961,118343,6104,118054,168337,117887,118055 +1,77466,93568,118976,118977,117895,118194,118195,117887,118196 +1,28856,6083,117961,117962,122224,118784,117906,290919,118786 +1,79121,131441,117961,118343,119598,119433,133686,118424,119435 +1,80799,19642,117961,118413,118481,117905,117906,290919,117908 +1,43663,8415,117961,118052,118881,117905,117906,290919,117908 +1,75334,18213,117961,118386,118692,118451,200947,118453,118454 +1,45790,51746,118752,119070,119121,118274,118806,292795,118276 +1,6042,6046,117961,118446,120317,118321,117906,290919,118322 +1,38704,98757,117959,117960,117941,117885,117913,117887,117888 +1,3853,59765,117961,118386,121961,118912,309291,118424,118914 +1,278393,4145,117961,118300,120026,118834,223125,118424,118836 +1,114621,4571,117961,118225,119924,118685,279443,308574,118687 +1,31246,7001,117961,118327,118933,128230,302830,4673,128231 +1,34924,3225,117961,118327,118929,118259,118260,290919,118261 +1,278393,15390,117961,118300,120026,118054,118054,117887,118055 +1,25231,21859,117961,118327,118507,118396,158043,118398,118399 
+0,79602,41795,118315,118316,118317,129561,216140,3130,129563 +1,43452,3486,118752,119070,118042,118043,118043,270488,118046 +1,17849,58535,117961,117969,130192,123400,130319,19793,123402 +1,27797,70062,117961,118386,118746,118321,117906,290919,118322 +1,80674,3918,117961,118343,118660,119433,133686,118424,119435 +1,3661,2210,117961,126102,118378,119192,300603,119184,119194 +1,110042,1540,117961,118343,123125,118536,118536,308574,118539 +1,78598,1350,117961,118052,122938,118321,117906,290919,118322 +1,39159,7285,117961,118225,118403,119849,310589,118638,119851 +1,27785,5001,117961,118300,123055,120006,128578,118424,120008 +1,31441,4375,117961,118343,128113,118826,257115,118424,118828 +1,34498,120340,120342,120343,119076,118834,124426,118424,118836 +1,27230,49850,117961,118413,122007,117905,118260,290919,117908 +1,14354,20648,118219,118220,117941,117879,117897,19721,117880 +1,25993,50015,117961,118343,119598,118422,300136,118424,118425 +1,80762,7525,117961,118225,120551,118321,117906,290919,118322 +1,97017,141,117961,118225,122273,120773,118959,118960,120774 +1,45380,46104,117961,118327,120559,120988,304465,118643,120989 +1,34950,7032,117961,118225,120551,118318,168365,118205,118319 +1,920,1398,117961,118300,120722,118784,130735,290919,118786 +1,25993,56058,118212,118213,118458,118912,309291,118424,118914 +1,41594,102574,117961,118327,118929,118459,294325,249618,118461 +1,80723,50751,117961,118225,120535,123670,305057,121916,123672 +1,20312,7364,117902,117903,119181,117905,118036,290919,117908 +1,73972,71186,117961,118386,118522,117905,134559,290919,117908 +1,25287,15626,117961,118386,129128,120516,307024,120518,120519 +1,4675,106630,118990,118991,119142,118321,117906,290919,118322 +1,74047,76568,117961,118386,118522,117905,117906,290919,117908 +1,15064,124033,124034,124035,117945,117946,125533,292795,117948 +0,18913,27518,117929,117930,119569,179731,194138,117887,117973 +1,17226,30523,117961,118343,119984,123067,269406,118398,123068 
+1,75078,28253,118315,118463,123089,117905,130219,290919,117908 +1,25993,25276,117961,118343,121747,118321,117906,290919,118322 +1,34950,56193,117961,118300,120410,118784,117906,290919,118786 +1,30570,4478,117961,118386,118692,118321,117906,290919,118322 +1,75078,88640,117961,118052,120096,118523,306404,118331,118525 +1,28943,2034,117961,118052,118328,118054,118054,117887,118055 +1,3853,23200,117961,118343,123125,118784,117906,290919,118786 +1,78598,5269,119596,119597,120356,122067,205243,118424,122069 +1,846,2610,117961,118327,121979,118321,117906,290919,118322 +1,30364,4549,117961,118225,120050,117905,117906,290919,117908 +1,36174,3918,117961,118343,118660,118826,158101,118424,118828 +1,31374,7703,117943,117944,117945,118274,129282,292795,118276 +1,33146,3118,117961,118300,118458,120789,309123,118424,120791 +1,43002,5496,91261,118026,118202,118321,117906,290919,118322 +1,78882,52056,118212,118213,119734,153957,119248,118667,153959 +1,15342,4468,117961,118413,119968,118321,117906,290919,118322 +1,73756,4718,121785,121786,120317,118321,117906,290919,118322 +1,3661,69115,120864,121013,118450,119192,300603,119184,119194 +1,112669,118619,117961,118300,119142,118784,147114,290919,118786 +1,43280,7076,117961,118225,119238,119093,136840,119095,119096 +1,42976,71391,117961,118386,118522,118318,168365,118205,118319 +1,39264,7684,118573,118574,118623,118995,129731,292795,118997 +1,1581,1728,117961,118327,118507,118890,124964,118398,118892 +1,43727,25900,119134,119135,119136,120056,283527,118474,120058 +1,78311,67615,117932,117933,117878,117879,117897,19721,117880 +1,74018,1350,117961,118052,120096,117905,117906,290919,117908 +1,43147,2930,117961,118343,118700,118321,118448,290919,118322 +1,14900,7398,117961,118225,118616,117905,240983,290919,117908 +1,30952,27054,117961,117969,19666,118368,118368,117887,118486 +1,23503,122272,117961,118225,122273,118321,240983,290919,118322 +1,42680,44022,117961,117962,122215,117905,240983,290919,117908 
+1,7543,7337,117961,118300,124725,118361,149305,118363,118364 +1,33149,1652,117961,118327,118507,179731,179731,117887,117973 +1,39555,7072,117961,117962,118352,118321,117906,290919,118322 +1,34817,26321,117961,118225,118320,118321,117906,290919,118322 +1,25993,5701,117961,118052,120709,118207,174445,270488,118209 +1,25993,59000,118181,118182,117920,118568,117897,19721,118570 +1,78518,1249,117961,118386,118910,118321,117906,290919,118322 +1,33054,4933,117961,118300,118458,120006,311622,118424,120008 +1,74593,18213,117961,118386,118746,118747,160002,118453,118749 +1,74196,18450,117961,118052,118821,117905,240983,290919,117908 +1,44909,3526,117961,118225,124130,119093,120324,119095,119096 +1,37651,50308,118090,118091,117884,117885,117913,117887,117888 +1,17308,4821,117961,118343,119598,120348,255097,118295,120350 +1,31732,4110,119596,119597,118292,118422,191064,118424,118425 +1,42085,4910,117961,118343,118911,118912,284092,118424,118914 +1,16071,70264,117961,118343,120722,118777,279443,308574,118779 +1,38719,74449,118079,118080,117878,117879,118177,19721,117880 +0,19751,7284,91261,118026,118202,118321,117906,290919,118322 +1,44858,17560,118752,118753,117945,280788,280788,292795,119082 +1,25570,1903,117961,118343,118514,118685,122058,308574,118687 +1,44874,13800,117961,118225,118403,118685,126297,308574,118687 +1,74817,4549,117961,118225,120050,117905,117906,290919,117908 +1,75639,70691,118315,118463,119214,118321,117906,290919,118322 +1,4605,70186,117961,117969,19666,118890,127642,118398,118892 +1,78878,8106,117961,118327,120559,120560,304465,118643,120562 +1,391,666,117926,118124,117920,117885,117913,117887,117888 +1,20293,27461,117926,118124,117884,117879,117886,19721,117880 +1,28941,116406,117961,117962,120677,122067,297566,118424,122069 +1,278393,18450,117961,118052,118821,117905,240983,290919,117908 +1,75078,78408,117961,118386,125884,307024,255097,118331,118332 +1,37789,6481,117961,118343,123454,118321,117906,290919,118322 
+1,34056,13854,117961,118052,118979,307024,306404,118331,118332 +1,19998,110900,119134,119135,123173,117896,173667,117887,117898 +1,4675,16561,117961,118327,120318,117905,117906,290919,117908 +1,40997,2291,117961,118225,120663,118777,279443,308574,118779 +1,25993,6999,117961,118343,121747,118321,117906,290919,118322 +1,14354,5905,117929,117930,117920,119323,123932,19793,119325 +1,31504,19271,117961,118225,119924,118784,117906,290919,118786 +1,19752,18686,117961,118386,128823,118321,117906,290919,118322 +1,23164,7647,117961,118300,119181,117905,117906,290919,117908 +1,15368,4098,117961,118343,119598,119433,133686,118424,119435 +1,41314,51372,117961,118343,120666,118777,279443,308574,118779 +1,33217,1467,117961,118343,118833,118834,127160,118424,118836 +1,41265,5186,118887,118888,124656,118912,309291,118424,118914 +1,40867,5196,117961,118386,121949,118777,279443,308574,118779 +0,79602,27214,117961,118386,118733,118523,310608,118331,118525 +1,28307,4567,117961,118300,118514,118278,118260,290919,118279 +1,34989,5249,117961,118386,123144,118396,269406,118398,118399 +0,43423,5423,117961,118327,118391,118321,117906,290919,118322 +1,75078,40932,118315,118463,118464,119172,162182,118467,119174 +1,45790,55789,121785,121786,120299,121122,217580,118612,121124 +1,17249,4904,119170,119171,121747,118834,127405,118424,118836 +1,23971,158757,120342,120343,118395,118396,266864,118398,118399 +1,44814,1938,117961,118300,118066,120988,134839,118643,120989 +1,72024,26009,117961,118052,119986,118685,120316,308574,118687 +0,845,13836,119920,119921,142145,128230,302830,4673,128231 +1,74412,16756,119062,130600,139677,117905,117906,290919,117908 +0,3069,15412,117961,118343,127705,119849,170027,118638,119851 +1,34950,7525,117961,118225,120551,118784,147114,290919,118786 +1,22430,7388,117961,118225,119924,118318,168365,118205,118319 +1,75708,15642,117961,118386,118522,119997,278014,118131,119998 +1,6977,221,117961,118413,120370,118784,240983,290919,118786 
+1,6977,13419,117961,118300,120059,120812,305057,118638,120814 +1,16985,743,117961,118343,118437,118321,117906,290919,118322 +1,75687,49952,117932,117933,117878,120628,126012,118131,120629 +0,41932,2710,118290,118291,120823,118396,269406,118398,118399 +1,33147,21037,117961,117969,19666,179731,257952,117887,117973 +1,79346,58581,117959,117960,117920,118568,135898,19721,118570 +1,16145,1030,117961,118343,120722,124435,127527,118363,124436 +1,87711,40509,117961,118225,118403,119743,141396,3130,119745 +1,27124,5899,117961,118327,120318,118641,120319,118643,118644 +1,31995,17835,117961,118225,118403,128230,302830,4673,128231 +1,30837,5249,117961,118386,123144,118396,233714,118398,118399 +1,79092,1482,117961,117962,118840,118841,123378,118643,118843 +1,79814,7553,117961,118225,118403,118321,117906,290919,118322 +0,38480,51382,118079,118080,117878,117879,118177,19721,117880 +1,4675,6021,117961,118413,122007,124886,190212,118643,124888 +1,33054,53081,117961,118343,119598,119433,133686,118424,119435 +0,39149,3648,117961,118300,118631,118980,301534,118295,118982 +1,75078,257604,117961,118386,119214,117905,117906,290919,117908 +1,16018,1550,117961,118225,123173,118054,119601,117887,118055 +1,15714,3606,118169,118170,118179,126684,204891,292795,126685 +1,38391,52423,119665,119666,117895,117896,117897,117887,117898 +1,23194,13441,117961,118225,122870,124886,128018,118643,124888 +1,6723,2631,117961,118327,120624,117905,117906,290919,117908 +1,21373,7915,117961,118300,118597,118834,127160,118424,118836 +1,32270,7434,117961,118343,118514,117905,117906,290919,117908 +1,17308,3656,117961,118300,119890,118054,118054,117887,118055 +1,6719,4549,117961,118225,120050,117905,117906,290919,117908 +1,14912,770,117961,118300,119181,118451,130134,118453,118454 +1,79323,3120,117961,118300,120312,120313,120314,118424,120315 +1,40630,15450,117916,117917,117941,117879,117886,19721,117880 +1,42132,6235,117961,118343,118437,124194,249872,118363,124196 
+1,17226,15682,117961,118300,118957,118321,117906,290919,118322 +1,34800,60019,117943,117944,118575,259173,298522,292795,118943 +1,45014,16850,117961,118225,122298,119093,120324,119095,119096 +1,13878,13234,117922,117923,117920,122142,122142,19721,122143 +1,80642,57719,117961,118343,149210,126516,217580,118612,126518 +1,33152,21037,117961,117969,19666,179731,257952,117887,117973 +1,45143,8204,117961,118052,118867,118054,305057,117887,118055 +1,17249,5015,117961,118300,118395,118890,311441,118398,118892 +1,7543,3000,117961,118225,120551,118321,118448,290919,118322 +1,14914,4584,117961,118300,120297,118321,117906,290919,118322 +1,1023,16557,117961,118327,120318,117905,117906,290919,117908 +1,3853,3046,117961,118413,118481,118685,120679,308574,118687 +1,42360,8368,91261,118026,118202,117905,117906,290919,117908 +1,14914,8717,117961,118225,118403,118451,130134,118453,118454 +1,75834,4853,118887,118888,120126,118172,301534,249618,118175 +1,25993,5220,117961,118300,118458,120006,303717,118424,120008 +1,77295,1088,117961,118327,119830,117905,117906,290919,117908 +1,75210,3332,117961,118343,121747,118784,130735,290919,118786 +1,42093,3608,117961,118052,120671,120418,232183,249618,120419 +1,79121,132399,120342,120343,119076,118422,124426,118424,118425 +1,80676,5234,117961,118343,118660,119433,133686,118424,119435 +1,25703,23125,117961,118225,119136,119137,128585,118474,119139 +1,39883,16671,117961,118052,118706,118523,310608,118331,118525 +1,70452,58710,117980,117981,117884,117879,117886,19721,117880 +1,25278,4124,117961,118300,127849,118054,310608,117887,118055 +0,43410,70078,91261,118026,118063,128422,137673,118453,128424 +1,103511,4023,117961,118446,119064,117905,240983,290919,117908 +1,41758,2948,118315,118316,225010,118321,117906,290919,118322 +1,40853,7520,117961,118343,124725,117905,240983,290919,117908 +1,17249,4322,117961,118343,118833,118834,309123,118424,118836 +1,83255,5524,117961,118343,118514,117905,240983,290919,117908 
+1,1020,57862,118658,118659,123606,128093,128094,119184,128095 +1,78749,17713,117961,118225,122273,118321,117906,290919,118322 +1,42085,4716,118290,118291,120026,120344,165069,118424,120346 +1,39331,17389,91261,118026,118692,118321,117906,290919,118322 +1,116247,1755,117961,117962,119223,150752,137949,118643,150754 +1,75324,3287,118752,119070,118042,118043,118043,270488,118046 +1,32032,3236,117961,118327,129972,117905,240983,290919,117908 +1,80206,27147,117961,118343,122299,119949,121350,290919,119951 +1,73815,71385,118315,118463,118522,119962,120689,118205,119964 +1,77659,79403,117983,117984,117878,117879,117897,19721,117880 +1,34430,5506,117961,118343,118514,118321,117906,290919,118322 +1,41671,16565,91261,118026,118063,118028,240337,117887,118030 +1,28472,215806,118315,118316,123656,118321,242891,290919,118322 +1,70790,8074,117961,118300,119181,118321,117906,290919,118322 +1,1937,2014,117961,117962,117904,120773,236750,118960,120774 +1,78849,15887,117961,118052,120356,123737,305057,118960,123738 +1,79092,13820,117961,118343,119796,119849,219966,118638,119851 +1,79092,57596,118181,118182,117920,118568,124610,19721,118570 +1,33248,2908,117961,118343,118979,120344,303717,118424,120346 +1,43247,7022,119062,119091,118535,118321,117906,290919,118322 +1,27356,119350,117961,118386,118522,119351,123417,3130,119353 +1,43416,32241,117961,118327,118933,118321,240983,290919,118322 +1,42972,17244,117961,118052,120398,120006,311622,118424,120008 +1,33154,2014,117961,117962,118910,120773,127525,118960,120774 +1,1281,74967,117961,117962,117904,127782,160080,290919,127783 +1,7543,23136,117961,118052,119742,117905,117906,290919,117908 +1,78399,60097,118212,118213,121014,122645,208559,119221,122647 +1,78972,5313,117961,118386,120356,120357,201020,118424,120359 +1,44812,1088,117961,118327,119830,118321,117906,290919,118322 +1,16037,2683,120864,120865,118171,122129,281792,121916,122131 +1,25442,77495,118192,118193,117895,118568,281735,19721,118570 
+1,15064,3211,117961,118446,120368,118054,118054,117887,118055 +0,18411,71897,118990,118991,118992,117905,117906,290919,117908 +1,78602,2641,117961,118327,118391,118321,117906,290919,118322 +1,92326,26312,138798,138799,120539,118321,117906,290919,118322 +1,15356,8686,117961,118327,119830,118321,117906,290919,118322 +1,42093,51096,119596,119597,124266,118728,272276,118295,118730 +1,40440,7014,117961,118446,119961,118278,118260,290919,118279 +0,33642,6982,117961,118300,118783,118784,213944,290919,118786 +1,79652,7578,117961,118343,120722,118784,121873,290919,118786 +1,25749,17727,118169,118170,118171,118172,118173,249618,118175 +1,80207,14889,117961,118300,118437,118321,117906,290919,118322 +0,79295,46526,117961,118300,119984,118396,269406,118398,118399 +1,77425,4638,117961,118343,119987,117905,117906,290919,117908 +1,980,4498,117890,118102,117878,134118,211100,119788,134120 +1,44451,44022,117961,117962,122215,127782,130085,290919,127783 +1,42367,14455,118006,118007,117941,117899,117897,19721,117900 +1,81316,54618,117961,118052,118992,118784,118785,290919,118786 +1,90528,2286,117902,117903,277693,120773,118959,118960,120774 +1,4675,50976,117961,118413,119968,118321,117906,290919,118322 +1,79329,16024,117961,118300,119984,120647,311441,118398,120649 +0,80195,19832,117961,118327,123757,118321,117906,290919,118322 +0,77300,57531,118582,118583,117945,118274,249704,292795,118276 +1,75901,17612,117961,118343,119598,120006,310997,118424,120008 +0,28441,54618,117961,118052,118821,118321,117906,290919,118322 +0,37732,65460,118079,118080,117878,117879,118177,19721,117880 +1,6042,5504,117961,118300,118514,117905,117906,290919,117908 +1,16088,3838,117961,118225,119924,118321,117906,290919,118322 +1,43452,7790,118573,118574,117945,117946,120541,292795,117948 +1,20364,3119,117961,118300,120312,119849,236236,118638,119851 +1,38704,12732,117910,117911,117912,117885,117913,117887,117888 +1,75834,122272,117961,118225,122273,118321,240983,290919,118322 
+1,4675,205,117961,118386,118746,118321,240983,290919,118322 +1,4675,1928,117961,118300,123719,117905,117906,290919,117908 +1,77357,14731,117961,118446,118447,118321,117906,290919,118322 +1,15020,2518,117961,118300,19772,118777,164026,308574,118779 +1,32270,21303,117961,118300,119890,307024,152866,118331,118332 +1,20222,46663,118256,118257,118623,118995,118806,292795,118997 +1,78229,7023,117961,117962,119223,118259,118260,290919,118261 +1,20226,794,118752,119070,117945,280788,152940,292795,119082 +1,79092,17199,117961,117969,118970,118890,152357,118398,118892 +1,36252,273476,117926,118266,117920,118568,310732,19721,118570 +1,73756,17549,117961,118446,119064,122297,301475,118331,118899 +1,19949,19952,117961,118327,118529,118054,118054,117887,118055 +1,80947,4583,117961,118413,120370,118321,240983,290919,118322 +0,81274,46526,117961,118300,119984,118396,269406,118398,118399 +1,34583,7578,117961,118343,120722,117905,117906,290919,117908 +1,15064,25937,203209,119256,119142,179731,128635,117887,117973 +1,29693,75693,4292,124335,274241,130060,128198,119772,130062 +1,79092,15990,117961,118413,125133,119849,128470,118638,119851 +1,15710,1530,117961,118300,119984,118890,311441,118398,118892 +1,15679,1080,117961,118327,118378,118451,151171,118453,118454 +1,33642,80413,118163,118164,117878,117879,117879,19721,117880 +1,30570,60789,117961,118386,118522,118536,118537,308574,118539 +1,36267,35191,117961,118446,118447,118777,279443,308574,118779 +1,74593,7411,117961,118386,118746,118451,130134,118453,118454 +1,25565,1755,117961,117962,119223,125793,146749,118643,125795 +1,13878,13262,118219,118220,118221,117879,117879,19721,117880 +1,80213,1334,117961,117962,118910,118451,130134,118453,118454 +1,71236,5048,117961,118300,118458,119928,310608,118331,119929 +1,20226,2440,118752,119070,119121,280788,280788,292795,119082 +1,44722,4576,117961,118327,118391,118321,235272,290919,118322 +1,71236,5211,118290,118291,119837,118980,261436,118295,118982 
+1,57888,7647,117961,118300,119181,118321,117906,290919,118322 +1,4675,17790,117961,118225,119924,117905,117906,290919,117908 +1,33054,59765,117961,118386,118317,118523,294485,118331,118525 +0,5353,2305,117961,117962,119223,128230,302830,4673,128231 +1,3853,2343,119691,119692,118635,122967,311441,119695,122969 +1,78693,25631,117961,118343,120291,118293,118302,118295,118296 +1,23994,13828,117961,118300,118395,118890,125128,118398,118892 +1,16767,4663,117961,118225,122298,120591,132696,119095,120593 +1,983,1466,117961,118300,123749,125010,157062,118424,125012 +1,20364,3990,117961,117962,118409,122645,151214,119221,122647 +1,28117,5278,118887,118888,125440,120006,303717,118424,120008 +1,25993,3954,117961,118343,120291,118422,300136,118424,118425 +1,37785,101834,119134,119135,121716,120611,125083,249618,120613 +1,23921,120340,120342,120343,119076,118834,311236,118424,118836 +1,74726,51235,120864,121013,124133,120344,303717,118424,120346 +1,78165,17988,117961,117969,118816,120647,129715,118398,120649 +1,42457,17199,117961,117969,118970,118890,311441,118398,118892 +1,75443,17227,117961,118343,118979,120006,255097,118424,120008 +1,40867,83216,91261,118026,123656,118278,151426,290919,118279 +1,80763,5717,117961,118343,124725,118321,117906,290919,118322 +0,34215,4084,117961,118052,118992,118321,240983,290919,118322 +1,74611,17156,117961,118300,118810,123082,197687,19793,123084 +1,18072,15389,117961,118225,122870,118636,310589,118638,118639 +1,4675,4642,117961,118225,120551,118321,133936,290919,118322 +1,77996,84772,117961,118327,121694,122022,306795,119221,122024 +1,31224,53176,117961,117969,118970,118890,311441,118398,118892 +1,42093,60790,117961,118052,118867,118523,311622,118331,118525 +1,79092,5490,117961,117962,5488,120618,304465,118643,120619 +1,70299,7014,117961,118446,119961,118278,118260,290919,118279 +1,16443,7338,117961,118225,119924,118321,118448,290919,118322 +1,43069,17270,117961,118300,128830,118826,150790,118424,118828 
+1,80893,5604,117961,118343,119598,118685,155651,308574,118687 +1,34832,49024,132839,145248,118552,118054,132841,117887,118055 +1,27356,7069,117961,118386,123844,118784,121926,290919,118786 +1,13878,25772,118006,118007,117941,117899,117897,19721,117900 +1,278393,4837,117961,118300,120026,118054,119601,117887,118055 +1,23921,4004,117961,118343,118856,118321,117906,290919,118322 +1,25553,66400,117910,117911,117920,123191,123191,19721,123192 +1,43680,13047,118095,118096,118008,118568,118568,19721,118570 +1,23966,14855,117961,118300,123719,118451,153802,118453,118454 +1,704,25237,117961,117962,118910,118777,279443,308574,118779 +1,18382,16566,117961,118327,120559,120618,304465,118643,120619 +1,20701,87,117961,118446,118447,118361,122092,118363,118364 +1,33111,7525,117961,118225,120551,117905,117906,290919,117908 +1,23921,15390,117961,118300,120026,118054,118054,117887,118055 +1,25554,49961,117961,118225,118403,117905,117906,290919,117908 +1,78122,50255,5110,117954,117895,118568,281735,19721,118570 +0,76533,6071,117961,118413,122007,118321,117906,290919,118322 +0,21447,31093,117961,118343,118810,118674,123679,19793,118676 +1,38972,18096,118256,118257,118623,118995,286106,292795,118997 +1,4675,3332,117961,118343,121747,118784,130735,290919,118786 +1,20268,31984,117910,118855,117912,117879,117886,19721,117880 +1,23086,215806,118315,118316,123656,118321,211648,290919,118322 +1,6690,3000,117961,118225,120551,117905,117906,290919,117908 +1,34924,28590,117961,118327,120685,118321,117906,290919,118322 +1,45143,15904,117961,118052,118053,118636,121809,118638,118639 +1,75834,15474,118169,118170,121023,118172,130464,249618,118175 +1,73966,50952,117961,118327,118929,117905,117906,290919,117908 +1,34801,2803,117961,117962,126930,119587,145659,118704,119589 +1,72345,59748,117961,118386,120361,118321,117906,290919,118322 +1,79954,17828,117961,118300,118514,128230,302830,4673,128231 +1,38719,50213,118079,118080,117878,117885,118177,117887,117888 
+1,22682,7014,117961,118446,119961,118278,118260,290919,118279 +1,86309,6277,117961,118343,118437,117905,240983,290919,117908 +1,28149,5697,117961,118386,118692,118784,117906,290919,118786 +1,40061,15012,118212,118213,123201,117905,123202,290919,117908 +1,93625,3967,117961,118052,118706,118321,117906,290919,118322 +1,28532,26229,117916,118011,117941,117879,117886,19721,117880 +0,25270,71202,117961,118386,118522,128764,256805,118612,128765 +1,27800,52127,118212,118213,120356,123045,277693,120518,123047 +1,25287,205,117961,118386,118746,118321,117906,290919,118322 +1,42127,2837,118887,118888,118631,118422,300136,118424,118425 +1,38392,19429,117918,117919,117884,117879,117886,19721,117880 +1,79363,1030,117961,118343,120722,118361,118362,118363,118364 +1,25740,18450,117961,118052,118821,118321,118321,290919,118322 +1,38392,47641,117929,117930,117884,117879,117886,19721,117880 +1,15806,3868,117961,118052,120096,117905,117906,290919,117908 +1,40069,4449,117961,118300,118825,118054,118054,117887,118055 +1,43395,802,118752,118753,117945,280788,280788,292795,119082 +1,20897,17858,117961,118386,118673,117985,120203,19793,117987 +1,28507,58466,118602,118603,117920,118568,171084,19721,118570 +1,974,7578,117961,118343,120722,118321,240983,290919,118322 +1,17308,4729,117961,118300,125821,118980,301534,118295,118982 +1,34924,25255,117961,118327,120299,120300,292871,120302,120303 +1,26956,18686,117961,118386,121883,118321,117906,290919,118322 +1,45019,46224,117961,118327,118378,120952,143223,118453,120954 +1,20733,1549,118084,118085,117920,118568,310732,19721,118570 +1,6155,67326,117910,117911,117912,118568,198040,19721,118570 +1,17308,52140,118887,118888,124656,118980,236007,118295,118982 +1,31441,16817,117961,118386,123144,123067,300044,118398,123068 +1,41992,54308,126974,126975,118518,118043,118756,270488,118046 +1,33232,4378,117961,118327,118507,118863,122008,118398,118865 +1,19998,20183,118752,119070,119136,119587,176103,118704,119589 
+1,80156,205,117961,118386,118746,118321,117906,290919,118322 +1,32270,2946,117961,118300,119796,118685,120316,308574,118687 +1,16219,70297,117961,118327,118744,118321,117906,290919,118322 +1,19399,46788,117929,117940,117920,118568,163031,19721,118570 +1,25270,18454,117961,118343,119598,125171,257115,118424,125173 +1,34895,23111,119691,119692,118635,122967,117906,119695,122969 +1,14354,47104,117890,117891,117878,117879,117879,19721,117880 +1,16503,46503,120140,120141,118378,120952,120953,118453,120954 +1,17249,52098,119062,119091,118957,117905,117906,290919,117908 +1,22680,18450,117961,118052,118821,118321,117906,290919,118322 +1,4675,8430,117961,118225,118403,118321,227996,290919,118322 +1,17849,17856,117961,117962,117963,123400,143238,19793,123402 +1,967,20219,119280,119281,117945,280788,280788,292795,119082 +1,77300,15459,118169,118170,135245,259173,143905,292795,118943 +1,39182,25465,118023,118024,117895,118568,157237,19721,118570 +1,79316,46526,117961,118300,119984,120647,311441,118398,120649 +1,5372,3966,117961,118343,122012,118321,240983,290919,118322 +0,75901,51345,117961,118052,120417,118702,125047,118704,118705 +1,34924,6220,117961,118327,121645,120690,120691,290919,120692 +1,79121,5288,119596,119597,149666,120357,149667,118424,120359 +1,4675,5511,117961,118386,119954,117905,117906,290919,117908 +1,80771,4413,117961,118343,118833,117905,117906,290919,117908 +1,79713,6982,117961,118300,118783,118321,117906,290919,118322 +1,99461,8686,117961,118327,119830,118321,117906,290919,118322 +0,20284,60823,118953,118954,117941,117879,117886,19721,117880 +1,16345,44918,117902,117903,118450,117905,118036,290919,117908 +1,30865,7560,117961,118343,119181,118321,117906,290919,118322 +1,17849,1533,117961,117969,118800,118801,160694,19793,118803 +1,4675,8453,117961,118343,123125,307024,311622,118331,118332 +1,35525,17002,5110,117954,117895,118568,157237,19721,118570 +1,17308,1903,117961,118343,118514,118685,122058,308574,118687 
+1,41475,14731,117961,118446,118447,118784,213944,290919,118786 +1,44722,16741,117961,118327,119995,117905,117906,290919,117908 +1,43273,51058,117902,118041,134257,118028,118368,117887,118030 +1,25416,11529,117929,117940,117941,117879,117886,19721,117880 +1,31441,14800,117961,117962,118352,118784,117906,290919,118786 +1,38782,36327,117961,118327,122070,122129,305057,121916,122131 +1,76416,25329,118212,118213,124449,119849,164945,118638,119851 +1,17308,15620,122880,122974,118042,118043,118043,270488,118046 +1,31823,4272,117961,118343,120666,123045,123107,120518,123047 +0,36926,15412,117961,118343,127705,119849,170027,118638,119851 +1,27727,3281,117961,118225,275600,119093,120324,119095,119096 +1,91749,249,117961,118225,118403,117905,117906,290919,117908 +1,23567,7338,117961,118225,119924,118321,152852,290919,118322 +1,43822,189,117961,118413,121639,117905,240983,290919,117908 +1,33642,93163,118163,118164,117878,117879,117879,19721,117880 +1,39883,15422,117961,118052,122392,130479,209981,119784,130481 +1,38704,58694,117916,118011,117912,117885,117913,117887,117888 +1,75954,3608,117961,118052,120671,119849,139343,118638,119851 +1,80381,783,117961,118413,127522,121594,126485,4673,121596 +1,15805,18213,117961,118386,125016,118451,126309,118453,118454 +1,75078,71391,117961,118386,118522,118278,229232,290919,118279 +1,80537,5739,117961,118446,118684,127108,235721,118667,127110 +1,31694,15572,118169,118170,117945,117946,288088,292795,117948 +1,38704,58694,117916,118011,117912,117879,117897,19721,117880 +1,6711,6021,120140,120141,122007,118641,123881,118643,118644 +1,5389,16541,117961,118225,120551,117905,117906,290919,117908 +1,33147,2908,117961,118343,118979,118980,301534,118295,118982 +1,40891,51138,117961,118413,119968,118321,240983,290919,118322 +1,76626,17759,117961,118413,122007,118685,120316,308574,118687 +1,79611,50736,117961,118052,118821,117905,117906,290919,117908 +1,27623,46224,117961,118327,118378,120952,120953,118453,120954 
+1,33149,21037,117961,117969,19666,179731,257952,117887,117973 +1,17308,3993,117961,118343,120126,120497,223125,118424,120499 +1,14662,5240,118887,118888,127284,118912,309291,118424,118914 +0,23499,15390,117961,118300,118301,120344,311360,118424,120346 +1,23966,52105,118595,118596,81476,118422,149098,118424,118425 +1,43006,6137,117961,118225,119924,118259,118260,290919,118261 +1,81043,6454,117961,118343,118856,117905,172635,290919,117908 +1,37672,50366,118138,118139,117884,117885,117913,117887,117888 +1,43300,7339,117961,118300,118360,118361,118362,118363,118364 +1,3853,5330,117961,118052,119408,118523,310608,118331,118525 +1,74302,49987,118090,118091,117920,118568,133542,19721,118570 +1,25328,1553,117961,118225,123173,118054,120514,117887,118055 +0,27416,82626,118090,118091,117912,117885,117913,117887,117888 +1,74142,46254,117961,118327,126310,124886,123881,118643,124888 +1,6725,6846,117961,117969,6725,126264,146516,6725,126266 +1,32223,12757,118120,118121,117920,119323,123932,19793,119325 +1,35017,131441,117961,118343,119598,119433,133686,118424,119435 +1,80754,16024,117961,118300,119984,123067,269406,118398,123068 +1,34950,5432,117961,118327,118391,118321,117906,290919,118322 +1,32586,5320,118290,118291,119598,120344,260942,118424,120346 +1,4673,89939,117961,118413,122007,128230,302830,4673,128231 diff --git a/modules/ml/catboost-model-parser/src/test/resources/fit_script/.gitignore b/modules/ml/catboost-model-parser/src/test/resources/fit_script/.gitignore new file mode 100644 index 0000000000000..bb91442e8fd65 --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/fit_script/.gitignore @@ -0,0 +1,2 @@ +venv/* +catboost_info/* \ No newline at end of file diff --git a/modules/ml/catboost-model-parser/src/test/resources/fit_script/README.md b/modules/ml/catboost-model-parser/src/test/resources/fit_script/README.md new file mode 100644 index 0000000000000..57d511414077e --- /dev/null +++ 
b/modules/ml/catboost-model-parser/src/test/resources/fit_script/README.md @@ -0,0 +1,19 @@ +# Fit catboost regression / classification model + +1. Install packages from `install.txt` + ``` + pip install -r install.txt + ``` + +2. Run `train_clf.py` and use: + - Model file `model_clf.cbm` + - Stdout `Parameters` & `Expected predict` for unit test + - Test prediction in examples `amazon-employee-access-challenge-sample-expected-results.csv` + +Full version of dataset (amazon-employee-access-challenge.csv) available 
at https://www.kaggle.com/c/amazon-employee-access-challenge/data + +3. Run `train_reg.py` and use: + - Model file `model_reg.cbm` + - Stdout `Parameters` & `Expected predict` for unit test + - Test prediction in examples `boston_housing_dataset-catboost-expected-results.txt` diff --git a/modules/ml/catboost-model-parser/src/test/resources/fit_script/install.txt b/modules/ml/catboost-model-parser/src/test/resources/fit_script/install.txt new file mode 100644 index 0000000000000..0408b6ca126af --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/fit_script/install.txt @@ -0,0 +1,3 @@ +pandas==1.1.4 +catboost==0.24.2 +numpy==1.19.4 \ No newline at end of file diff --git a/modules/ml/catboost-model-parser/src/test/resources/fit_script/train_clf.py b/modules/ml/catboost-model-parser/src/test/resources/fit_script/train_clf.py new file mode 100644 index 0000000000000..75d3dd36dd3af --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/fit_script/train_clf.py @@ -0,0 +1,71 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements.  See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import catboost +import numpy as np +import pandas as pd + +DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../datasets/amazon-employee-access-challenge.csv") +DATA_SAMPLE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../datasets/amazon-employee-access-challenge-sample.csv") +DATA_SAMPLE_PREDICT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../datasets/amazon-employee-access-challenge-sample-expected-results.csv") +MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../models/model_clf.cbm") + + +def main(): + # load dataset + df = pd.read_csv(DATA_PATH) + + target = 'ACTION' + features = [ + column + for column in df.columns + if column != target + ] + + # fit model + model = catboost.CatBoost({ + 'loss_function': 'Logloss', + 'verbose': False, + 'random_seed': 0 + }) + model.fit(df[features], df[target]) + model.save_model(MODEL_PATH) + + # predict on sample + df_sample = pd.read_csv(DATA_SAMPLE_PATH) + predicts = model.predict(df_sample) + predicts = np.power(1 + np.exp(-predicts), -1) + pd.DataFrame({'x': predicts}).to_csv(DATA_SAMPLE_PREDICT_PATH, index=False, header=False) + + # predict on one sample + print('Parameters:') + r = df_sample[:1].to_dict('records') + for k, v in r[0].items(): + print(f'input.put("{k}", {v}.0);') + + print('Expected predict:') + print(np.power(1 + np.exp(-model.predict(df_sample[:1])[0]), -1)) + + # Use formula for classification probability = power(1 + exp(-predict), -1) + # ref: 
https://github.com/catboost/benchmarks/blob/61d62512f751325a14dd885bb71f8c2dabf7e24b/quality_benchmarks/catboost_experiment.py + + +if __name__ == '__main__': + main() diff --git a/modules/ml/catboost-model-parser/src/test/resources/fit_script/train_reg.py b/modules/ml/catboost-model-parser/src/test/resources/fit_script/train_reg.py new file mode 100644 index 0000000000000..44cc58318aa1d --- /dev/null +++ b/modules/ml/catboost-model-parser/src/test/resources/fit_script/train_reg.py @@ -0,0 +1,68 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +import catboost +import pandas as pd + +DATA_PATH = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '../../../../../../../examples/src/main/resources/datasets/boston_housing_dataset.txt') +MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), + "../models/model_reg.cbm") +DATA_SAMPLE_PREDICT_PATH = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '../../../../../../../examples/src/main/resources/datasets/boston_housing_dataset-catboost-expected-results' + '.txt') + + +def main(): + # load dataset + features = [ + f'f_{i}' + for i in range(13) + ] + target = 'target' + + df = pd.read_csv(DATA_PATH, names=features + ['target']) + + # fit model + model = catboost.CatBoost({ + 'loss_function': 'RMSE', + 'verbose': False, + 'random_seed': 0 + }) + model.fit(df[features], df[target]) + model.save_model(MODEL_PATH) + + # predict on sample + predicts = model.predict(df[features]) + pd.DataFrame({ + 'x': predicts + }).to_csv(DATA_SAMPLE_PREDICT_PATH, index=False, header=False) + + # predict on one sample + print('Parameters:') + r = df[:1][features].to_dict('records') + for k, v in r[0].items(): + print(f'input.put("{k}", {v}d);') + + print('Expected predict:') + print(model.predict(df[:1])[0]) + + +if __name__ == '__main__': + main() diff --git a/modules/ml/catboost-model-parser/src/test/resources/models/model_clf.cbm b/modules/ml/catboost-model-parser/src/test/resources/models/model_clf.cbm new file mode 100644 index 0000000000000..f915c27cd6b87 Binary files /dev/null and b/modules/ml/catboost-model-parser/src/test/resources/models/model_clf.cbm differ diff --git a/modules/ml/catboost-model-parser/src/test/resources/models/model_reg.cbm b/modules/ml/catboost-model-parser/src/test/resources/models/model_reg.cbm new file mode 100644 index 0000000000000..d311a529143fd Binary files /dev/null and b/modules/ml/catboost-model-parser/src/test/resources/models/model_reg.cbm differ diff --git 
a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderPartitionData.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderPartitionData.java index 019c36cc5782b..edb889cecd696 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderPartitionData.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderPartitionData.java @@ -18,6 +18,7 @@ package org.apache.ignite.ml.preprocessing.encoding; import java.util.Map; +import org.apache.ignite.ml.preprocessing.encoding.target.TargetCounter; /** * Partition data used in Encoder preprocessor. @@ -29,6 +30,9 @@ public class EncoderPartitionData implements AutoCloseable { /** Frequencies of categories for label presented as strings. */ private Map labelFrequencies; + /** Target encoding meta of categories for label presented as strings. */ + private TargetCounter[] targetCounters; + /** * Constructs a new instance of String Encoder partition data. */ @@ -53,6 +57,15 @@ public Map labelFrequencies() { return labelFrequencies; } + /** + * Gets the map of target encoding meta by value in partition for label. + * + * @return The target encoding meta. + */ + public TargetCounter[] targetCounters() { + return targetCounters; + } + /** * Sets the array of maps of frequencies by value in partition for each feature in the dataset. * @@ -75,6 +88,12 @@ public EncoderPartitionData withLabelFrequencies(Map labelFrequ return this; } + /** */ + public EncoderPartitionData withTargetCounters(TargetCounter[] targetCounters) { + this.targetCounters = targetCounters; + return this; + } + /** */ @Override public void close() { // Do nothing, GC will clean up. 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java index c5a88aebe08ad..c9679ccf70803 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java @@ -37,6 +37,9 @@ import org.apache.ignite.ml.preprocessing.encoding.label.LabelEncoderPreprocessor; import org.apache.ignite.ml.preprocessing.encoding.onehotencoder.OneHotEncoderPreprocessor; import org.apache.ignite.ml.preprocessing.encoding.stringencoder.StringEncoderPreprocessor; +import org.apache.ignite.ml.preprocessing.encoding.target.TargetCounter; +import org.apache.ignite.ml.preprocessing.encoding.target.TargetEncoderPreprocessor; +import org.apache.ignite.ml.preprocessing.encoding.target.TargetEncodingMeta; import org.apache.ignite.ml.structures.LabeledVector; import org.jetbrains.annotations.NotNull; @@ -56,6 +59,18 @@ public class EncoderTrainer implements PreprocessingTrainer { /** Encoder sorting strategy. 
*/ private EncoderSortingStrategy encoderSortingStgy = EncoderSortingStrategy.FREQUENCY_DESC; + /** Index of target for target encoding */ + private Integer targetLabelIndex; + + /** Smoting param for target encoding */ + private Double smoothing = 1d; + + /** Min samples leaf for target encoding */ + private Integer minSamplesLeaf = 1; + + /** Min category size for target concoding */ + private Long minCategorySize = 10L; + /** {@inheritDoc} */ @Override public EncoderPreprocessor fit( LearningEnvironmentBuilder envBuilder, @@ -82,7 +97,17 @@ public class EncoderTrainer implements PreprocessingTrainer { partData.withLabelFrequencies(lbFrequencies); } - else { + else if (encoderType == EncoderType.TARGET_ENCODER) { + TargetCounter[] targetCounter = null; + + while (upstream.hasNext()) { + UpstreamEntry entity = upstream.next(); + LabeledVector row = basePreprocessor.apply(entity.getKey(), entity.getValue()); + + targetCounter = updateTargetCountersForNextRow(row, targetCounter); + } + partData.withTargetCounters(targetCounter); + } else { // This array will contain not null values for handled indices Map[] categoryFrequencies = null; @@ -107,6 +132,8 @@ public class EncoderTrainer implements PreprocessingTrainer { return new LabelEncoderPreprocessor<>(calculateEncodingValuesForLabelsByFrequencies(dataset), basePreprocessor); case FREQUENCY_ENCODER: return new FrequencyEncoderPreprocessor<>(calculateEncodingFrequencies(dataset), basePreprocessor, handledIndices); + case TARGET_ENCODER: + return new TargetEncoderPreprocessor<>(calculateTargetEncodingFrequencies(dataset), basePreprocessor, handledIndices); default: throw new IllegalStateException("Define the type of the resulting prerocessor."); } @@ -116,6 +143,86 @@ public class EncoderTrainer implements PreprocessingTrainer { } } + /** + * Calculates encoding frequencies as avarage category target on amount of rows in dataset. + * + * NOTE: The amount of rows is calculated as sum of absolute frequencies. 
+ * + * @param dataset Dataset. + * @return Encoding frequency for each feature. + */ + private TargetEncodingMeta[] calculateTargetEncodingFrequencies(Dataset dataset) { + TargetCounter[] targetCounters = dataset.compute( + EncoderPartitionData::targetCounters, + (a, b) -> { + if (a == null) + return b; + + if (b == null) + return a; + + assert a.length == b.length; + + for (int i = 0; i < a.length; i++) { + if (handledIndices.contains(i)) { + int finalI = i; + b[i].setTargetSum(a[i].getTargetSum() + b[i].getTargetSum()); + b[i].setTargetCount(a[i].getTargetCount() + b[i].getTargetCount()); + a[i].getCategoryCounts() + .forEach((k, v) -> b[finalI].getCategoryCounts().merge(k, v, Long::sum)); + a[i].getCategoryTargetSum() + .forEach((k, v) -> b[finalI].getCategoryTargetSum().merge(k, v, Double::sum)); + } + } + return b; + } + ); + + TargetEncodingMeta[] targetEncodingMetas = new TargetEncodingMeta[targetCounters.length]; + for (int i = 0; i < targetCounters.length; i++) { + if (handledIndices.contains(i)) { + TargetCounter targetCounter = targetCounters[i]; + + targetEncodingMetas[i] = new TargetEncodingMeta() + .withGlobalMean(targetCounter.getTargetSum() / targetCounter.getTargetCount()) + .withCategoryMean(calculateCategoryTargetEncodingFrequency(targetCounter)); + } + } + return targetEncodingMetas; + } + + /** + * Calculates encoding frequencies as avarage category target on amount of rows in dataset. + * + * @param targetCounter target Counter. + * @return Encoding frequency for each category. 
+ */ + private Map calculateCategoryTargetEncodingFrequency(TargetCounter targetCounter) { + double prior = targetCounter.getTargetSum() / + targetCounter.getTargetCount(); + + return targetCounter.getCategoryTargetSum().entrySet().stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + value -> { + double targetSum = targetCounter.getCategoryTargetSum() + .get(value.getKey()); + long categorySize = targetCounter.getCategoryCounts() + .get(value.getKey()); + + if (categorySize < minCategorySize) { + return prior; + } else { + double categoryMean = targetSum / categorySize; + + double smoove = 1 / (1 + + Math.exp(-(categorySize - minSamplesLeaf) / smoothing)); + return prior * (1 - smoove) + categoryMean * smoove; + } + } + )); + } + /** * Calculates encoding frequencies as frequency divided on amount of rows in dataset. * @@ -343,6 +450,82 @@ else if (lbVal instanceof Double) return categoryFrequencies; } + /** + * Updates frequencies by values and features. + * + * @param row Feature vector. + * @param targetCounters Holds the frequencies of categories by values and features. + * @return target counter. 
+ */ + private TargetCounter[] updateTargetCountersForNextRow(LabeledVector row, + TargetCounter[] targetCounters) { + if (targetCounters == null) + targetCounters = initializeTargetCounters(row); + else + assert targetCounters.length == row.size() : "Base preprocessor must return exactly " + + targetCounters.length + " features"; + + double targetValue = row.features().get(targetLabelIndex); + + for (int i = 0; i < targetCounters.length; i++) { + if (handledIndices.contains(i)) { + String strVal; + Object featureVal = row.features().getRaw(i); + + if (featureVal.equals(Double.NaN)) { + strVal = EncoderPreprocessor.KEY_FOR_NULL_VALUES; + row.features().setRaw(i, strVal); + } + else if (featureVal instanceof String) + strVal = (String)featureVal; + else if (featureVal instanceof Number) + strVal = String.valueOf(featureVal); + else if (featureVal instanceof Boolean) + strVal = String.valueOf(featureVal); + else + throw new RuntimeException("The type " + featureVal.getClass() + " is not supported for the feature values."); + + TargetCounter targetCounter = targetCounters[i]; + targetCounter.setTargetCount(targetCounter.getTargetCount() + 1); + targetCounter.setTargetSum(targetCounter.getTargetSum() + targetValue); + + Map categoryCounts = targetCounter.getCategoryCounts(); + + if (categoryCounts.containsKey(strVal)) { + categoryCounts.put(strVal, categoryCounts.get(strVal) + 1); + } else { + categoryCounts.put(strVal, 1L); + } + + Map categoryTargetSum = targetCounter.getCategoryTargetSum(); + if (categoryTargetSum.containsKey(strVal)) { + categoryTargetSum.put(strVal, categoryTargetSum.get(strVal) + targetValue); + } else { + categoryTargetSum.put(strVal, targetValue); + } + } + } + return targetCounters; + } + + /** + * Initialize target counters for handled indices only. + * + * @param row Features vector. + * @return target counter. 
+ */ + private TargetCounter[] initializeTargetCounters(LabeledVector row) { + TargetCounter[] targetCounter = new TargetCounter[row.size()]; + + for (int i = 0; i < row.size(); i++) { + if (handledIndices.contains(i)) { + targetCounter[i] = new TargetCounter(); + } + } + + return targetCounter; + } + /** * Add the index of encoded feature. * @@ -383,7 +566,48 @@ public EncoderTrainer withEncoderType(EncoderType type) { * @return The changed trainer. */ public EncoderTrainer withEncodedFeatures(Set handledIndices) { - this.handledIndices = handledIndices; + this.handledIndices.addAll(handledIndices); + return this; + } + + /** + * Sets the target label index. + * @param targetLabelIndex Index of target label. + * @return The changed trainer. + */ + public EncoderTrainer labeled(Integer targetLabelIndex) { + this.targetLabelIndex = targetLabelIndex; + return this; + } + + /** + * Sets the smoothing for target encoding. + * @param smoothing smoothing value. + * @return The changed trainer. + */ + public EncoderTrainer smoothing(Double smoothing) { + this.smoothing = smoothing; + return this; + } + + /** + * Sets the minSamplesLeaf for target encoding. + * @param minSamplesLeaf min samples leaf. + * @return The changed trainer. + */ + public EncoderTrainer minSamplesLeaf(Integer minSamplesLeaf) { + this.minSamplesLeaf = minSamplesLeaf; + return this; + } + + /** + * Sets the min category size for category target encoding. + * Category less then minCategorySize will be encoded with avarage target value. + * @param minCategorySize min samples leaf. + * @return The changed trainer. 
+ */ + public EncoderTrainer minCategorySize(Long minCategorySize) { + this.minCategorySize = minCategorySize; return this; } } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderType.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderType.java index 3d17809376ab1..e96c0539c7d43 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderType.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderType.java @@ -33,5 +33,8 @@ public enum EncoderType { FREQUENCY_ENCODER, /** Label encoder. */ - LABEL_ENCODER + LABEL_ENCODER, + + /** Target encoder. */ + TARGET_ENCODER, } diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetCounter.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetCounter.java new file mode 100644 index 0000000000000..d2f8065bdba36 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetCounter.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.ml.preprocessing.encoding.target; + +import java.util.HashMap; +import java.util.Map; + +/** + * Counter for encode category. + */ +public class TargetCounter { + /** */ + private Double targetSum = 0d; + + /** */ + private Long targetCount = 0L; + + /** */ + private final Map categoryCounts = new HashMap<>(); + + /** */ + private final Map categoryTargetSum = new HashMap<>(); + + /** */ + public Double getTargetSum() { + return targetSum; + } + + /** */ + public void setTargetSum(Double targetSum) { + this.targetSum = targetSum; + } + + /** */ + public Long getTargetCount() { + return targetCount; + } + + /** */ + public void setTargetCount(Long targetCount) { + this.targetCount = targetCount; + } + + /** */ + public Map getCategoryCounts() { + return categoryCounts; + } + + /** */ + public void setCategoryCounts(Map categoryCounts) { + this.categoryCounts.putAll(categoryCounts); + } + + /** */ + public Map getCategoryTargetSum() { + return categoryTargetSum; + } + + /** */ + public void setCategoryTargetSum(Map categoryTargetSum) { + this.categoryTargetSum.putAll(categoryTargetSum); + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetEncoderPreprocessor.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetEncoderPreprocessor.java new file mode 100644 index 0000000000000..765fc8b792236 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetEncoderPreprocessor.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.preprocessing.encoding.target; + +import java.util.Set; +import org.apache.ignite.ml.math.primitives.vector.VectorUtils; +import org.apache.ignite.ml.preprocessing.Preprocessor; +import org.apache.ignite.ml.preprocessing.encoding.EncoderPreprocessor; +import org.apache.ignite.ml.structures.LabeledVector; + +/** + * Preprocessing function that makes Target encoding. + * + * The Target Encoder Preprocessor encodes string values (categories) to double values + * in range [0.0, 1], where the value will be presented as a regularized mean target value of + * a category. + * + * alpha = 1 / (1 + exp(-(categorySize - min_samples_leaf) / beta)) + * encodedValue = globalTargetMean * (1 - alpha) + categoryTargetMean * alpha + * if categorySize == 1 then use globalTargetMean + * + * min_samples_leaf - minimum samples to take category average into account. + * beta - smoothing effect to balance categorical average vs prior. Higher value means + * stronger regularization. + * + * ref: https://dl.acm.org/doi/10.1145/507533.507538 + * + *

+ * This preprocessor can transform multiple columns which indices are handled during training process. + * These indexes could be defined via .withEncodedFeature(featureIndex) call. + *

+ *

+ * NOTE: it does not add new column but change data in-place. + *

+ * + * @param Type of a key in {@code upstream} data. + * @param Type of a value in {@code upstream} data. + */ +public class TargetEncoderPreprocessor extends EncoderPreprocessor { + /** */ + protected static final long serialVersionUID = 6237711236382623481L; + + /** Filling values. */ + protected final TargetEncodingMeta[] targetCounters; + + /** + * Constructs a new instance of Frequency Encoder preprocessor. + * + * @param basePreprocessor Base preprocessor. + * @param handledIndices Handled indices. + */ + public TargetEncoderPreprocessor(TargetEncodingMeta[] targetCounters, + Preprocessor basePreprocessor, Set handledIndices) { + super(null, basePreprocessor, handledIndices); + this.targetCounters = targetCounters; + } + + /** + * Applies this preprocessor. + * + * @param k Key. + * @param v Value. + * @return Preprocessed row. + */ + @Override public LabeledVector apply(K k, V v) { + LabeledVector tmp = basePreprocessor.apply(k, v); + double[] res = new double[tmp.size()]; + + for (int i = 0; i < res.length; i++) { + Object tmpObj = tmp.getRaw(i); + + if (handledIndices.contains(i)) { + if (targetCounters[i].getCategoryMean().containsKey(tmpObj.toString())) { + res[i] = targetCounters[i].getCategoryMean().get(tmpObj.toString()); + } else { + res[i] = targetCounters[i].getGlobalMean(); + } + } else + res[i] = (double)tmpObj; + } + + return new LabeledVector(VectorUtils.of(res), tmp.label()); + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetEncodingMeta.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetEncodingMeta.java new file mode 100644 index 0000000000000..7bcd650caedcf --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/TargetEncodingMeta.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.preprocessing.encoding.target; + +import java.util.Collections; +import java.util.Map; + +/** + * Metadata for encode category. + */ +public class TargetEncodingMeta { + /** */ + private Double globalMean; + + /** */ + private Map categoryMean; + + /** */ + public TargetEncodingMeta withGlobalMean(Double globalMean) { + this.globalMean = globalMean; + + return this; + } + + /** */ + public TargetEncodingMeta withCategoryMean(Map categoryMean) { + this.categoryMean = categoryMean; + + return this; + } + + /** */ + public Double getGlobalMean() { + return globalMean; + } + + /** */ + public Map getCategoryMean() { + return Collections.unmodifiableMap(categoryMean); + } +} diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/package-info.java new file mode 100644 index 0000000000000..6ea3de6fea738 --- /dev/null +++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/target/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * + * Contains frequency encoding preprocessor. + */ +package org.apache.ignite.ml.preprocessing.encoding.target; diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/TargetEncoderPreprocessorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/TargetEncoderPreprocessorTest.java new file mode 100644 index 0000000000000..6fed4a654b805 --- /dev/null +++ b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/TargetEncoderPreprocessorTest.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.ml.preprocessing.encoding; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.HashSet; +import org.apache.ignite.ml.dataset.feature.extractor.Vectorizer; +import org.apache.ignite.ml.dataset.feature.extractor.impl.DummyVectorizer; +import org.apache.ignite.ml.math.primitives.vector.Vector; +import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector; +import org.apache.ignite.ml.preprocessing.encoding.target.TargetEncoderPreprocessor; +import org.apache.ignite.ml.preprocessing.encoding.target.TargetEncodingMeta; +import org.junit.Test; +import static org.junit.Assert.assertArrayEquals; + +/** + * Tests for {@link TargetEncoderPreprocessor}. + */ +public class TargetEncoderPreprocessorTest { + /** Tests {@code apply()} method. */ + @Test + public void testApply() { + Vector[] data = new Vector[] { + new DenseVector(new Serializable[] {"1", "Moscow", "A"}), + new DenseVector(new Serializable[] {"2", "Moscow", "B"}), + new DenseVector(new Serializable[] {"3", "Moscow", "B"}), + }; + + Vectorizer vectorizer = new DummyVectorizer<>(0, 1, 2); + + TargetEncoderPreprocessor preprocessor = new TargetEncoderPreprocessor<>( + new TargetEncodingMeta[]{ + // feature 0 + new TargetEncodingMeta() + .withGlobalMean(0.5) + .withCategoryMean(new HashMap() { + { + put("1", 1.0); // category "1" avg mean = 1.0 + put("2", 0.0); // category "2" avg mean = 0.0 + } + }), + // feature 1 + new TargetEncodingMeta() + .withGlobalMean(0.1) + .withCategoryMean(new HashMap() {}), + // feature 2 + new TargetEncodingMeta() + .withGlobalMean(0.1) + .withCategoryMean(new HashMap() { + { + put("A", 1.0); // category "A" avg mean 1.0 + put("B", 2.0); // category "B" avg mean 2.0 + } + }) + }, + vectorizer, + new HashSet() { + { + add(0); + add(1); + add(2); + } + }); + + double[][] postProcessedData = new double[][] { + { + 1.0, 
// "1" contains in dict => use category mean 1.0 + 0.1, // "Moscow" not contains in dict => use global 0.1 + 1.0 // "A" contains in dict => use category mean 1.0 + }, + { + 0.0, // "2" contains in dict => use category mean 0.0 + 0.1, // "Moscow" not contains in dict => use global 0.1 + 2.0 // "B" contains in dict => use category mean 2.0 + }, + { + 0.5, // "3" not contains in dict => use global mean 0.5 + 0.1, // "Moscow" not contains in dict => use global 0.1 + 2.0 // "B" contains in dict => use category mean 2.0 + }, + }; + + for (int i = 0; i < data.length; i++) + assertArrayEquals(postProcessedData[i], preprocessor.apply(i, data[i]).features().asArray(), 1e-8); + } +} diff --git a/modules/platforms/cpp/odbc-test/include/test_utils.h b/modules/platforms/cpp/odbc-test/include/test_utils.h index 1f2aeec79b7e2..6cac2afa15c49 100644 --- a/modules/platforms/cpp/odbc-test/include/test_utils.h +++ b/modules/platforms/cpp/odbc-test/include/test_utils.h @@ -121,18 +121,20 @@ namespace ignite_test * * @param handleType Type of the handle. * @param handle Handle. + * @param idx Error record index. * @return Error state. */ - std::string GetOdbcErrorState(SQLSMALLINT handleType, SQLHANDLE handle); + std::string GetOdbcErrorState(SQLSMALLINT handleType, SQLHANDLE handle, int idx = 1); /** * Extract error message. * * @param handleType Type of the handle. * @param handle Handle. + * @param idx Error record index. * @return Error message. */ - std::string GetOdbcErrorMessage(SQLSMALLINT handleType, SQLHANDLE handle); + std::string GetOdbcErrorMessage(SQLSMALLINT handleType, SQLHANDLE handle, int idx = 1); /** * @return Test config directory path. 
diff --git a/modules/platforms/cpp/odbc-test/src/attributes_test.cpp b/modules/platforms/cpp/odbc-test/src/attributes_test.cpp index 0519f612bc30d..278a0664ec16c 100644 --- a/modules/platforms/cpp/odbc-test/src/attributes_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/attributes_test.cpp @@ -243,4 +243,24 @@ BOOST_AUTO_TEST_CASE(ConnectionAttributeLoginTimeout) BOOST_REQUIRE_EQUAL(timeout, 42); } +/** + * Check that environment returns expected version of ODBC standard. + * + * 1. Start node. + * 2. Establish connection using ODBC driver. + * 3. Get current ODBC version from env handle. + * 4. Check that version is of the expected value. + */ +BOOST_AUTO_TEST_CASE(TestSQLGetEnvAttrDriverVersion) +{ + Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache"); + + SQLINTEGER version; + SQLRETURN ret = SQLGetEnvAttr(env, SQL_ATTR_ODBC_VERSION, &version, 0, 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env); + + BOOST_CHECK_EQUAL(version, SQL_OV_ODBC3); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/authentication_test.cpp b/modules/platforms/cpp/odbc-test/src/authentication_test.cpp index 5a77370b484e4..e082413eae9f4 100644 --- a/modules/platforms/cpp/odbc-test/src/authentication_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/authentication_test.cpp @@ -85,8 +85,6 @@ struct AuthenticationTestSuiteFixture : odbc::OdbcTestSuite */ static std::string MakeConnectionString(const std::string& user, const std::string& pass) { - std::string cfgDirPath = GetTestConfigDir(); - std::stringstream connectString; connectString << @@ -122,6 +120,95 @@ BOOST_AUTO_TEST_CASE(TestConnectionDefaultAuthSuccess) InsertTestBatch(11, 20, 9); } +/** + * Check that connection with UID and PWD arguments established successfully. + * + * 1. Start test node with configured authentication. + * 2. Establish connection using UID and PWD arguments. Check that it established successfully. + * 3. 
Check that connection can be used successfully for SQL insert and select operations. + */ +BOOST_AUTO_TEST_CASE(TestConnectionLegacyAuthSuccess) +{ + std::stringstream comp; + + comp << + "DRIVER={Apache Ignite};" + "ADDRESS=127.0.0.1:11110;" + "SCHEMA=cache;" + "UID=" << defaultUser << ";" + "PWD=" << defaultPass << ";"; + + std::string connStr = comp.str(); + + Connect(connStr); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} + +/** + * Check that connection with UID, USER, PWD and PASSWORD arguments established successfully. + * + * 1. Start test node with configured authentication. + * 2. Establish connection using UID, USER, PWD and PASSWORD arguments. Check that it established successfully. + * 3. Check that connection returns warning that password and user arguments duplicated. + * 4. Check that connection can be used successfully for SQL insert and select operations. + */ +BOOST_AUTO_TEST_CASE(TestConnectionBothAuthSuccess) +{ + std::stringstream comp; + + comp << + "DRIVER={Apache Ignite};" + "ADDRESS=127.0.0.1:11110;" + "SCHEMA=cache;" + "UID=" << defaultUser << ";" + "PWD=" << defaultPass << ";" + "USER=" << defaultUser << ";" + "PASSWORD=" << defaultPass << ";"; + + std::string connStr = comp.str(); + + Prepare(); + + // Connect string + std::vector connectStr0(connStr.begin(), connStr.end()); + + SQLCHAR outstr[ODBC_BUFFER_SIZE]; + SQLSMALLINT outstrlen; + + // Connecting to ODBC server. 
+ SQLRETURN ret = SQLDriverConnect(dbc, NULL, &connectStr0[0], static_cast(connectStr0.size()), + outstr, sizeof(outstr), &outstrlen, SQL_DRIVER_COMPLETE); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_DBC, dbc)); + + BOOST_CHECK_EQUAL(ret, SQL_SUCCESS_WITH_INFO); + + std::string message = GetOdbcErrorMessage(SQL_HANDLE_DBC, dbc); + + BOOST_CHECK(!message.empty()); + + BOOST_CHECK(message.find("01S02") != std::string::npos); + BOOST_CHECK(message.find("Re-writing PASSWORD (have you specified it several times?") != std::string::npos); + + message = GetOdbcErrorMessage(SQL_HANDLE_DBC, dbc, 2); + + BOOST_CHECK(!message.empty()); + + BOOST_CHECK(message.find("01S02") != std::string::npos); + BOOST_CHECK(message.find("Re-writing USER (have you specified it several times?") != std::string::npos); + + // Allocate a statement handle + SQLAllocHandle(SQL_HANDLE_STMT, dbc, &stmt); + + BOOST_REQUIRE(stmt != NULL); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} + BOOST_AUTO_TEST_CASE(TestConnectionAuthReject) { std::string state = ExpectConnectionReject(MakeConnectionString("unknown", "unknown")); diff --git a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp index 04f76928b5082..05a0e46eb9a86 100644 --- a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp @@ -131,6 +131,212 @@ struct MetaQueriesTestSuiteFixture : public odbc::OdbcTestSuite cache2 = grid.GetCache("cache2"); } + /** + * Check result set column metadata using SQLDescribeCol. + * + * @param stmt Statement. + * @param idx Index. + * @param expName Expected name. + * @param expDataType Expected data type. + * @param expSize Expected column size. + * @param expScale Expected column scale. + * @param expNullability expected nullability. 
+ */ + void CheckColumnMetaWithSQLDescribeCol(SQLHSTMT stmt, SQLUSMALLINT idx, const std::string& expName, + SQLSMALLINT expDataType, SQLULEN expSize, SQLSMALLINT expScale, SQLSMALLINT expNullability) + { + std::vector name(ODBC_BUFFER_SIZE); + SQLSMALLINT nameLen = 0; + SQLSMALLINT dataType = 0; + SQLULEN size; + SQLSMALLINT scale; + SQLSMALLINT nullability; + + SQLRETURN ret = SQLDescribeCol(stmt, idx, &name[0], (SQLSMALLINT)name.size(), &nameLen, &dataType, &size, &scale, &nullability); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_GE(nameLen, 0); + BOOST_CHECK_LE(nameLen, static_cast(ODBC_BUFFER_SIZE)); + + std::string nameStr(name.begin(), name.begin() + nameLen); + + BOOST_CHECK_EQUAL(nameStr, expName); + BOOST_CHECK_EQUAL(dataType, expDataType); + BOOST_CHECK_EQUAL(size, expSize); + BOOST_CHECK_EQUAL(scale, expScale); + BOOST_CHECK_EQUAL(nullability, expNullability); + } + + /** + * @param func Function to call before tests. May be PrepareQuery or ExecQuery. + * + * 1. Start node. + * 2. Connect to node using ODBC. + * 3. Create table with decimal and char columns with specified size and scale. + * 4. Execute or prepare statement. + * 5. Check presicion and scale of every column using SQLDescribeCol. 
+ */ + template + void CheckSQLDescribeColPrecisionAndScale(F func) + { + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=PUBLIC"); + + SQLRETURN ret = ExecQuery( + "create table TestScalePrecision(" + " id int primary key," + " dec1 decimal(3,0)," + " dec2 decimal(42,12)," + " dec3 decimal," + " char1 char(3)," + " char2 char(42)," + " char3 char," + " vchar varchar" + ")"); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = ExecQuery( + "insert into " + "TestScalePrecision(id, dec1, dec2, dec3, char1, char2, char3, vchar) " + "values (1, 12, 160.23, -1234.56789, 'TST', 'Lorem Ipsum', 'Some test value', 'Some test varchar')"); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = (this->*func)("select id, dec1, dec2, dec3, char1, char2, char3, vchar from PUBLIC.TestScalePrecision"); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + SQLSMALLINT columnCount = 0; + + ret = SQLNumResultCols(stmt, &columnCount); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(columnCount, 8); + + CheckColumnMetaWithSQLDescribeCol(stmt, 1, "ID", SQL_INTEGER, 10, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 2, "DEC1", SQL_DECIMAL, 3, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 3, "DEC2", SQL_DECIMAL, 42, 12, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 4, "DEC3", SQL_DECIMAL, 65535, 32767, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 5, "CHAR1", SQL_VARCHAR, 3, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 6, "CHAR2", SQL_VARCHAR, 42, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 7, "CHAR3", SQL_VARCHAR, 2147483647, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLDescribeCol(stmt, 8, "VCHAR", SQL_VARCHAR, 2147483647, 0, 
SQL_NULLABLE_UNKNOWN); + } + + /** + * Check result set column metadata using SQLColAttribute. + * + * @param stmt Statement. + * @param idx Index. + * @param expName Expected name. + * @param expDataType Expected data type. + * @param expSize Expected column size. + * @param expScale Expected column scale. + * @param expNullability expected nullability. + */ + void CheckColumnMetaWithSQLColAttribute(SQLHSTMT stmt, SQLUSMALLINT idx, const std::string& expName, + SQLLEN expDataType, SQLULEN expSize, SQLLEN expScale, SQLLEN expNullability) + { + std::vector name(ODBC_BUFFER_SIZE); + SQLSMALLINT nameLen = 0; + SQLLEN dataType = 0; + SQLLEN size; + SQLLEN scale; + SQLLEN nullability; + + SQLRETURN ret = SQLColAttribute(stmt, idx, SQL_DESC_NAME, &name[0], (SQLSMALLINT)name.size(), &nameLen, 0); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, idx, SQL_DESC_TYPE, 0, 0, 0, &dataType); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, idx, SQL_DESC_PRECISION, 0, 0, 0, &size); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, idx, SQL_DESC_SCALE, 0, 0, 0, &scale); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLColAttribute(stmt, idx, SQL_DESC_NULLABLE, 0, 0, 0, &nullability); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_GE(nameLen, 0); + BOOST_CHECK_LE(nameLen, static_cast(ODBC_BUFFER_SIZE)); + + std::string nameStr(name.begin(), name.begin() + nameLen); + + BOOST_CHECK_EQUAL(nameStr, expName); + BOOST_CHECK_EQUAL(dataType, expDataType); + BOOST_CHECK_EQUAL(size, expSize); + BOOST_CHECK_EQUAL(scale, expScale); + BOOST_CHECK_EQUAL(nullability, expNullability); + } + + /** + * @param func Function to call before tests. May be PrepareQuery or ExecQuery. + * + * 1. Start node. + * 2. Connect to node using ODBC. + * 3. Create table with decimal and char columns with specified size and scale. + * 4. Execute or prepare statement. + * 5. 
Check presicion and scale of every column using SQLColAttribute. + */ + template + void CheckSQLColAttributePrecisionAndScale(F func) + { + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=PUBLIC"); + + SQLRETURN ret = ExecQuery( + "create table TestScalePrecision(" + " id int primary key," + " dec1 decimal(3,0)," + " dec2 decimal(42,12)," + " dec3 decimal," + " char1 char(3)," + " char2 char(42)," + " char3 char," + " vchar varchar" + ")"); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = ExecQuery( + "insert into " + "TestScalePrecision(id, dec1, dec2, dec3, char1, char2, char3, vchar) " + "values (1, 12, 160.23, -1234.56789, 'TST', 'Lorem Ipsum', 'Some test value', 'Some test varchar')"); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = (this->*func)("select id, dec1, dec2, dec3, char1, char2, char3, vchar from PUBLIC.TestScalePrecision"); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + SQLSMALLINT columnCount = 0; + + ret = SQLNumResultCols(stmt, &columnCount); + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + BOOST_CHECK_EQUAL(columnCount, 8); + + CheckColumnMetaWithSQLColAttribute(stmt, 1, "ID", SQL_INTEGER, 10, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLColAttribute(stmt, 2, "DEC1", SQL_DECIMAL, 3, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLColAttribute(stmt, 3, "DEC2", SQL_DECIMAL, 42, 12, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLColAttribute(stmt, 4, "DEC3", SQL_DECIMAL, 65535, 32767, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLColAttribute(stmt, 5, "CHAR1", SQL_VARCHAR, 3, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLColAttribute(stmt, 6, "CHAR2", SQL_VARCHAR, 42, 0, SQL_NULLABLE_UNKNOWN); + CheckColumnMetaWithSQLColAttribute(stmt, 7, "CHAR3", SQL_VARCHAR, 2147483647, 0, SQL_NULLABLE_UNKNOWN); + 
CheckColumnMetaWithSQLColAttribute(stmt, 8, "VCHAR", SQL_VARCHAR, 2147483647, 0, SQL_NULLABLE_UNKNOWN); + } + /** * Destructor. */ @@ -670,4 +876,58 @@ BOOST_AUTO_TEST_CASE(TestSQLNumResultColsAfterSQLPrepare) BOOST_CHECK_EQUAL(columnCount, 4); } +/** + * Check that SQLDescribeCol return valid scale and precision for columns of different type after Prepare. + * + * 1. Start node. + * 2. Connect to node using ODBC. + * 3. Create table with decimal and char columns with specified size and scale. + * 4. Prepare statement. + * 5. Check presicion and scale of every column using SQLDescribeCol. + */ +BOOST_AUTO_TEST_CASE(TestSQLDescribeColPrecisionAndScaleAfterPrepare) +{ + CheckSQLDescribeColPrecisionAndScale(&OdbcTestSuite::PrepareQuery); +} + +/** + * Check that SQLDescribeCol return valid scale and precision for columns of different type after Execute. + * + * 1. Start node. + * 2. Connect to node using ODBC. + * 3. Create table with decimal and char columns with specified size and scale. + * 4. Execute statement. + * 5. Check presicion and scale of every column using SQLDescribeCol. */ +BOOST_AUTO_TEST_CASE(TestSQLDescribeColPrecisionAndScaleAfterExec) +{ + CheckSQLDescribeColPrecisionAndScale(&OdbcTestSuite::ExecQuery); +} + +/** + * Check that SQLColAttribute return valid scale and precision for columns of different type after Prepare. + * + * 1. Start node. + * 2. Connect to node using ODBC. + * 3. Create table with decimal and char columns with specified size and scale. + * 4. Prepare statement. + * 5. Check presicion and scale of every column using SQLColAttribute. + */ +BOOST_AUTO_TEST_CASE(TestSQLColAttributePrecisionAndScaleAfterPrepare) +{ + CheckSQLColAttributePrecisionAndScale(&OdbcTestSuite::PrepareQuery); +} + +/** + * Check that SQLColAttribute return valid scale and precision for columns of different type after Execute. + * + * 1. Start node. + * 2. Connect to node using ODBC. + * 3. 
Create table with decimal and char columns with specified size and scale. + * 4. Execute statement. + * 5. Check presicion and scale of every column using SQLColAttribute. */ +BOOST_AUTO_TEST_CASE(TestSQLColAttributePrecisionAndScaleAfterExec) +{ + CheckSQLColAttributePrecisionAndScale(&OdbcTestSuite::ExecQuery); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/test_utils.cpp b/modules/platforms/cpp/odbc-test/src/test_utils.cpp index 6cdaed2a50d59..1519e738ede20 100644 --- a/modules/platforms/cpp/odbc-test/src/test_utils.cpp +++ b/modules/platforms/cpp/odbc-test/src/test_utils.cpp @@ -40,7 +40,7 @@ namespace ignite_test std::string(reinterpret_cast(message), reallen)); } - std::string GetOdbcErrorState(SQLSMALLINT handleType, SQLHANDLE handle) + std::string GetOdbcErrorState(SQLSMALLINT handleType, SQLHANDLE handle, int idx) { SQLCHAR sqlstate[7] = {}; SQLINTEGER nativeCode; @@ -48,12 +48,12 @@ namespace ignite_test SQLCHAR message[ODBC_BUFFER_SIZE]; SQLSMALLINT reallen = 0; - SQLGetDiagRec(handleType, handle, 1, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen); + SQLGetDiagRec(handleType, handle, idx, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen); return std::string(reinterpret_cast(sqlstate)); } - std::string GetOdbcErrorMessage(SQLSMALLINT handleType, SQLHANDLE handle) + std::string GetOdbcErrorMessage(SQLSMALLINT handleType, SQLHANDLE handle, int idx) { SQLCHAR sqlstate[7] = {}; SQLINTEGER nativeCode; @@ -61,7 +61,7 @@ namespace ignite_test SQLCHAR message[ODBC_BUFFER_SIZE]; SQLSMALLINT reallen = 0; - SQLGetDiagRec(handleType, handle, 1, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen); + SQLGetDiagRec(handleType, handle, idx, sqlstate, &nativeCode, message, ODBC_BUFFER_SIZE, &reallen); std::string res(reinterpret_cast(sqlstate)); diff --git a/modules/platforms/cpp/odbc-test/src/utility_test.cpp b/modules/platforms/cpp/odbc-test/src/utility_test.cpp index 7fe602c50b739..58469bd601aa9 100644 
--- a/modules/platforms/cpp/odbc-test/src/utility_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/utility_test.cpp @@ -90,4 +90,68 @@ BOOST_AUTO_TEST_CASE(TestUtilityWriteReadString) BOOST_REQUIRE(outStr4.empty()); } +void CheckDecimalWriteRead(const std::string& val) +{ + using namespace ignite::impl::binary; + using namespace ignite::impl::interop; + using namespace ignite::common; + using namespace ignite::utility; + + InteropUnpooledMemory mem(1024); + InteropOutputStream outStream(&mem); + BinaryWriterImpl writer(&outStream, 0); + + Decimal decimal(val); + + WriteDecimal(writer, decimal); + + outStream.Synchronize(); + + InteropInputStream inStream(&mem); + BinaryReaderImpl reader(&inStream); + + Decimal out; + ReadDecimal(reader, out); + + std::stringstream converter; + converter << out; + + std::string res = converter.str(); + + BOOST_CHECK_EQUAL(res, val); +} + +/** + * Check that Decimal writing and reading works as expected. + * + * 1. Create Decimal value. + * 2. Write using standard serialization algorithm. + * 3. Read using standard de-serialization algorithm. + * 4. Check that initial and read value are equal. 
+ * + * Repeat with the following values: 0, 1, -1, 0.1, -0.1, 42, -42, 160, -160, 34729864879625196, -34729864879625196, + * 3472986487.9625196, -3472986487.9625196, 3472.9864879625196, -3472.9864879625196, 0.34729864879625196, + * -0.34729864879625196 + */ +BOOST_AUTO_TEST_CASE(TestUtilityWriteReadDecimal) +{ + CheckDecimalWriteRead("0"); + CheckDecimalWriteRead("1"); + CheckDecimalWriteRead("-1"); + CheckDecimalWriteRead("0.1"); + CheckDecimalWriteRead("-0.1"); + CheckDecimalWriteRead("42"); + CheckDecimalWriteRead("-42"); + CheckDecimalWriteRead("160"); + CheckDecimalWriteRead("-160"); + CheckDecimalWriteRead("34729864879625196"); + CheckDecimalWriteRead("-34729864879625196"); + CheckDecimalWriteRead("3472986487.9625196"); + CheckDecimalWriteRead("-3472986487.9625196"); + CheckDecimalWriteRead("3472.9864879625196"); + CheckDecimalWriteRead("-3472.9864879625196"); + CheckDecimalWriteRead("0.34729864879625196"); + CheckDecimalWriteRead("-0.34729864879625196"); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h b/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h index 1fdedf653e6e3..de02cb45d94ec 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h @@ -98,6 +98,12 @@ namespace ignite /** Connection attribute keyword for password attribute. */ static const std::string password; + /** Connection attribute keyword for username attribute. */ + static const std::string uid; + + /** Connection attribute keyword for password attribute. */ + static const std::string pwd; + /** Connection attribute keyword for nestedTxMode attribute. 
*/ static const std::string nestedTxMode; }; diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/dsn_config.h b/modules/platforms/cpp/odbc/include/ignite/odbc/dsn_config.h index dbad9b5b91e5f..640046f5d00c0 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/dsn_config.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/dsn_config.h @@ -53,9 +53,10 @@ namespace ignite * * @param dsn DSN name. * @param config Configuration. + * @param diag Diagnostic collector. */ - void ReadDsnConfiguration(const char* dsn, config::Configuration& config); + void ReadDsnConfiguration(const char* dsn, config::Configuration& config, diagnostic::DiagnosticRecordStorage *diag); } } -#endif //_IGNITE_ODBC_DSN_CONFIG \ No newline at end of file +#endif //_IGNITE_ODBC_DSN_CONFIG diff --git a/modules/platforms/cpp/odbc/os/win/src/system_dsn.cpp b/modules/platforms/cpp/odbc/os/win/src/system_dsn.cpp index 733d0cd871db7..0672911da8786 100644 --- a/modules/platforms/cpp/odbc/os/win/src/system_dsn.cpp +++ b/modules/platforms/cpp/odbc/os/win/src/system_dsn.cpp @@ -189,7 +189,7 @@ BOOL INSTAPI ConfigDSN(HWND hwndParent, WORD req, LPCSTR driver, LPCSTR attribut Configuration loaded(config); - ReadDsnConfiguration(dsn.c_str(), loaded); + ReadDsnConfiguration(dsn.c_str(), loaded, &diag); if (!DisplayConfigureDsnWindow(hwndParent, loaded)) return FALSE; @@ -218,4 +218,4 @@ BOOL INSTAPI ConfigDSN(HWND hwndParent, WORD req, LPCSTR driver, LPCSTR attribut } return TRUE; -} \ No newline at end of file +} diff --git a/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp b/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp index fb779f69ac783..a93e3b575bdb2 100644 --- a/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp +++ b/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp @@ -51,6 +51,8 @@ namespace ignite const std::string ConnectionStringParser::Key::sslCaFile = "ssl_ca_file"; const std::string 
ConnectionStringParser::Key::user = "user"; const std::string ConnectionStringParser::Key::password = "password"; + const std::string ConnectionStringParser::Key::uid = "uid"; + const std::string ConnectionStringParser::Key::pwd = "pwd"; const std::string ConnectionStringParser::Key::nestedTxMode = "nested_tx_mode"; ConnectionStringParser::ConnectionStringParser(Configuration& cfg): @@ -417,12 +419,24 @@ namespace ignite { cfg.SetDriver(value); } - else if (lKey == Key::user) + else if (lKey == Key::user || lKey == Key::uid) { + if (!cfg.GetUser().empty() && diag) + { + diag->AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Re-writing USER (have you specified it several times?"); + } + cfg.SetUser(value); } - else if (lKey == Key::password) + else if (lKey == Key::password || lKey == Key::pwd) { + if (!cfg.GetPassword().empty() && diag) + { + diag->AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Re-writing PASSWORD (have you specified it several times?"); + } + cfg.SetPassword(value); } else if (lKey == Key::nestedTxMode) diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index a5beb0ca224f8..b073580656c8a 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -119,7 +119,7 @@ namespace ignite { std::string dsn = config.GetDsn(); - ReadDsnConfiguration(dsn.c_str(), config); + ReadDsnConfiguration(dsn.c_str(), config, &GetDiagnosticRecords()); } return InternalEstablish(config); diff --git a/modules/platforms/cpp/odbc/src/dsn_config.cpp b/modules/platforms/cpp/odbc/src/dsn_config.cpp index dcdb8f4392b92..8f1a6df1616b8 100644 --- a/modules/platforms/cpp/odbc/src/dsn_config.cpp +++ b/modules/platforms/cpp/odbc/src/dsn_config.cpp @@ -72,10 +72,8 @@ namespace ignite std::string res(buf.GetData()); - if (res == unique) - return val; - - val.SetValue(res); + if (res != unique) + val.SetValue(res); return val; } @@ -104,7 +102,7 @@ namespace 
ignite return res; } - void ReadDsnConfiguration(const char* dsn, Configuration& config) + void ReadDsnConfiguration(const char* dsn, Configuration& config, diagnostic::DiagnosticRecordStorage* diag) { SettableValue address = ReadDsnString(dsn, ConnectionStringParser::Key::address); @@ -112,7 +110,7 @@ namespace ignite { std::vector endPoints; - ParseAddress(address.GetValue(), endPoints, 0); + ParseAddress(address.GetValue(), endPoints, diag); config.SetAddresses(endPoints); } @@ -219,4 +217,4 @@ namespace ignite config.SetNestedTxMode(NestedTxMode::FromString(nestedTxModeStr.GetValue(), config.GetNestedTxMode())); } } -} \ No newline at end of file +} diff --git a/modules/platforms/cpp/odbc/src/meta/column_meta.cpp b/modules/platforms/cpp/odbc/src/meta/column_meta.cpp index 476f6a6309c7a..203b4adf01827 100644 --- a/modules/platforms/cpp/odbc/src/meta/column_meta.cpp +++ b/modules/platforms/cpp/odbc/src/meta/column_meta.cpp @@ -163,7 +163,7 @@ namespace ignite if (scale == -1) return false; - value = common::LexicalCast(precision); + value = common::LexicalCast(scale); return true; } diff --git a/modules/platforms/cpp/odbc/src/odbc.cpp b/modules/platforms/cpp/odbc/src/odbc.cpp index b9107fcbb13ee..f297ec44e9935 100644 --- a/modules/platforms/cpp/odbc/src/odbc.cpp +++ b/modules/platforms/cpp/odbc/src/odbc.cpp @@ -325,7 +325,7 @@ namespace ignite LOG_MSG("DSN: " << dsn); - odbc::ReadDsnConfiguration(dsn.c_str(), config); + odbc::ReadDsnConfiguration(dsn.c_str(), config, &connection->GetDiagnosticRecords()); connection->Establish(config); @@ -1144,6 +1144,7 @@ namespace ignite using odbc::Environment; LOG_MSG("SQLSetEnvAttr called"); + LOG_MSG("Attribute: " << attr << ", Value: " << (size_t)value); Environment *environment = reinterpret_cast(env); @@ -1174,7 +1175,7 @@ namespace ignite return SQL_INVALID_HANDLE; SqlLen outResLen; - ApplicationDataBuffer outBuffer(OdbcNativeType::AI_DEFAULT, valueBuf, + ApplicationDataBuffer outBuffer(OdbcNativeType::AI_SIGNED_LONG, 
valueBuf, static_cast(valueBufLen), &outResLen); environment->GetAttribute(attr, outBuffer); diff --git a/modules/platforms/cpp/odbc/src/statement.cpp b/modules/platforms/cpp/odbc/src/statement.cpp index 9253030f839a8..cc508e3d70455 100644 --- a/modules/platforms/cpp/odbc/src/statement.cpp +++ b/modules/platforms/cpp/odbc/src/statement.cpp @@ -1096,6 +1096,8 @@ namespace ignite { const meta::ColumnMetaVector *meta = GetMeta(); + LOG_MSG("Collumn ID: " << colIdx << ", Attribute ID: " << attrId); + if (!meta) return SqlResult::AI_ERROR; diff --git a/modules/platforms/cpp/odbc/src/utility.cpp b/modules/platforms/cpp/odbc/src/utility.cpp index 498490c402dfb..c060a0adaca2a 100644 --- a/modules/platforms/cpp/odbc/src/utility.cpp +++ b/modules/platforms/cpp/odbc/src/utility.cpp @@ -111,10 +111,18 @@ namespace ignite unscaled.MagnitudeToBytes(magnitude); - if (unscaled.GetSign() == -1) - magnitude[0] |= -0x80; + int8_t addBit = unscaled.GetSign() == -1 ? -0x80 : 0; - writer.WriteInt32(magnitude.GetSize()); + if (magnitude[0] < 0) + { + writer.WriteInt32(magnitude.GetSize() + 1); + writer.WriteInt8(addBit); + } + else + { + writer.WriteInt32(magnitude.GetSize()); + magnitude[0] |= addBit; + } impl::binary::BinaryUtils::WriteInt8Array(writer.GetStream(), magnitude.GetData(), magnitude.GetSize()); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj index 19dcc801658d0..e7fb47cbe3d91 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj @@ -111,6 +111,7 @@ + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityBackupFilterTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityBackupFilterTest.cs new file mode 100644 index 0000000000000..fc72fe2d6694f --- /dev/null 
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityBackupFilterTest.cs @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.Cache.Affinity +{ + using System.Collections.Generic; + using System.Linq; + using Apache.Ignite.Core.Cache.Affinity.Rendezvous; + using Apache.Ignite.Core.Cache.Configuration; + using Apache.Ignite.Core.Cluster; + using NUnit.Framework; + + /// + /// Tests for . + /// + public class AffinityBackupFilterTest + { + /** */ + private const string Rack = "Rack"; + + /** */ + private const string NodeIdx = "Node_Idx"; + + /// + /// Fixture set up. + /// + [TestFixtureSetUp] + public void FixtureSetUp() + { + for (int i = 0; i < 4; i++) + { + var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + UserAttributes = new Dictionary + { + {Rack, i < 2 ? 0 : 1}, + {NodeIdx, i} + }, + AutoGenerateIgniteInstanceName = true + }; + + Ignition.Start(cfg); + } + } + + /// + /// Fixture tear down. + /// + [TestFixtureTearDown] + public void FixtureTearDown() + { + Ignition.StopAll(true); + } + + /// + /// Tests that the presence of + /// affects backup nodes affinity assignment. 
+ /// + [Test] + public void TestBackupFilterPlacesBackupsToDifferentRacks() + { + var ign = Ignition.GetAll().First(); + + var cacheCfg1 = new CacheConfiguration("c1") + { + Backups = 1 + }; + + var cacheCfg2 = new CacheConfiguration("c2") + { + Backups = 1, + AffinityFunction = new RendezvousAffinityFunction + { + Partitions = 12, + AffinityBackupFilter = new ClusterNodeAttributeAffinityBackupFilter + { + AttributeNames = new[] {Rack} + } + } + }; + + var cache1 = ign.CreateCache(cacheCfg1); + var cache2 = ign.CreateCache(cacheCfg2); + + var aff = ign.GetAffinity(cache1.Name); + var aff2 = ign.GetAffinity(cache2.Name); + + var placement1 = GetPlacementString(aff.MapPartitionToPrimaryAndBackups(1)); + var placement2 = GetPlacementString(aff2.MapPartitionToPrimaryAndBackups(1)); + + Assert.AreEqual( + "Primary: Node 1 in Rack 0, Backup: Node 0 in Rack 0", + placement1, + "Without backup filter both backups are in the same rack."); + + Assert.AreEqual( + "Primary: Node 1 in Rack 0, Backup: Node 2 in Rack 1", + placement2, + "With backup filter backups are in different racks."); + } + + /// + /// Gets the placement string. 
+ /// + private static string GetPlacementString(IList primaryAndBackup) + { + var primary = primaryAndBackup.First(); + var backup = primaryAndBackup.Last(); + + return string.Format( + "Primary: Node {0} in Rack {1}, Backup: Node {2} in Rack {3}", + primary.Attributes[NodeIdx], + primary.Attributes[Rack], + backup.Attributes[NodeIdx], + backup.Attributes[Rack]); + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs index 2edb31ea6639d..ca1aa756aff22 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionSpringTest.cs @@ -76,6 +76,24 @@ public void TestDynamicCache() ValidateAffinityFunction(Grid.GetCache("dyn-cache2-2")); } + /// + /// Tests that config is propagated from Spring XML to .NET object model. + /// + [Test] + public void TestSpringConfigPropagation() + { + var cfg = Grid.GetCache("cache-with-backup-filter").GetConfiguration(); + var aff = cfg.AffinityFunction as RendezvousAffinityFunction; + + Assert.IsNotNull(aff); + Assert.AreEqual(256, aff.Partitions); + + var filter = aff.AffinityBackupFilter as ClusterNodeAttributeAffinityBackupFilter; + + Assert.IsNotNull(filter); + Assert.AreEqual(new[]{"AVAILABILITY_ZONE", "REGION"}, filter.AttributeNames); + } + /// /// Validates the affinity function. 
/// @@ -183,4 +201,4 @@ public override IEnumerable> AssignPartitions(Affinity } } } -} \ No newline at end of file +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionTest.cs index 1cad49b2ed9a8..36396511b72b3 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Affinity/AffinityFunctionTest.cs @@ -50,6 +50,9 @@ public class AffinityFunctionTest /** */ private const int PartitionCount = 10; + /** */ + private const string BackupFilterAttrName = "DC"; + /** */ private static readonly ConcurrentBag RemovedNodes = new ConcurrentBag(); @@ -76,12 +79,19 @@ public void FixtureSetUp() { AffinityFunction = new RendezvousAffinityFunctionEx {Bar = "test"} } - } + }, + UserAttributes = new Dictionary{{BackupFilterAttrName, 1}} }; _ignite = Ignition.Start(cfg); - _ignite2 = Ignition.Start(new IgniteConfiguration(TestUtils.GetTestConfiguration()) {IgniteInstanceName = "grid2"}); + var cfg2 = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + IgniteInstanceName = "grid2", + UserAttributes = new Dictionary{{BackupFilterAttrName, 2}} + }; + + _ignite2 = Ignition.Start(cfg2); AffinityTopologyVersion waitingTop = new AffinityTopologyVersion(2, 1); @@ -98,9 +108,10 @@ public void FixtureTearDown() try { // Check that affinity handles are present: - // TestDynamicCachePredefined and TestSimpleInheritance do not produce extra handles, so "-2" here. - TestUtils.AssertHandleRegistryHasItems(_ignite, _ignite.GetCacheNames().Count - 2, 0); - TestUtils.AssertHandleRegistryHasItems(_ignite2, _ignite.GetCacheNames().Count - 2, 0); + // TestDynamicCachePredefined, TestSimpleInheritance, TestSimpleInheritanceWithBackupFilter + // do not produce extra handles, so "-3" here. 
+ TestUtils.AssertHandleRegistryHasItems(_ignite, _ignite.GetCacheNames().Count - 3, 0); + TestUtils.AssertHandleRegistryHasItems(_ignite2, _ignite.GetCacheNames().Count - 3, 0); // Destroy all caches _ignite.GetCacheNames().ToList().ForEach(_ignite.DestroyCache); @@ -109,7 +120,7 @@ public void FixtureTearDown() // Check that all affinity functions got released TestUtils.AssertHandleRegistryIsEmpty(1000, _ignite, _ignite2); } - finally + finally { Ignition.StopAll(true); } @@ -140,7 +151,7 @@ public void TestDynamicCache() })); VerifyCacheAffinity(_ignite2.GetCache(cacheName)); - + // Verify context for new cache var lastCtx = Contexts.Where(x => x.GetPreviousAssignment(1) == null) .OrderBy(x => x.DiscoveryEvent.Timestamp).Last(); @@ -259,7 +270,14 @@ public void TestInheritRendezvousAffinity() _ignite.GetCache(CacheNameRendezvous), _ignite.CreateCache(new CacheConfiguration(CacheNameRendezvous + "2") { - AffinityFunction = new RendezvousAffinityFunctionEx {Bar = "test"} + AffinityFunction = new RendezvousAffinityFunctionEx + { + Bar = "test", + AffinityBackupFilter = new ClusterNodeAttributeAffinityBackupFilter + { + AttributeNames = new[] {BackupFilterAttrName} + } + } }) }; @@ -279,6 +297,17 @@ public void TestInheritRendezvousAffinity() // Check config var func = (RendezvousAffinityFunctionEx)cache.GetConfiguration().AffinityFunction; Assert.AreEqual("test", func.Bar); + + if (cache.Name == CacheNameRendezvous) + { + Assert.IsNull(func.AffinityBackupFilter); + } + else + { + var filter = func.AffinityBackupFilter as ClusterNodeAttributeAffinityBackupFilter; + Assert.IsNotNull(filter); + CollectionAssert.AreEqual(new[]{BackupFilterAttrName}, filter.AttributeNames); + } } } @@ -301,6 +330,81 @@ public void TestSimpleInheritance() Assert.AreEqual(4, aff.GetPartition(34)); } + /// + /// Tests the AffinityFunction with simple inheritance and a backup filter: none of the methods are overridden, + /// so there are no callbacks, and user object is not passed over the 
wire. + /// + [Test] + public void TestSimpleInheritanceWithBackupFilter() + { + var cache = _ignite.CreateCache(new CacheConfiguration(TestUtils.TestName) + { + AffinityFunction = new SimpleOverride + { + AffinityBackupFilter = new ClusterNodeAttributeAffinityBackupFilter + { + AttributeNames = new[] {BackupFilterAttrName} + } + } + }); + + var aff = cache.GetConfiguration().AffinityFunction as RendezvousAffinityFunction; + Assert.IsNotNull(aff); + + var filter = aff.AffinityBackupFilter as ClusterNodeAttributeAffinityBackupFilter; + Assert.IsNotNull(filter); + CollectionAssert.AreEqual(new[] {BackupFilterAttrName}, filter.AttributeNames); + } + + /// + /// Tests that custom backup filters are not allowed. + /// + [Test] + public void TestCustomBackupFilterThrowsNotSupportedException() + { + var cfg = new CacheConfiguration(TestUtils.TestName) + { + AffinityFunction = new RendezvousAffinityFunction + { + AffinityBackupFilter = new CustomBackupFilter() + } + }; + + var ex = Assert.Throws(() => _ignite.CreateCache(cfg)); + + var expectedMessage = string.Format( + "Unsupported RendezvousAffinityFunction.AffinityBackupFilter: '{0}'. " + + "Only predefined implementations are supported: 'ClusterNodeAttributeAffinityBackupFilter'", + typeof(CustomBackupFilter).FullName); + + Assert.AreEqual(expectedMessage, ex.Message); + } + + /// + /// Tests that backup filter requires a non-empty attribute set. + /// + [Test] + public void TestBackupFilterWithNullAttributesThrowsException([Values(true, false)] bool nullOrEmpty) + { + var cfg = new CacheConfiguration(TestUtils.TestName) + { + AffinityFunction = new RendezvousAffinityFunction + { + AffinityBackupFilter = new ClusterNodeAttributeAffinityBackupFilter + { + AttributeNames = nullOrEmpty ? 
null : new List() + } + } + }; + + var ex = Assert.Throws(() => _ignite.CreateCache(cfg)); + + var expectedMessage = + "'ClusterNodeAttributeAffinityBackupFilter.AttributeNames' argument should not be null or empty."; + + StringAssert.StartsWith(expectedMessage, ex.Message); + } + [Serializable] private class SimpleAffinityFunction : IAffinityFunction { @@ -412,5 +516,13 @@ public override int Partitions public override bool ExcludeNeighbors { get; set; } } + + /// + /// Custom backup filter. + /// + private class CustomBackupFilter : IAffinityBackupFilter + { + // No-op. + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs index 9d59a5036a02f..ed1b7e20ce231 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs @@ -131,10 +131,10 @@ public void TestDefaultsAreSameInJava() var springConfig = _ignite.GetCache(SpringCacheName).GetConfiguration(); var ignoredProps = new[] {"AffinityFunction"}; - + AssertExtensions.ReflectionEqual(springConfig, new CacheConfiguration(SpringCacheName), ignoredProperties: new HashSet(ignoredProps)); - + AssertConfigIsDefault(springConfig); } @@ -171,7 +171,7 @@ public void TestCreateFromConfiguration() // Can't create existing cache Assert.Throws(() => _ignite.CreateCache(cfg)); - + // Check put-get cache[1] = new Entity { Foo = 1 }; Assert.AreEqual(1, cache[1].Foo); @@ -284,7 +284,7 @@ private static void AssertConfigIsDefault(CacheConfiguration cfg) Assert.AreEqual(CacheConfiguration.DefaultReadThrough, cfg.ReadThrough); Assert.AreEqual(CacheConfiguration.DefaultCopyOnRead, cfg.CopyOnRead); Assert.AreEqual(CacheConfiguration.DefaultKeepBinaryInStore, cfg.KeepBinaryInStore); - Assert.AreEqual(CacheConfiguration.DefaultStoreConcurrentLoadAllThreshold, + 
Assert.AreEqual(CacheConfiguration.DefaultStoreConcurrentLoadAllThreshold, cfg.StoreConcurrentLoadAllThreshold); Assert.AreEqual(CacheConfiguration.DefaultRebalanceOrder, cfg.RebalanceOrder); Assert.AreEqual(CacheConfiguration.DefaultRebalanceBatchesPrefetchCount, cfg.RebalanceBatchesPrefetchCount); @@ -376,12 +376,33 @@ private static void AssertConfigsAreEqual(IAffinityFunction x, IAffinityFunction return; } - var px = (AffinityFunctionBase) x; - var py = (AffinityFunctionBase) y; + var px = (RendezvousAffinityFunction) x; + var py = (RendezvousAffinityFunction) y; Assert.AreEqual(px.GetType(), py.GetType()); Assert.AreEqual(px.Partitions, py.Partitions); Assert.AreEqual(px.ExcludeNeighbors, py.ExcludeNeighbors); + + AssertConfigsAreEqual(px.AffinityBackupFilter, py.AffinityBackupFilter); + } + + /// + /// Asserts that two configurations have the same properties. + /// + private static void AssertConfigsAreEqual(IAffinityBackupFilter x, IAffinityBackupFilter y) + { + if (x == null) + { + Assert.IsNull(y); + return; + } + + Assert.AreEqual(x.GetType(), y.GetType()); + + var fx = (ClusterNodeAttributeAffinityBackupFilter) x; + var fy = (ClusterNodeAttributeAffinityBackupFilter) y; + + Assert.AreEqual(fx.AttributeNames, fy.AttributeNames); } /// @@ -627,7 +648,7 @@ public static CacheConfiguration GetCustomCacheConfiguration(string name = null) TableName = "Table1", Fields = new[] { - new QueryField("length", typeof(int)), + new QueryField("length", typeof(int)), new QueryField("name", typeof(string)) {IsKeyField = true, DefaultValue = "defName"}, new QueryField("location", typeof(string)) {NotNull = true}, }, @@ -737,8 +758,8 @@ private static CacheConfiguration GetCustomCacheConfiguration2(string name = nul TableName = "MyTable", Fields = new[] { - new QueryField("length", typeof(int)) {DefaultValue = -1}, - new QueryField("name", typeof(string)), + new QueryField("length", typeof(int)) {DefaultValue = -1}, + new QueryField("name", typeof(string)), new 
QueryField("location", typeof(string)) {IsKeyField = true} }, Aliases = new [] {new QueryAlias("length", "len") }, @@ -768,12 +789,16 @@ private static CacheConfiguration GetCustomCacheConfiguration2(string name = nul MaxSize = 26, MaxMemorySize = 2501, BatchSize = 33 - }, + }, OnheapCacheEnabled = true, // Required with eviction policy AffinityFunction = new RendezvousAffinityFunction { Partitions = 113, - ExcludeNeighbors = false + ExcludeNeighbors = false, + AffinityBackupFilter = new ClusterNodeAttributeAffinityBackupFilter + { + AttributeNames = new[] {"foo", "bar"} + } } }; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/Cache/Affinity/affinity-function.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/Cache/Affinity/affinity-function.xml index ca38fbe493793..70b071e25e3be 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/Cache/Affinity/affinity-function.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/Cache/Affinity/affinity-function.xml @@ -105,6 +105,24 @@ + + + + + + + + + + AVAILABILITY_ZONE + REGION + + + + + + + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml index 3ca0b1bcda393..bcc8347536866 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml @@ -20,9 +20,9 @@ 127.1.1.1 @@ -83,7 +83,14 @@ - + + + + foo1 + bar2 + + + @@ -130,11 +137,11 @@ - someId012 @@ -156,7 +163,7 @@ diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs index 2b26173c5be2c..a98cad9251028 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs +++ 
b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs @@ -186,6 +186,10 @@ public void TestPredefinedXml() Assert.AreEqual(99, af.Partitions); Assert.IsTrue(af.ExcludeNeighbors); + var afBf = af.AffinityBackupFilter as ClusterNodeAttributeAffinityBackupFilter; + Assert.IsNotNull(afBf); + Assert.AreEqual(new[] {"foo1", "bar2"}, afBf.AttributeNames); + var platformCacheConfiguration = cacheCfg.PlatformCacheConfiguration; Assert.AreEqual("int", platformCacheConfiguration.KeyTypeName); Assert.AreEqual("string", platformCacheConfiguration.ValueTypeName); @@ -787,7 +791,11 @@ private static IgniteConfiguration GetTestConfig() AffinityFunction = new RendezvousAffinityFunction { ExcludeNeighbors = true, - Partitions = 48 + Partitions = 48, + AffinityBackupFilter = new ClusterNodeAttributeAffinityBackupFilter + { + AttributeNames = new[] {"foo", "baz", "bar"} + } }, ExpiryPolicyFactory = new MyPolicyFactory(), EnableStatistics = true, diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj index 58ea5f7aadb57..c315f08166b5b 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj @@ -52,6 +52,8 @@ + + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/IAffinityBackupFilter.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/IAffinityBackupFilter.cs new file mode 100644 index 0000000000000..43bf6426b6f8f --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/IAffinityBackupFilter.cs @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Cache.Affinity +{ + using System.Diagnostics.CodeAnalysis; + using Apache.Ignite.Core.Cache.Affinity.Rendezvous; + + /// + /// Represents a backup filter for an affinity function - see + /// . + /// + /// Only one predefined implementation is supported for now: . + /// + [SuppressMessage("Microsoft.Design", "CA1040:AvoidEmptyInterfaces")] + public interface IAffinityBackupFilter + { + // No-op: custom implementations are not supported. + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/ClusterNodeAttributeAffinityBackupFilter.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/ClusterNodeAttributeAffinityBackupFilter.cs new file mode 100644 index 0000000000000..e87f9ed6c75fc --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/ClusterNodeAttributeAffinityBackupFilter.cs @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Cache.Affinity.Rendezvous +{ + using System.Collections.Generic; + using System.Diagnostics.CodeAnalysis; + using Apache.Ignite.Core.Cache.Configuration; + using Apache.Ignite.Core.Cluster; + + /// + /// Attribute-based affinity backup filter, see , + /// . + /// + /// This filter can be used to ensure that, for a given partition, primary and backup nodes are selected from + /// different racks in a datacenter, or from different availability zones in a cloud environment, so that + /// a single hardware failure does not cause data loss. + /// + /// This implementation will discard backups rather than place multiple on the same set of nodes. This avoids + /// trying to cram more data onto remaining nodes when some have failed. + /// + /// This class is constructed with a set of node attribute names, and a candidate node will be rejected if + /// *any* of the previously selected nodes for a partition have identical values for *all* of those attributes + /// on the candidate node. Another way to understand this is the set of attribute values defines the key of a + /// group into which a node is placed, and the primaries and backups for a partition cannot share nodes + /// in the same group. A null attribute is treated as a distinct value, so two nodes with a null attribute will + /// be treated as having the same value. + /// + /// For example, let's say Ignite cluster of 12 nodes is deployed into 3 racks - r1, r2, r3. Ignite nodes + /// have "SITE" attributes set accordingly to "r1", "r2", "r3". 
For a cache with 1 backup + /// ( set to 1), every partition is assigned to 2 nodes. + /// When the primary node has "SITE" attribute set to "r1", all other nodes with "SITE"="r1" are excluded + /// by this filter when selecting the backup node. + /// + public sealed class ClusterNodeAttributeAffinityBackupFilter : IAffinityBackupFilter + { + /// + /// Gets or sets the attribute names. + /// + [SuppressMessage("Microsoft.Usage", "CA2227:CollectionPropertiesShouldBeReadOnly")] + public ICollection AttributeNames { get; set; } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/RendezvousAffinityFunction.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/RendezvousAffinityFunction.cs index 98ec364559510..e1b46af054b33 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/RendezvousAffinityFunction.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Affinity/Rendezvous/RendezvousAffinityFunction.cs @@ -22,10 +22,22 @@ namespace Apache.Ignite.Core.Cache.Affinity.Rendezvous /// /// Affinity function for partitioned cache based on Highest Random Weight algorithm. /// + // Actual implementation of this class is in Java, see AffinityFunctionSerializer.Write method. [Serializable] public class RendezvousAffinityFunction : AffinityFunctionBase { - // No-op. - // Actual implementation is in Java, see AffinityFunctionSerializer.Write method. + /// + /// Gets or sets an optional backup filter. If provided, then backups will be selected from all nodes + /// that pass this filter. The first node passed to this filter is the node being tested, + /// and the second parameter is a list of nodes that are already assigned for a given partition + /// (primary node is the first in the list). + /// + /// Note that is ignored when + /// is true. + /// + /// Only one predefined implementation is supported for now: + /// . 
+ /// + public IAffinityBackupFilter AffinityBackupFilter { get; set; } } } \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd index 986c39516e1f7..8bb6f877b9561 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd @@ -594,6 +594,32 @@ Affinity function to provide mapping from keys to nodes. + + + + Optional backup filter. If provided, then backups will be selected from all nodes that pass this filter. + + + + + + Attribute names for the backup filter. + + + + + + + + + + + Assembly-qualified type name. + + + + + Number of partitions. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs index 15239c8a4737c..980d338518172 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/Affinity/AffinityFunctionSerializer.cs @@ -29,6 +29,7 @@ namespace Apache.Ignite.Core.Impl.Cache.Affinity using Apache.Ignite.Core.Common; using Apache.Ignite.Core.Impl.Binary; using Apache.Ignite.Core.Impl.Binary.IO; + using Apache.Ignite.Core.Impl.Common; using Apache.Ignite.Core.Impl.Memory; /// @@ -77,6 +78,8 @@ internal static void Write(IBinaryRawWriter writer, IAffinityFunction fun, objec // Do not write user func if there is nothing overridden WriteUserFunc(writer, overrideFlags != UserOverrides.None ? 
fun : null, userFuncOverride); + + WriteBackupFilter(writer, p); } else { @@ -114,30 +117,25 @@ internal static IAffinityFunction Read(IBinaryRawReader reader) { rendezvous.Partitions = partitions; rendezvous.ExcludeNeighbors = exclNeighbors; + rendezvous.AffinityBackupFilter = ReadBackupFilter(reader); } return userFunc; } Debug.Assert(overrideFlags == UserOverrides.None); - AffinityFunctionBase fun; - switch (typeCode) - { - case TypeCodeRendezvous: - fun = new RendezvousAffinityFunction(); - break; - default: - throw new InvalidOperationException("Invalid AffinityFunction type code: " + typeCode); - } + if (typeCode != TypeCodeRendezvous) + throw new InvalidOperationException("Invalid AffinityFunction type code: " + typeCode); - fun.Partitions = partitions; - fun.ExcludeNeighbors = exclNeighbors; - - return fun; + return new RendezvousAffinityFunction + { + Partitions = partitions, + ExcludeNeighbors = exclNeighbors, + AffinityBackupFilter = ReadBackupFilter(reader) + }; } - /// /// Writes the partitions assignment to a stream. /// @@ -244,6 +242,61 @@ private static void WriteUserFunc(IBinaryRawWriter writer, IAffinityFunction fun writer.WriteObject(func); } + /// + /// Reads the backup filter. + /// + private static ClusterNodeAttributeAffinityBackupFilter ReadBackupFilter(IBinaryRawReader reader) + { + var attrCount = reader.ReadInt(); + + if (attrCount <= 0) + { + return null; + } + + var attrs = new string[attrCount]; + + for (var i = 0; i < attrCount; i++) + { + attrs[i] = reader.ReadString(); + } + + return new ClusterNodeAttributeAffinityBackupFilter{AttributeNames = attrs}; + } + + /// + /// Writes the backup filter. 
+ /// + private static void WriteBackupFilter(IBinaryRawWriter writer, RendezvousAffinityFunction func) + { + if (func.AffinityBackupFilter == null) + { + writer.WriteInt(-1); + return; + } + + var filter = func.AffinityBackupFilter as ClusterNodeAttributeAffinityBackupFilter; + + if (filter == null) + { + throw new NotSupportedException(string.Format( + "Unsupported RendezvousAffinityFunction.AffinityBackupFilter: '{0}'. " + + "Only predefined implementations are supported: '{1}'", + func.AffinityBackupFilter.GetType().FullName, + typeof(ClusterNodeAttributeAffinityBackupFilter).Name)); + } + + IgniteArgumentCheck.NotNullOrEmpty(filter.AttributeNames, + "ClusterNodeAttributeAffinityBackupFilter.AttributeNames"); + + writer.WriteInt(filter.AttributeNames.Count); + + foreach (var attr in filter.AttributeNames) + { + writer.WriteString(attr); + } + } + /// /// Overridden function flags. /// diff --git a/pom.xml b/pom.xml index 5b410fed6e023..d86756a6b582f 100644 --- a/pom.xml +++ b/pom.xml @@ -83,6 +83,7 @@ modules/ml modules/ml/spark-model-parser modules/ml/xgboost-model-parser + modules/ml/catboost-model-parser modules/ml/h2o-model-parser modules/opencensus modules/control-utility