Skip to content

Commit d1910cf

Browse files
jvarenina authored and Mario Kevo committed
GEODE-10401: Configurable .drf recovery HashMap overflow threshold (#7828)
Configurable with the JVM parameter: gemfire.disk.drfHashMapOverflowThreshold. Default value: 805306368. When the configured threshold value is reached, the server will overflow to a new hashmap during the recovery of .drf files. Warning: if you set the threshold parameter above 805306368, an unneeded delay will occur due to a bug in the fastutil dependency.
1 parent bc47b30 commit d1910cf

2 files changed

Lines changed: 107 additions & 16 deletions

File tree

geode-core/src/main/java/org/apache/geode/internal/cache/DiskStoreImpl.java

Lines changed: 45 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -180,13 +180,24 @@ public class DiskStoreImpl implements DiskStore {
180180
// System property controlling whether values are recovered synchronously.
public static final String RECOVER_VALUES_SYNC_PROPERTY_NAME =
    GeodeGlossary.GEMFIRE_PREFIX + "disk.recoverValuesSync";

/**
 * Name of the system property holding the .drf recovery overflow threshold. When the configured
 * threshold value is reached, the server will overflow to a new hashmap during the recovery of
 * .drf files.
 */
public static final String DRF_HASHMAP_OVERFLOW_THRESHOLD_NAME =
    GeodeGlossary.GEMFIRE_PREFIX + "disk.drfHashMapOverflowThreshold";

/**
 * Allows recovering values for LRU regions. By default values are not recovered for LRU regions
 * during recovery.
 */
public static final String RECOVER_LRU_VALUES_PROPERTY_NAME =
    GeodeGlossary.GEMFIRE_PREFIX + "disk.recoverLruValues";

// Default overflow threshold: 805306368 entries (0x30000000). NOTE(review): the commit message
// states that values above this default hit a slowdown caused by a fastutil bug — confirm
// before raising it.
static final long DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT = 805306368;

// Effective threshold; overridable via -Dgemfire.disk.drfHashMapOverflowThreshold=<n>.
static final long DRF_HASHMAP_OVERFLOW_THRESHOLD =
    Long.getLong(DRF_HASHMAP_OVERFLOW_THRESHOLD_NAME, DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT);

boolean RECOVER_VALUES = getBoolean(DiskStoreImpl.RECOVER_VALUE_PROPERTY_NAME, true);

boolean RECOVER_VALUES_SYNC = getBoolean(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, false);
@@ -3525,31 +3536,49 @@ public void add(long id) {
35253536
}
35263537

35273538
try {
3528-
if (id > 0 && id <= 0x00000000FFFFFFFFL) {
3529-
currentInts.get().add((int) id);
3539+
if (shouldOverflow(id)) {
3540+
overflowToNewHashMap(id);
35303541
} else {
3531-
currentLongs.get().add(id);
3542+
if (id > 0 && id <= 0x00000000FFFFFFFFL) {
3543+
this.currentInts.get().add((int) id);
3544+
} else {
3545+
this.currentLongs.get().add(id);
3546+
}
35323547
}
35333548
} catch (IllegalArgumentException illegalArgumentException) {
35343549
// See GEODE-8029.
3535-
// Too many entries on the accumulated drf files, overflow and continue.
3550+
// Too many entries on the accumulated drf files, overflow next [Int|Long]OpenHashSet and
3551+
// continue.
3552+
overflowToNewHashMap(id);
3553+
}
3554+
}
3555+
3556+
boolean shouldOverflow(final long id) {
3557+
if (id > 0 && id <= 0x00000000FFFFFFFFL) {
3558+
return currentInts.get().size() == DRF_HASHMAP_OVERFLOW_THRESHOLD;
3559+
} else {
3560+
return currentLongs.get().size() == DRF_HASHMAP_OVERFLOW_THRESHOLD;
3561+
}
3562+
}
3563+
3564+
void overflowToNewHashMap(final long id) {
3565+
if (DRF_HASHMAP_OVERFLOW_THRESHOLD == DRF_HASHMAP_OVERFLOW_THRESHOLD_DEFAULT) {
35363566
logger.warn(
35373567
"There is a large number of deleted entries within the disk-store, please execute an offline compaction.");
3568+
}
35383569

3539-
// Overflow to the next [Int|Long]OpenHashSet and continue.
3540-
if (id > 0 && id <= 0x00000000FFFFFFFFL) {
3541-
IntOpenHashSet overflownHashSet = new IntOpenHashSet((int) INVALID_ID);
3542-
allInts.add(overflownHashSet);
3543-
currentInts.set(overflownHashSet);
3570+
if (id > 0 && id <= 0x00000000FFFFFFFFL) {
3571+
IntOpenHashSet overflownHashSet = new IntOpenHashSet((int) INVALID_ID);
3572+
allInts.add(overflownHashSet);
3573+
currentInts.set(overflownHashSet);
35443574

3545-
currentInts.get().add((int) id);
3546-
} else {
3547-
LongOpenHashSet overflownHashSet = new LongOpenHashSet((int) INVALID_ID);
3548-
allLongs.add(overflownHashSet);
3549-
currentLongs.set(overflownHashSet);
3575+
currentInts.get().add((int) id);
3576+
} else {
3577+
LongOpenHashSet overflownHashSet = new LongOpenHashSet((int) INVALID_ID);
3578+
allLongs.add(overflownHashSet);
3579+
currentLongs.set(overflownHashSet);
35503580

3551-
currentLongs.get().add(id);
3552-
}
3581+
currentLongs.get().add(id);
35533582
}
35543583
}
35553584

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
/*
2+
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
3+
* agreements. See the NOTICE file distributed with this work for additional information regarding
4+
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
5+
* "License"); you may not use this file except in compliance with the License. You may obtain a
6+
* copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software distributed under the License
11+
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
12+
* or implied. See the License for the specific language governing permissions and limitations under
13+
* the License.
14+
*/
15+
package org.apache.geode.internal.cache;
16+
17+
import static org.assertj.core.api.Assertions.assertThat;
18+
19+
import java.util.ArrayList;
20+
import java.util.Collections;
21+
import java.util.List;
22+
import java.util.stream.IntStream;
23+
import java.util.stream.LongStream;
24+
25+
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
26+
import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
27+
import org.junit.jupiter.api.Test;
28+
import org.junitpioneer.jupiter.SetSystemProperty;
29+
30+
import org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet;
31+
32+
/**
33+
* Tests DiskStoreImpl.OplogEntryIdSet
34+
*/
35+
public class OplogEntryIdSetDrfHashSetThresholdTest {
36+
@Test
37+
@SetSystemProperty(key = "gemfire.disk.drfHashMapOverflowThreshold", value = "10")
38+
public void addMethodOverflowBasedOnDrfOverflowThresholdParameters() {
39+
40+
int testEntries = 41;
41+
IntOpenHashSet intOpenHashSet = new IntOpenHashSet();
42+
LongOpenHashSet longOpenHashSet = new LongOpenHashSet();
43+
44+
List<IntOpenHashSet> intOpenHashSets =
45+
new ArrayList<>(Collections.singletonList(intOpenHashSet));
46+
List<LongOpenHashSet> longOpenHashSets =
47+
new ArrayList<>(Collections.singletonList(longOpenHashSet));
48+
49+
OplogEntryIdSet oplogEntryIdSet = new OplogEntryIdSet(intOpenHashSets, longOpenHashSets);
50+
IntStream.range(1, testEntries).forEach(oplogEntryIdSet::add);
51+
LongStream.range(0x00000000FFFFFFFFL + 1, 0x00000000FFFFFFFFL + testEntries)
52+
.forEach(oplogEntryIdSet::add);
53+
54+
assertThat(intOpenHashSets).hasSize(4);
55+
assertThat(longOpenHashSets).hasSize(4);
56+
57+
IntStream.range(1, testEntries).forEach(i -> assertThat(oplogEntryIdSet.contains(i)).isTrue());
58+
LongStream.range(0x00000000FFFFFFFFL + 1, 0x00000000FFFFFFFFL + testEntries)
59+
.forEach(i -> assertThat(oplogEntryIdSet.contains(i)).isTrue());
60+
61+
}
62+
}

0 commit comments

Comments
 (0)