id
stringlengths 33
40
| content
stringlengths 662
61.5k
| max_stars_repo_path
stringlengths 85
97
|
---|---|---|
bugs-dot-jar_data_OAK-2389_8079f7b5 | ---
BugID: OAK-2389
Summary: issues with JsopBuilder.encode and .escape
Description: |-
1) escape() escapes many characters that do not need to be escaped (>127)
2) encode() does not encode many control characters that would need to be escaped when read through a JSON parser.
diff --git a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java
index a9a8b9a..8489197 100644
--- a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java
+++ b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java
@@ -285,6 +285,7 @@ public class JsopBuilder implements JsopWriter {
private static void escape(String s, int length, StringBuilder buff) {
for (int i = 0; i < length; i++) {
char c = s.charAt(i);
+ int ic = (int)c;
switch (c) {
case '"':
// quotation mark
@@ -316,10 +317,18 @@ public class JsopBuilder implements JsopWriter {
break;
default:
if (c < ' ') {
- buff.append("\\u00");
- // guaranteed to be 1 or 2 hex digits only
- buff.append(Character.forDigit(c >>> 4, 16));
- buff.append(Character.forDigit(c & 15, 16));
+ buff.append(String.format("\\u%04x", ic));
+ } else if (ic >= 0xD800 && ic <= 0xDBFF) {
+ // isSurrogate(), only available in Java 7
+ if (i < length - 1 && Character.isSurrogatePair(c, s.charAt(i + 1))) {
+ // ok surrogate
+ buff.append(c);
+ buff.append(s.charAt(i + 1));
+ i += 1;
+ } else {
+ // broken surrogate -> escape
+ buff.append(String.format("\\u%04x", ic));
+ }
} else {
buff.append(c);
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2389_8079f7b5.diff |
bugs-dot-jar_data_OAK-4036_f4324736 | ---
BugID: OAK-4036
Summary: LuceneIndexProviderService may miss on registering PreExtractedTextProvider
Description: "{{LuceneIndexProviderService}} has an optional dependency on {{PreExtractedTextProvider}}.
In such a case it can happen that bind for the provider is invoked before the activate
is called. In such a case the provider would not be registered."
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java
index c8f0cfe..29a893a 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexProviderService.java
@@ -190,7 +190,7 @@ public class LuceneIndexProviderService {
private IndexAugmentorFactory augmentorFactory;
@Reference(policy = ReferencePolicy.DYNAMIC,
- cardinality = ReferenceCardinality.OPTIONAL_MULTIPLE,
+ cardinality = ReferenceCardinality.OPTIONAL_UNARY,
policyOption = ReferencePolicyOption.GREEDY
)
private volatile PreExtractedTextProvider extractedTextProvider;
@@ -441,7 +441,9 @@ public class LuceneIndexProviderService {
PROP_EXTRACTED_TEXT_CACHE_EXPIRY_DEFAULT);
extractedTextCache = new ExtractedTextCache(cacheSizeInMB * ONE_MB, cacheExpiryInSecs);
-
+ if (extractedTextProvider != null){
+ registerExtractedTextProvider(extractedTextProvider);
+ }
CacheStats stats = extractedTextCache.getCacheStats();
if (stats != null){
oakRegs.add(registerMBean(whiteboard,
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4036_f4324736.diff |
bugs-dot-jar_data_OAK-2999_3bf07779 | ---
BugID: OAK-2999
Summary: Index update fails on updating multivalued property
Description: "On emptying a multivalued property, fulltext index update fails and
one can search on old values. Following test demonstrates the issue.\nAdded below
test in [LuceneIndexQueryTest.java|https://github.com/apache/jackrabbit-oak/blob/trunk/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexQueryTest.java]
which should pass - \n{code}\n @Test\n public void testMultiValuedPropUpdate()
throws Exception {\n Tree test = root.getTree(\"/\").addChild(\"test\");\n
\ String child = \"child\";\n String mulValuedProp = \"prop\";\n test.addChild(child).setProperty(mulValuedProp,
of(\"foo\",\"bar\"), Type.STRINGS);\n root.commit();\n assertQuery(\n
\ \"/jcr:root//*[jcr:contains(@\" + mulValuedProp + \", 'foo')]\",\n
\ \"xpath\", ImmutableList.of(\"/test/\" + child));\n test.getChild(child).setProperty(mulValuedProp,
new ArrayList<String>(), Type.STRINGS);\n root.commit();\n assertQuery(\n
\ \"/jcr:root//*[jcr:contains(@\" + mulValuedProp + \", 'foo')]\",\n
\ \"xpath\", new ArrayList<String>());\n\n test.getChild(child).setProperty(mulValuedProp,
of(\"bar\"), Type.STRINGS);\n root.commit();\n assertQuery(\n \"/jcr:root//*[jcr:contains(@\"
+ mulValuedProp + \", 'foo')]\",\n \"xpath\", new ArrayList<String>());\n\n
\ }\n{code}"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java
index 2023f2d..0f96b8a 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java
@@ -100,6 +100,8 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
private boolean propertiesChanged = false;
+ private List<PropertyState> propertiesModified = Lists.newArrayList();
+
private final NodeState root;
/**
@@ -222,12 +224,14 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
@Override
public void propertyChanged(PropertyState before, PropertyState after) {
markPropertyChanged(before.getName());
+ propertiesModified.add(before);
checkAggregates(before.getName());
}
@Override
public void propertyDeleted(PropertyState before) {
markPropertyChanged(before.getName());
+ propertiesModified.add(before);
checkAggregates(before.getName());
}
@@ -332,6 +336,11 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
dirty |= indexAggregates(path, fields, state);
dirty |= indexNullCheckEnabledProps(path, fields, state);
dirty |= indexNotNullCheckEnabledProps(path, fields, state);
+
+ // Check if a node having a single property was modified/deleted
+ if (!dirty) {
+ dirty = indexIfSinglePropertyRemoved();
+ }
if (isUpdate && !dirty) {
// updated the state but had no relevant changes
@@ -578,7 +587,22 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
}
return fieldAdded;
}
-
+
+ private boolean indexIfSinglePropertyRemoved() {
+ boolean dirty = false;
+ for (PropertyState ps : propertiesModified) {
+ PropertyDefinition pd = indexingRule.getConfig(ps.getName());
+ if (pd != null
+ && pd.index
+ && (pd.includePropertyType(ps.getType().tag())
+ || indexingRule.includePropertyType(ps.getType().tag()))) {
+ dirty = true;
+ break;
+ }
+ }
+ return dirty;
+ }
+
/**
* Determine if the property as defined by PropertyDefinition exists or not.
*
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2999_3bf07779.diff |
bugs-dot-jar_data_OAK-1789_08ba79d4 | ---
BugID: OAK-1789
Summary: Upgraded version history has UUIDs as jcr:frozenUuid of non-referenceable
nodes
Description: |-
In Jackrabbit Classic each node, even non-referenceable ones, has a UUID as its identifier, and thus the {{jcr:frozenUuid}} properties of frozen nodes are always UUIDs. In contrast Oak uses path identifiers for non-referenceable frozen nodes (see OAK-1009), which presents a problem when dealing with version histories migrated from Jackrabbit Classic.
To avoid this mismatch, the upgrade code should check each frozen node for referenceability and replace the frozen UUID with a path identifier if needed.
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java
index ffe9e8e..bc60aa8 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java
@@ -18,16 +18,25 @@ package org.apache.jackrabbit.oak.upgrade;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.collect.Iterables.addAll;
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Lists.newArrayListWithCapacity;
import static com.google.common.collect.Maps.newHashMap;
import static com.google.common.collect.Maps.newLinkedHashMap;
+import static com.google.common.collect.Sets.newHashSet;
import static com.google.common.collect.Sets.newLinkedHashSet;
+import static org.apache.jackrabbit.JcrConstants.JCR_FROZENMIXINTYPES;
+import static org.apache.jackrabbit.JcrConstants.JCR_FROZENPRIMARYTYPE;
+import static org.apache.jackrabbit.JcrConstants.JCR_FROZENUUID;
import static org.apache.jackrabbit.JcrConstants.JCR_MIXINTYPES;
import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE;
import static org.apache.jackrabbit.JcrConstants.JCR_UUID;
import static org.apache.jackrabbit.JcrConstants.MIX_REFERENCEABLE;
+import static org.apache.jackrabbit.JcrConstants.NT_FROZENNODE;
import static org.apache.jackrabbit.JcrConstants.NT_UNSTRUCTURED;
+import static org.apache.jackrabbit.oak.api.Type.NAME;
+import static org.apache.jackrabbit.oak.api.Type.NAMES;
+import static org.apache.jackrabbit.oak.api.Type.STRING;
import static org.apache.jackrabbit.oak.plugins.tree.TreeConstants.OAK_CHILD_ORDER;
import java.io.ByteArrayInputStream;
@@ -97,6 +106,8 @@ class JackrabbitNodeState extends AbstractNodeState {
private final TypePredicate isOrderable;
+ private final TypePredicate isFrozenNode;
+
/**
* Source namespace mappings (URI -< prefix).
*/
@@ -116,6 +127,7 @@ class JackrabbitNodeState extends AbstractNodeState {
this.loader = parent.loader;
this.isReferenceable = parent.isReferenceable;
this.isOrderable = parent.isOrderable;
+ this.isFrozenNode = parent.isFrozenNode;
this.uriToPrefix = parent.uriToPrefix;
this.nodes = createNodes(bundle);
this.properties = createProperties(bundle);
@@ -133,6 +145,7 @@ class JackrabbitNodeState extends AbstractNodeState {
this.loader = new BundleLoader(source);
this.isReferenceable = new TypePredicate(root, MIX_REFERENCEABLE);
this.isOrderable = TypePredicate.isOrderable(root);
+ this.isFrozenNode = new TypePredicate(root, NT_FROZENNODE);
this.uriToPrefix = uriToPrefix;
try {
NodePropBundle bundle = loader.loadBundle(id);
@@ -239,7 +252,7 @@ class JackrabbitNodeState extends AbstractNodeState {
return children;
}
- public Map<String, PropertyState> createProperties(NodePropBundle bundle) {
+ private Map<String, PropertyState> createProperties(NodePropBundle bundle) {
Map<String, PropertyState> properties = newHashMap();
String primary;
@@ -290,6 +303,32 @@ class JackrabbitNodeState extends AbstractNodeState {
}
}
+ // OAK-1789: Convert the jcr:frozenUuid of a non-referenceable
+ // frozen node from UUID to a path identifier
+ PropertyState frozenUuid = properties.get(JCR_FROZENUUID);
+ if (frozenUuid != null
+ && frozenUuid.getType() == STRING
+ && isFrozenNode.apply(primary, mixins)) {
+ String frozenPrimary = NT_UNSTRUCTURED;
+ Set<String> frozenMixins = newHashSet();
+
+ PropertyState property = properties.get(JCR_FROZENPRIMARYTYPE);
+ if (property != null && property.getType() == NAME) {
+ primary = property.getValue(NAME);
+ }
+ property = properties.get(JCR_FROZENMIXINTYPES);
+ if (property != null && property.getType() == NAMES) {
+ addAll(frozenMixins, property.getValue(NAMES));
+ }
+
+ if (!isReferenceable.apply(frozenPrimary, frozenMixins)) {
+ frozenUuid = PropertyStates.createProperty(
+ JCR_FROZENUUID,
+ parent.getString(JCR_FROZENUUID) + "/" + name);
+ properties.put(JCR_FROZENUUID, frozenUuid);
+ }
+ }
+
return properties;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1789_08ba79d4.diff |
bugs-dot-jar_data_OAK-4153_9120fd1b | ---
BugID: OAK-4153
Summary: segment's compareAgainstBaseState won't call childNodeDeleted when deleting
the last child and adding n nodes
Description: |-
{{SegmentNodeState.compareAgainstBaseState}} fails to call {{NodeStateDiff.childNodeDeleted}} when for the same parent the only child is deleted and at the same time multiple new, different children are added.
Reason is that the [current code|https://github.com/apache/jackrabbit-oak/blob/a9ce70b61567ffe27529dad8eb5d38ced77cf8ad/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java#L558] for '{{afterChildName == MANY_CHILD_NODES}}' *and* '{{beforeChildName == ONE_CHILD_NODE}}' does not handle all cases: it assumes that 'after' contains the 'before' child and doesn't handle the situation where the 'before' child has gone.
diff --git a/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java b/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
index cb42af2..6df8c38 100644
--- a/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
+++ b/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
@@ -555,9 +555,11 @@ public class SegmentNodeState extends Record implements NodeState {
}
}
} else if (beforeChildName != Template.MANY_CHILD_NODES) {
+ boolean beforeChildStillExists = false;
for (ChildNodeEntry entry
: afterTemplate.getChildNodeEntries(afterId)) {
String childName = entry.getName();
+ beforeChildStillExists |= childName.equals(beforeChildName);
NodeState afterChild = entry.getNodeState();
if (beforeChildName.equals(childName)) {
NodeState beforeChild =
@@ -577,6 +579,13 @@ public class SegmentNodeState extends Record implements NodeState {
return false;
}
}
+ if (!beforeChildStillExists) {
+ NodeState beforeChild =
+ beforeTemplate.getChildNode(beforeChildName, beforeId);
+ if (!diff.childNodeDeleted(beforeChildName, beforeChild)) {
+ return false;
+ }
+ }
} else {
MapRecord afterMap = afterTemplate.getChildNodeMap(afterId);
MapRecord beforeMap = beforeTemplate.getChildNodeMap(beforeId);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4153_9120fd1b.diff |
bugs-dot-jar_data_OAK-2363_90ea7aa5 | ---
BugID: OAK-2363
Summary: NPE in DocumentNodeStore#retrieve for non existing checkpoint
Description: 'Said method throws an NPE when passing it a valid revision identifier
from a non existing checkpoint. '
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 4efc6cb..5c519c2 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -39,6 +39,7 @@ import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
+import java.util.SortedMap;
import java.util.TimeZone;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
@@ -70,6 +71,7 @@ import org.apache.jackrabbit.oak.commons.json.JsopReader;
import org.apache.jackrabbit.oak.commons.json.JsopTokenizer;
import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob;
import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
+import org.apache.jackrabbit.oak.plugins.document.Checkpoints.Info;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoBlobReferenceIterator;
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore;
import org.apache.jackrabbit.oak.plugins.document.persistentCache.PersistentCache;
@@ -1404,7 +1406,8 @@ public final class DocumentNodeStore
@Override
public NodeState retrieve(@Nonnull String checkpoint) {
Revision r = Revision.fromString(checkpoint);
- if (checkpoints.getCheckpoints().containsKey(r)) {
+ SortedMap<Revision, Info> checkpoints = this.checkpoints.getCheckpoints();
+ if (checkpoints != null && checkpoints.containsKey(r)) {
return getRoot(r);
} else {
return null;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2363_90ea7aa5.diff |
bugs-dot-jar_data_OAK-3333_194999ed | ---
BugID: OAK-3333
Summary: SplitOperations purges _commitRoot entries too eagerly
Description: |-
OAK-2528 introduced purging of _commitRoot entries without associated local changes on the document. Those _commitRoot entries are created when a child node is added and the _children flag is touched on the parent.
The purge operation is too eager and removes all such entries, which may result in an undetected hierarchy conflict.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
index 33aafe5..a426521 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/SplitOperations.java
@@ -233,6 +233,7 @@ class SplitOperations {
committedChanges.put(REVISIONS, revisions);
NavigableMap<Revision, String> commitRoot =
new TreeMap<Revision, String>(context.getRevisionComparator());
+ boolean mostRecent = true;
for (Map.Entry<Revision, String> entry : doc.getLocalCommitRoot().entrySet()) {
Revision r = entry.getKey();
if (splitRevs.contains(r)) {
@@ -240,9 +241,13 @@ class SplitOperations {
numValues++;
} else if (r.getClusterId() == context.getClusterId()
&& !changes.contains(r)) {
- // OAK-2528: _commitRoot entry without associated
- // change -> consider as garbage
- addGarbage(r, COMMIT_ROOT);
+ // OAK-2528: _commitRoot entry without associated change
+ // consider all but most recent as garbage (OAK-3333)
+ if (mostRecent) {
+ mostRecent = false;
+ } else {
+ addGarbage(r, COMMIT_ROOT);
+ }
}
}
committedChanges.put(COMMIT_ROOT, commitRoot);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3333_194999ed.diff |
bugs-dot-jar_data_OAK-185_7fe28a0e | ---
BugID: OAK-185
Summary: Trying to remove a missing property throws PathNotFoundException
Description: |-
The following code snippet throws a {{PathNotFoundException}} if the "missing" property is not present.
{code:java}
node.setProperty("missing", (String) null);
{code}
A better way to handle such a case would be for the above statement to simply do nothing.
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeDelegate.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeDelegate.java
index 351ae8b..bda7280 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeDelegate.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeDelegate.java
@@ -291,6 +291,10 @@ public class NodeDelegate extends ItemDelegate {
return new PropertyDelegate(sessionDelegate, getTree(), propertyState);
}
+ public void removeProperty(String name) throws InvalidItemStateException {
+ getTree().removeProperty(name);
+ }
+
/**
* Set a multi valued property
* @param name oak name
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
index 70d0494..fbb8b10 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
@@ -264,15 +264,16 @@ public class NodeImpl extends ItemImpl<NodeDelegate> implements Node {
throws RepositoryException {
checkStatus();
- int targetType = getTargetType(value, type);
- Value targetValue = ValueHelper.convert(value, targetType, getValueFactory());
+ String oakName = sessionDelegate.getOakPathOrThrow(jcrName);
if (value == null) {
- Property p = getProperty(jcrName);
- p.remove();
- return p;
+ dlg.removeProperty(oakName);
+ return null;
} else {
- String oakName = sessionDelegate.getOakPathOrThrow(jcrName);
- CoreValue oakValue = ValueConverter.toCoreValue(targetValue, sessionDelegate);
+ int targetType = getTargetType(value, type);
+ Value targetValue =
+ ValueHelper.convert(value, targetType, getValueFactory());
+ CoreValue oakValue =
+ ValueConverter.toCoreValue(targetValue, sessionDelegate);
return new PropertyImpl(dlg.setProperty(oakName, oakValue));
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-185_7fe28a0e.diff |
bugs-dot-jar_data_OAK-1779_9d36bede | ---
BugID: OAK-1779
Summary: Stale cache after MongoMK GC
Description: After a MongoMK revision GC the docChildrenCache may be stale and lead
to an NPE when reading children with deleted and GC'ed siblings.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index d57f875..35a5f77 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -589,6 +589,10 @@ public final class DocumentNodeStore
return docChildrenCacheStats;
}
+ void invalidateDocChildrenCache() {
+ docChildrenCache.invalidateAll();
+ }
+
public int getPendingWriteCount() {
return unsavedLastRevisions.getPaths().size();
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
index e671b66..885f721 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
@@ -121,6 +121,7 @@ public class VersionGarbageCollector {
log.debug(sb.toString());
}
nodeStore.getDocumentStore().remove(Collection.NODES, docIdsToDelete);
+ nodeStore.invalidateDocChildrenCache();
stats.deletedDocGCCount += docIdsToDelete.size();
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1779_9d36bede.diff |
bugs-dot-jar_data_OAK-3579_2565d74a | ---
BugID: OAK-3579
Summary: BackgroundLeaseUpdate not scheduled when asyncDelay=0
Description: The BackgroundLeaseUpdate extends from NodeStoreTask, which returns from
the run() method when asyncDelay is 0. This is fine for the background read and
update tasks. However, the lease update task must run even when asyncDelay is set
to zero.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index ada87d0..8511e37 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -68,6 +68,8 @@ import javax.management.NotCompliantMBeanException;
import com.google.common.base.Function;
import com.google.common.base.Predicates;
+import com.google.common.base.Supplier;
+import com.google.common.base.Suppliers;
import com.google.common.cache.Cache;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
@@ -2561,18 +2563,35 @@ public final class DocumentNodeStore
static abstract class NodeStoreTask implements Runnable {
final WeakReference<DocumentNodeStore> ref;
private final AtomicBoolean isDisposed;
- private int delay;
+ private final Supplier<Integer> delaySupplier;
- NodeStoreTask(DocumentNodeStore nodeStore, AtomicBoolean isDisposed) {
- ref = new WeakReference<DocumentNodeStore>(nodeStore);
- delay = nodeStore.getAsyncDelay();
+ NodeStoreTask(final DocumentNodeStore nodeStore,
+ final AtomicBoolean isDisposed,
+ Supplier<Integer> delay) {
+ this.ref = new WeakReference<DocumentNodeStore>(nodeStore);
this.isDisposed = isDisposed;
+ if (delay == null) {
+ delay = new Supplier<Integer>() {
+ @Override
+ public Integer get() {
+ DocumentNodeStore ns = ref.get();
+ return ns != null ? ns.getAsyncDelay() : 0;
+ }
+ };
+ }
+ this.delaySupplier = delay;
+ }
+
+ NodeStoreTask(final DocumentNodeStore nodeStore,
+ final AtomicBoolean isDisposed) {
+ this(nodeStore, isDisposed, null);
}
protected abstract void execute(@Nonnull DocumentNodeStore nodeStore);
@Override
public void run() {
+ int delay = delaySupplier.get();
while (delay != 0 && !isDisposed.get()) {
synchronized (isDisposed) {
try {
@@ -2588,7 +2607,7 @@ public final class DocumentNodeStore
} catch (Throwable t) {
LOG.warn("Background operation failed: " + t.toString(), t);
}
- delay = nodeStore.getAsyncDelay();
+ delay = delaySupplier.get();
} else {
// node store not in use anymore
break;
@@ -2633,7 +2652,7 @@ public final class DocumentNodeStore
BackgroundLeaseUpdate(DocumentNodeStore nodeStore,
AtomicBoolean isDisposed) {
- super(nodeStore, isDisposed);
+ super(nodeStore, isDisposed, Suppliers.ofInstance(1000));
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3579_2565d74a.diff |
bugs-dot-jar_data_OAK-1662_3efb5cbf | ---
BugID: OAK-1662
Summary: Node not accessible after document split
Description: In a cluster setup it may happen that a node becomes inaccessible when
all remaining local revision entries after a split are not yet visible to a cluster
node.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
index 2e32b74..5d7b137 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Commit.java
@@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@@ -53,7 +54,7 @@ public class Commit {
private final Revision revision;
private HashMap<String, UpdateOp> operations = new LinkedHashMap<String, UpdateOp>();
private JsopWriter diff = new JsopStream();
- private List<Revision> collisions = new ArrayList<Revision>();
+ private Set<Revision> collisions = new LinkedHashSet<Revision>();
/**
* List of all node paths which have been modified in this commit. In addition to the nodes
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 770cc9c..8b4eb79 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -1210,7 +1210,7 @@ public final class DocumentNodeStore
@Override
public NodeState merge(@Nonnull NodeBuilder builder,
@Nonnull CommitHook commitHook,
- @Nullable CommitInfo info)
+ @Nonnull CommitInfo info)
throws CommitFailedException {
return asDocumentRootBuilder(builder).merge(commitHook, info);
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 35a2306..e685b36 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -581,29 +581,45 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
* @return the revision, or null if deleted
*/
@CheckForNull
- public Revision getNewestRevision(RevisionContext context,
- Revision changeRev,
- CollisionHandler handler) {
- // no need to look at all commits. the primary document
- // always contains at least one commit, including all
- // branch commits which are not yet merged
+ public Revision getNewestRevision(final RevisionContext context,
+ final Revision changeRev,
+ final CollisionHandler handler) {
+ final Map<Revision, String> validRevisions = Maps.newHashMap();
+ Predicate<Revision> predicate = new Predicate<Revision>() {
+ @Override
+ public boolean apply(Revision input) {
+ if (input.equals(changeRev)) {
+ return false;
+ }
+ if (isValidRevision(context, input, null, changeRev, validRevisions)) {
+ return true;
+ }
+ handler.concurrentModification(input);
+ return false;
+ }
+ };
+
+ Revision newestRev = null;
+ // check local commits first
SortedMap<Revision, String> revisions = getLocalRevisions();
SortedMap<Revision, String> commitRoots = getLocalCommitRoot();
- Revision newestRev = null;
- for (Revision r : Iterables.mergeSorted(
+ Iterator<Revision> it = filter(Iterables.mergeSorted(
Arrays.asList(revisions.keySet(), commitRoots.keySet()),
- revisions.comparator())) {
- if (!r.equals(changeRev)) {
- if (isValidRevision(context, r, null, changeRev, new HashMap<Revision, String>())) {
- newestRev = r;
- // found newest revision, no need to check more revisions
- // revisions are sorted newest first
- break;
- } else {
- handler.concurrentModification(r);
- }
+ revisions.comparator()), predicate).iterator();
+ if (it.hasNext()) {
+ newestRev = it.next();
+ } else {
+ // check full history (only needed in rare cases)
+ it = filter(Iterables.mergeSorted(
+ Arrays.asList(
+ getValueMap(REVISIONS).keySet(),
+ getValueMap(COMMIT_ROOT).keySet()),
+ revisions.comparator()), predicate).iterator();
+ if (it.hasNext()) {
+ newestRev = it.next();
}
}
+
if (newestRev == null) {
return null;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1662_3efb5cbf.diff |
bugs-dot-jar_data_OAK-1250_0c3b3306 | ---
BugID: OAK-1250
Summary: Guard against invalid/missing checkpoints
Description: "Playing with the backup revealed a case where a checkpoint can become
invalid after a manual restore of the repository. [0]\nThe NodeStore#retrieve apis
already specify that this can return null in the case the checkpoint doesn't exist
anymore, but it looks like the storage bits aren't yet prepared for that scenario.\n\n\n\n[0]\n{noformat}\norg.apache.sling.commons.scheduler.impl.QuartzScheduler
Exception during job execution of org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate@3a6d47
: Failed to load segment 8a8b281c-1a02-4950-aad5-aad8e436a0d8\njava.lang.IllegalStateException:
Failed to load segment 8a8b281c-1a02-4950-aad5-aad8e436a0d8\n\tat org.apache.jackrabbit.oak.plugins.segment.AbstractStore.readSegment(AbstractStore.java:109)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.segment.Segment.getSegment(Segment.java:189)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.segment.Record.getSegment(Record.java:97)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState.getTemplate(SegmentNodeState.java:56)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState.getChildNode(SegmentNodeState.java:209)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore.retrieve(SegmentNodeStore.java:175)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStoreService.retrieve(SegmentNodeStoreService.java:198)
~[na:na]\n\tat org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate.run(AsyncIndexUpdate.java:97)
~[na:na]\n\tat org.apache.sling.commons.scheduler.impl.QuartzJobExecutor.execute(QuartzJobExecutor.java:105)
~[org.apache.sling.commons.scheduler-2.4.2.jar:na]\n\tat org.quartz.core.JobRunShell.run(JobRunShell.java:207)
[org.apache.sling.commons.scheduler-2.4.2.jar:na]\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
[na:1.7.0_40]\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
[na:1.7.0_40]\n\tat java.lang.Thread.run(Thread.java:724) [na:1.7.0_40]\nCaused
by: java.lang.IllegalStateException: Segment 8a8b281c-1a02-4950-aad5-aad8e436a0d8
not found\n\tat org.apache.jackrabbit.oak.plugins.segment.file.FileStore.loadSegment(FileStore.java:184)
~[na:na]\n{noformat}"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java
index a2be123..d757585 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java
@@ -178,17 +178,45 @@ public class SegmentNodeStore implements NodeStore, Observable {
@Override @Nonnull
public synchronized String checkpoint(long lifetime) {
checkArgument(lifetime > 0);
- // TODO: Guard the checkpoint from garbage collection
- return head.getRecordId().toString();
+ String name = UUID.randomUUID().toString();
+
+ // try 5 times
+ for (int i = 0; i < 5; i++) {
+ if (commitSemaphore.tryAcquire()) {
+ try {
+ refreshHead();
+
+ SegmentNodeState ns = head;
+ RecordId ri = head.getRecordId();
+
+ SegmentRootBuilder builder = ns.builder();
+ NodeBuilder cp = builder.child(name);
+ cp.setProperty("timestamp", System.currentTimeMillis()
+ + lifetime);
+ cp.setChildNode(ROOT, ns.getChildNode(ROOT));
+
+ if (journal.setHead(ri, builder.getNodeState()
+ .getRecordId())) {
+ refreshHead();
+ return name;
+ }
+
+ } finally {
+ commitSemaphore.release();
+ }
+ }
+ }
+
+ return name;
}
@Override @CheckForNull
public synchronized NodeState retrieve(@Nonnull String checkpoint) {
- // TODO: Verify validity of the checkpoint
- RecordId id = RecordId.fromString(checkNotNull(checkpoint));
- SegmentNodeState root =
- new SegmentNodeState(store.getWriter().getDummySegment(), id);
- return root.getChildNode(ROOT);
+ NodeState cp = head.getChildNode(checkpoint).getChildNode(ROOT);
+ if (cp.exists()) {
+ return cp;
+ }
+ return null;
}
private class Commit {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1250_0c3b3306.diff |
bugs-dot-jar_data_OAK-3424_f4349a96 | ---
BugID: OAK-3424
Summary: ClusterNodeInfo does not pick an existing entry on startup
Description: |
When the {{DocumentNodeStore}} starts up, it attempts to find an entry that matches the current instance (which is defined by something based on network interface address and the current working directory).
However, an additional check is done when the cluster lease end time hasn't been reached, in which case the entry is skipped (assuming it belongs to a different instance), and the scan continues. When no other entry is found, a new one is created.
So why would we *ever* consider instances with matching instance information to be different? As far as I can tell the answer is: for unit testing.
But...
With the current assignment very weird things can happen, and I believe I see exactly this happening in a customer problem I'm investigating. The sequence is:
1) First system startup, cluster node id 1 is assigned
2) System crashes or was crashed
3) System restarts within the lease time (120s?), a new cluster node id is assigned
4) System shuts down, and gets restarted after a longer interval: cluster id 1 is used again, and system starts {{MissingLastRevRecovery}}, despite the previous shutdown having been clean
So what we see is that the system starts up with varying cluster node ids, and recovery processes may run with no correlation to what happened before.
Proposal:
a) Make {{ClusterNodeInfo.createInstance()}} much more verbose, so that the default system log contains sufficient information to understand why a certain cluster node id was picked.
b) Drop the logic that skips entries with non-expired leases, so that we get a one-to-one relation between instance ids and cluster node ids. For the unit tests that currently rely on this logic, switch to APIs where the test setup picks the cluster node id.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterNodeInfo.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterNodeInfo.java
index 92208a1..59d5f75 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterNodeInfo.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ClusterNodeInfo.java
@@ -374,7 +374,7 @@ public class ClusterNodeInfo {
int retries = 10;
for (int i = 0; i < retries; i++) {
- ClusterNodeInfo clusterNode = createInstance(store, machineId, instanceId, configuredClusterId);
+ ClusterNodeInfo clusterNode = createInstance(store, machineId, instanceId, configuredClusterId, i == 0);
String key = String.valueOf(clusterNode.id);
UpdateOp update = new UpdateOp(key, true);
update.set(ID, key);
@@ -409,7 +409,7 @@ public class ClusterNodeInfo {
}
private static ClusterNodeInfo createInstance(DocumentStore store, String machineId,
- String instanceId, int configuredClusterId) {
+ String instanceId, int configuredClusterId, boolean waitForLease) {
long now = getCurrentTime();
int clusterNodeId = 0;
@@ -447,16 +447,25 @@ public class ClusterNodeInfo {
}
Long leaseEnd = (Long) doc.get(LEASE_END_KEY);
+ String mId = "" + doc.get(MACHINE_ID_KEY);
+ String iId = "" + doc.get(INSTANCE_ID_KEY);
if (leaseEnd != null && leaseEnd > now) {
- // TODO wait for lease end, see OAK-3449
+ // wait if (a) instructed to, and (b) also the remaining time
+ // time is not much bigger than the lease interval (in which
+ // case something is very very wrong anyway)
+ if (waitForLease && (leaseEnd - now) < (DEFAULT_LEASE_DURATION_MILLIS + 5000) && mId.equals(machineId)
+ && iId.equals(instanceId)) {
+ boolean worthRetrying = waitForLeaseExpiry(store, doc, leaseEnd.longValue(), machineId, instanceId);
+ if (worthRetrying) {
+ return createInstance(store, machineId, instanceId, configuredClusterId, false);
+ }
+ }
+
reuseFailureReason = "leaseEnd " + leaseEnd + " > " + now + " - " + (leaseEnd - now) + "ms in the future";
continue;
}
- String mId = "" + doc.get(MACHINE_ID_KEY);
- String iId = "" + doc.get(INSTANCE_ID_KEY);
-
// remove entries with "random:" keys if not in use (no lease at all)
if (mId.startsWith(RANDOM_PREFIX) && leaseEnd == null) {
store.remove(Collection.CLUSTER_NODES, key);
@@ -506,6 +515,51 @@ public class ClusterNodeInfo {
RecoverLockState.NONE, prevLeaseEnd, newEntry);
}
+ private static boolean waitForLeaseExpiry(DocumentStore store, ClusterNodeInfoDocument cdoc, long leaseEnd, String machineId,
+ String instanceId) {
+ String key = cdoc.getId();
+ LOG.info("Found an existing possibly active cluster node info (" + key + ") for this instance: " + machineId + "/"
+ + instanceId + ", will try use it.");
+
+ // wait until lease expiry plus 2s
+ long waitUntil = leaseEnd + 2000;
+
+ while (getCurrentTime() < waitUntil) {
+ LOG.info("Waiting for cluster node " + key + "'s lease to expire: " + (waitUntil - getCurrentTime()) / 1000 + "s left");
+
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ // ignored
+ }
+
+ try {
+ // check state of cluster node info
+ ClusterNodeInfoDocument reread = store.find(Collection.CLUSTER_NODES, key);
+ if (reread == null) {
+ LOG.info("Cluster node info " + key + ": gone; continueing.");
+ return true;
+ } else {
+ Long newLeaseEnd = (Long) reread.get(LEASE_END_KEY);
+ if (newLeaseEnd == null) {
+ LOG.info("Cluster node " + key + ": lease end information missing, aborting.");
+ return false;
+ } else {
+ if (newLeaseEnd.longValue() != leaseEnd) {
+ LOG.info("Cluster node " + key + " seems to be still active (lease end changed from " + leaseEnd
+ + " to " + newLeaseEnd + ", will not try to use it.");
+ return false;
+ }
+ }
+ }
+ } catch (DocumentStoreException ex) {
+ LOG.info("Error reading cluster node info for key " + key, ex);
+ return false;
+ }
+ }
+ return true;
+ }
+
public void performLeaseCheck() {
if (leaseCheckDisabled || !renewed) {
// if leaseCheckDisabled is set we never do the check, so return fast
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3424_f4349a96.diff |
bugs-dot-jar_data_OAK-2740_429baf4d | ---
BugID: OAK-2740
Summary: TreeTypeProvider treats optimized node type definition info as Ac-Content
Description: |-
while investigating a bug reported by [~teofili] and [~mpetria] that cause group-import with policy node to fail when run with non-administrative session, i found that the {{TreeTypeProvider}} wrongly identifies the optimized item definition information stored with the node types (e.g. {{/jcr:system/jcr:nodeTypes/rep:AccessControllable/rep:namedChildNodeDefinitions/rep:policy}} ) as access control content and thus doesn't read it properly when using a session that doesn't have jcr:readAccessControl privilege at /jcr:system/jcr:nodeTypes.
the effect of this bug is as follows:
the internal calculation of the effective node type and thus item definitions will not work properly for {{rep:policy}} nodes (and similar) as the editing session cannot read the full (oak internal) node type definition as stored below {{/jcr:system/jcr:nodeTypes}}.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/AuthorizationContext.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/AuthorizationContext.java
index 953d00b..49bfe47 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/AuthorizationContext.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/AuthorizationContext.java
@@ -47,7 +47,11 @@ final class AuthorizationContext implements Context, AccessControlConstants, Per
@Override
public boolean definesContextRoot(@Nonnull Tree tree) {
String name = tree.getName();
- return POLICY_NODE_NAMES.contains(name) || REP_PERMISSION_STORE.equals(name);
+ if (POLICY_NODE_NAMES.contains(name)) {
+ return NT_REP_ACL.equals(TreeUtil.getPrimaryTypeName(tree));
+ } else {
+ return REP_PERMISSION_STORE.equals(name);
+ }
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2740_429baf4d.diff |
bugs-dot-jar_data_OAK-1788_dd3437d4 | ---
BugID: OAK-1788
Summary: ConcurrentConflictTest fails occasionally
Description: |-
Occurs every now and then on buildbot. E.g.:
http://ci.apache.org/builders/oak-trunk-win7/builds/16
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
index 9265368..be45ba3 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
@@ -549,6 +549,9 @@ public class Revision {
* <ul>
* <li>
* {@code null} if the revision is older than the earliest range
+ * and the revision timestamp is less than or equal the time
+ * of the last {@link #purge(long)} (see also
+ * {@link #oldestTimestamp}).
* </li>
* <li>
* if the revision is newer than the lower bound of the newest
@@ -565,9 +568,49 @@ public class Revision {
* </li>
* </ul>
*
+ * Below is a graph for a revision comparison example as seen from one
+ * cluster node with some known revision ranges. Revision ranges less
+ * than or equal r2-0-0 have been purged and there are known ranges for
+ * cluster node 1 (this cluster node) and cluster node 2 (some other
+ * cluster node).
+ * <pre>
+ * View from cluster node 1:
+ *
+ * purge r3-0-1 r5-0-2 r7-0-1
+ * ˅ ˅ ˅ ˅
+ * ---+---------+---------+---------+---------+---------
+ * r1-0-0 r2-0-0 r3-0-0 r4-0-0 r5-0-0
+ *
+ * ^
+ * r1-0-1 -> null (1)
+ *
+ * ^
+ * r4-0-2 -> r4-0-0 (2)
+ *
+ * ^
+ * r3-0-1 -> r3-0-0 (3)
+ *
+ * ^
+ * r6-0-2 -> FUTURE (4)
+ *
+ * ^
+ * r9-0-1 -> NEWEST (5)
+ * </pre>
+ * <ol>
+ * <li>older than earliest range and purge time</li>
+ * <li>seen-at of next higher range</li>
+ * <li>seen-at of matching lower bound of range</li>
+ * <li>foreign revision is newer than most recent range</li>
+ * <li>local revision is newer than most recent range</li>
+ * </ol>
+ * This gives the following revision ordering:
+ * <pre>
+ * r1-0-1 < r3-0-1 < r-4-0-2 < r9-0-1 < r6-0-2
+ * </pre>
+ *
* @param r the revision
* @return the seen-at revision or {@code null} if the revision is older
- * than the earliest range.
+ * than the earliest range and purge time.
*/
Revision getRevisionSeen(Revision r) {
List<RevisionRange> list = map.get(r.getClusterId());
@@ -586,8 +629,9 @@ public class Revision {
// search from latest backward
// (binary search could be used, but we expect most queries
// at the end of the list)
+ RevisionRange range = null;
for (int i = list.size() - 1; i >= 0; i--) {
- RevisionRange range = list.get(i);
+ range = list.get(i);
int compare = r.compareRevisionTime(range.revision);
if (compare == 0) {
return range.seenAt;
@@ -597,15 +641,21 @@ public class Revision {
if (r.getClusterId() == currentClusterNodeId) {
// newer than all others, except for FUTURE
return NEWEST;
+ } else {
+ // happens in the future (not visible yet)
+ return FUTURE;
}
- // happens in the future (not visible yet)
- return FUTURE;
} else {
// there is a newer range
return list.get(i + 1).seenAt;
}
}
}
+ if (range != null && r.getTimestamp() > oldestTimestamp) {
+ // revision is older than earliest range and after purge
+ // timestamp. return seen-at revision of earliest range.
+ return range.seenAt;
+ }
return null;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1788_dd3437d4.diff |
bugs-dot-jar_data_OAK-1155_f64e8adc | ---
BugID: OAK-1155
Summary: PropertyIndex cost calculation is faulty
Description: |-
The cost calculation can easily go out of bounds when it needs to estimate (whenever there are more than 100 nodes).
The high value it returns can be higher than the traversal index which has a max of 10M, but can be less smaller.
For example:
100 nodes in the index:
with a single level /content cost is 6250000
adding a second level /content/data cost jumps to 1.544804416E9
101 nodes in the index:
with a single level /content cost is 100
adding a second level /content/data stays at 100
100 nodes, 12 levels deep, cost is 2.147483647E9
101 nodes, 12 levels deep, cost is 6.7108864E7
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java
index fa01dc5..3b0bf7b 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/property/strategy/ContentMirrorStoreStrategy.java
@@ -343,20 +343,7 @@ public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
* The current depth (number of parent nodes).
*/
int depth;
-
- /**
- * The total number of child nodes per node, for those nodes that were
- * fully traversed and do have child nodes. This value is used to
- * calculate the average width.
- */
- long widthTotal;
-
- /**
- * The number of nodes that were fully traversed and do have child
- * nodes. This value is used to calculate the average width.
- */
- int widthCount;
-
+
/**
* The sum of the depth of all matching nodes. This value is used to
* calculate the average depth.
@@ -375,20 +362,12 @@ public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
}
if (count < maxCount) {
depth++;
- int width = 0;
- boolean finished = true;
for (ChildNodeEntry entry : state.getChildNodeEntries()) {
if (count >= maxCount) {
- finished = false;
break;
}
- width++;
visit(entry.getNodeState());
}
- if (finished && width > 0) {
- widthTotal += width;
- widthCount++;
- }
depth--;
}
}
@@ -415,15 +394,9 @@ public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
return count;
}
double averageDepth = (int) (depthTotal / count);
- double averageWidth = 2;
- if (widthCount > 0) {
- averageWidth = (int) (widthTotal / widthCount);
- }
- // calculate with an average width of at least 2
- averageWidth = Math.max(2, averageWidth);
- // the number of estimated matches is calculated as the
- // of a estimated
- long estimatedNodes = (long) Math.pow(averageWidth, 2 * averageDepth);
+ // the number of estimated matches is higher
+ // the higher the average depth of the first hits
+ long estimatedNodes = (long) (count * Math.pow(1.1, averageDepth));
estimatedNodes = Math.min(estimatedNodes, Integer.MAX_VALUE);
return Math.max(count, (int) estimatedNodes);
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1155_f64e8adc.diff |
bugs-dot-jar_data_OAK-2029_e30023ba | ---
BugID: OAK-2029
Summary: Oak Lucene index doesn't get notified about updates when index is stored
on the file system
Description: |-
It looks like the the lucene IndexTracked class responsible for refreshing the in-memory cache of the lucene index doesn't get the update notification when the index is stored on the file system.
This results in searches not working until the next restart
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditorContext.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditorContext.java
index 6c2bfcc..3107e21 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditorContext.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditorContext.java
@@ -26,6 +26,7 @@ import static org.apache.lucene.store.NoLockFactory.getNoLockFactory;
import java.io.File;
import java.io.IOException;
+import java.util.Calendar;
import java.util.Set;
import javax.jcr.PropertyType;
@@ -35,6 +36,7 @@ import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
import org.apache.jackrabbit.oak.plugins.index.IndexUpdateCallback;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.util.ISO8601;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -165,6 +167,13 @@ public class LuceneIndexEditorContext {
void closeWriter() throws IOException {
if (writer != null) {
writer.close();
+
+ //OAK-2029 Record the last updated status so
+ //as to make IndexTracker detect changes when index
+ //is stored in file system
+ NodeBuilder status = definition.child(":status");
+ status.setProperty("lastUpdated", ISO8601.format(Calendar.getInstance()), Type.DATE);
+ status.setProperty("indexedNodes",indexedNodes);
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2029_e30023ba.diff |
bugs-dot-jar_data_OAK-1648_fdc54465 | ---
BugID: OAK-1648
Summary: Creating multiple checkpoint on same head revision overwrites previous entries
Description: |+
Currently when a checkpoint is created in DocumentNodeStore then it is saved in form of currentHeadRev=>expiryTime. Now if multiple checkpoints are created where head revision has not changed then only the last one would be saved and previous entries would be overridden as revision is used as key
One fix would be to change the expiry time only if the new expiry time is greater than previous entry. However doing that safely in a cluster (check then save) is currently not possible with DocumentStore API as the modCount check if only supported for Nodes.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Checkpoints.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Checkpoints.java
index e1d712d..8a6a98c 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Checkpoints.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Checkpoints.java
@@ -26,6 +26,7 @@ import java.util.SortedMap;
import java.util.concurrent.atomic.AtomicInteger;
import javax.annotation.CheckForNull;
+import javax.annotation.Nonnull;
import org.apache.jackrabbit.oak.commons.json.JsopBuilder;
import org.apache.jackrabbit.oak.commons.json.JsopReader;
@@ -73,7 +74,14 @@ class Checkpoints {
}
public Revision create(long lifetimeInMillis, Map<String, String> info) {
- Revision r = nodeStore.getHeadRevision();
+ // create a unique dummy commit we can use as checkpoint revision
+ Revision r = nodeStore.commitQueue.createRevision();
+ nodeStore.commitQueue.done(r, new CommitQueue.Callback() {
+ @Override
+ public void headOfQueue(@Nonnull Revision revision) {
+ // do nothing
+ }
+ });
createCounter.getAndIncrement();
performCleanupIfRequired();
UpdateOp op = new UpdateOp(ID, false);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1648_fdc54465.diff |
bugs-dot-jar_data_OAK-1749_591e4d4a | ---
BugID: OAK-1749
Summary: AsyncIndexUpdate may resurrect nodes
Description: There is a race condition in the AsyncIndexUpdate.run() method. The implementation
creates a checkpoint used as the after node state for the comparison with the previous
checkpoint. In a next step a builder is created from the current root state of the
node store. Node removed between the checkpoint call and retrieving the root state
may get resurrected by the AsyncIndexUpdate.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/AsyncIndexUpdate.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/AsyncIndexUpdate.java
index b509e1e..ea8d927 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/AsyncIndexUpdate.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/AsyncIndexUpdate.java
@@ -148,7 +148,7 @@ public class AsyncIndexUpdate implements Runnable {
return;
}
- NodeBuilder builder = store.getRoot().builder();
+ NodeBuilder builder = after.builder();
NodeBuilder async = builder.child(ASYNC);
NodeState before = null;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1749_591e4d4a.diff |
bugs-dot-jar_data_OAK-3530_4d231938 | ---
BugID: OAK-3530
Summary: TreeTypeProvider returns wrong type for version related node type definitions
Description: "the following paths with result in type {{VERSION}} instead of {{DEFAULT}}
and might lead to unexpected results wrt read access:\n\n- /jcr:system/jcr:nodeTypes/rep:system/rep:namedChildNodeDefinitions/jcr:versionStorage\n-
/jcr:system/jcr:nodeTypes/rep:system/rep:namedChildNodeDefinitions/jcr:activities\n-
/jcr:system/jcr:nodeTypes/rep:system/rep:namedChildNodeDefinitions/jcr:configurations\n
\ "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/TreeTypeProvider.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/TreeTypeProvider.java
index 1b1c336..2aadfd8 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/TreeTypeProvider.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/permission/TreeTypeProvider.java
@@ -18,6 +18,7 @@ package org.apache.jackrabbit.oak.security.authorization.permission;
import javax.annotation.Nonnull;
+import org.apache.jackrabbit.JcrConstants;
import org.apache.jackrabbit.oak.api.Tree;
import org.apache.jackrabbit.oak.plugins.version.VersionConstants;
import org.apache.jackrabbit.oak.spi.security.Context;
@@ -26,21 +27,21 @@ import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
/**
* <h3>TreeTypeProvider</h3>
- * For optimization purpose an Immutable tree will be associated with a
- * {@code TreeTypeProvider} that allows for fast detection of the following types
- * of Trees:
- *
- * <ul>
- * <li>{@link #TYPE_HIDDEN}: a hidden tree whose name starts with ":".
- * Please note that the whole subtree of a hidden node is considered hidden.</li>
- * <li>{@link #TYPE_AC}: A tree that stores access control content
- * and requires special access {@link org.apache.jackrabbit.oak.spi.security.authorization.permission.Permissions#READ_ACCESS_CONTROL permissions}.</li>
- * <li>{@link #TYPE_VERSION}: if a given tree is located within
- * any of the version related stores defined by JSR 283. Depending on the
- * permission evaluation implementation those items require special treatment.</li>
- * <li>{@link #TYPE_DEFAULT}: the default type for trees that don't
- * match any of the upper types.</li>
- * </ul>
+ * Allows to distinguish different types of trees based on their name, ancestry
+ * or primary type. Currently the following types are supported:
+ *
+ * <ul>
+ * <li>{@link #TYPE_HIDDEN}: a hidden tree whose name starts with ":".
+ * Please note that the whole subtree of a hidden node is considered hidden.</li>
+ * <li>{@link #TYPE_AC}: A tree that stores access control content
+ * and requires special access {@link org.apache.jackrabbit.oak.spi.security.authorization.permission.Permissions#READ_ACCESS_CONTROL permissions}.</li>
+ * <li>{@link #TYPE_VERSION}: if a given tree is located within
+ * any of the version related stores defined by JSR 283. Depending on the
+ * permission evaluation implementation those items require special treatment.</li>
+ * <li>{@link #TYPE_INTERNAL}: repository internal content that is not hidden (e.g. permission store)</li>
+ * <li>{@link #TYPE_DEFAULT}: the default type for trees that don't
+ * match any of the upper types.</li>
+ * </ul>
*/
public final class TreeTypeProvider {
@@ -55,53 +56,67 @@ public final class TreeTypeProvider {
// hidden trees
public static final int TYPE_HIDDEN = 16;
- private final Context contextInfo;
+ private final Context authorizationContext;
- public TreeTypeProvider(@Nonnull Context contextInfo) {
- this.contextInfo = contextInfo;
+ public TreeTypeProvider(@Nonnull Context authorizationContext) {
+ this.authorizationContext = authorizationContext;
}
- public int getType(Tree tree) {
+ public int getType(@Nonnull Tree tree) {
if (tree.isRoot()) {
return TYPE_DEFAULT;
} else {
- return getType(tree, getType(tree.getParent()));
+ Tree t = tree;
+ while (!t.isRoot()) {
+ int type = getType(t.getName(), t);
+ // stop walking up the hierarchy as soon as a special type is found
+ if (TYPE_DEFAULT != type) {
+ return type;
+ }
+ t = t.getParent();
+ }
+ return TYPE_DEFAULT;
}
}
- public int getType(Tree tree, int parentType) {
- if (tree.isRoot()) {
- return TYPE_DEFAULT;
- }
+ public int getType(@Nonnull Tree tree, int parentType) {
+ if (tree.isRoot()) {
+ return TYPE_DEFAULT;
+ }
- int type;
- switch (parentType) {
- case TYPE_HIDDEN:
- type = TYPE_HIDDEN;
- break;
- case TYPE_VERSION:
- type = TYPE_VERSION;
- break;
- case TYPE_INTERNAL:
- type = TYPE_INTERNAL;
- break;
- case TYPE_AC:
- type = TYPE_AC;
- break;
- default:
- String name = tree.getName();
- if (NodeStateUtils.isHidden(name)) {
- type = TYPE_HIDDEN;
- } else if (VersionConstants.VERSION_STORE_ROOT_NAMES.contains(name)) {
- type = TYPE_VERSION;
- } else if (PermissionConstants.REP_PERMISSION_STORE.equals(name)) {
- type = TYPE_INTERNAL;
- } else if (contextInfo.definesContextRoot(tree)) {
- type = TYPE_AC;
- } else {
- type = TYPE_DEFAULT;
- }
- }
- return type;
+ int type;
+ switch (parentType) {
+ case TYPE_HIDDEN:
+ type = TYPE_HIDDEN;
+ break;
+ case TYPE_VERSION:
+ type = TYPE_VERSION;
+ break;
+ case TYPE_INTERNAL:
+ type = TYPE_INTERNAL;
+ break;
+ case TYPE_AC:
+ type = TYPE_AC;
+ break;
+ default:
+ type = getType(tree.getName(), tree);
+ }
+ return type;
+ }
+
+ private int getType(@Nonnull String name, @Nonnull Tree tree) {
+ int type;
+ if (NodeStateUtils.isHidden(name)) {
+ type = TYPE_HIDDEN;
+ } else if (VersionConstants.VERSION_STORE_ROOT_NAMES.contains(name)) {
+ type = (JcrConstants.JCR_SYSTEM.equals(tree.getParent().getName())) ? TYPE_VERSION : TYPE_DEFAULT;
+ } else if (PermissionConstants.REP_PERMISSION_STORE.equals(name)) {
+ type = TYPE_INTERNAL;
+ } else if (authorizationContext.definesContextRoot(tree)) {
+ type = TYPE_AC;
+ } else {
+ type = TYPE_DEFAULT;
}
+ return type;
+ }
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3530_4d231938.diff |
bugs-dot-jar_data_OAK-1020_83427028 | ---
BugID: OAK-1020
Summary: Property value converion ignores reisdual property definition
Description: |-
Assume following node type which a property defined with type and a residual unnamed property also defined
{noformat}
[oak:foo]
- stringProp (String)
- * (undefined)
{noformat}
For such node type if a property {{stringProp}} is being set with a binary value then Oak converts it into a String property thereby causing binary stream to change. In JR2 conversion would not happen as conversion logic treats setting (stringProp,BINARY) as a residual property
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java
index 925a88b..c42f7f9 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/delegate/NodeDelegate.java
@@ -586,6 +586,7 @@ public class NodeDelegate extends ItemDelegate {
}
// First look for a matching named property definition
+ Tree fuzzyMatch = null;
for (Tree type : types) {
Tree definitions = type
.getChild(OAK_NAMED_PROPERTY_DEFINITIONS)
@@ -598,10 +599,12 @@ public class NodeDelegate extends ItemDelegate {
if (definition.exists()) {
return definition;
}
- if (!exactTypeMatch) {
- for (Tree def : definitions.getChildren()) {
- if (propertyType.isArray() == TreeUtil.getBoolean(def, JCR_MULTIPLE)) {
- return def;
+ for (Tree def : definitions.getChildren()) {
+ if (propertyType.isArray() == TreeUtil.getBoolean(def, JCR_MULTIPLE)) {
+ if (getBoolean(def, JCR_PROTECTED)) {
+ return null; // no fuzzy matches for protected items
+ } else if (!exactTypeMatch && fuzzyMatch == null) {
+ fuzzyMatch = def;
}
}
}
@@ -618,16 +621,17 @@ public class NodeDelegate extends ItemDelegate {
if (definition.exists()) {
return definition;
}
- if (!exactTypeMatch) {
+ if (!exactTypeMatch && fuzzyMatch == null) {
for (Tree def : definitions.getChildren()) {
if (propertyType.isArray() == TreeUtil.getBoolean(def, JCR_MULTIPLE)) {
- return def;
+ fuzzyMatch = def;
+ break;
}
}
}
}
- return null;
+ return fuzzyMatch;
}
private Tree findMatchingChildNodeDefinition(
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1020_83427028.diff |
bugs-dot-jar_data_OAK-429_c02ecef8 | ---
BugID: OAK-429
Summary: MemoryPropertyBuilder.assignFrom leads to ClassCastException on getPropertyState
with date properties
Description:
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryPropertyBuilder.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryPropertyBuilder.java
index 2e9ac56..286b453 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryPropertyBuilder.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryPropertyBuilder.java
@@ -223,11 +223,21 @@ public class MemoryPropertyBuilder<T> implements PropertyBuilder<T> {
setName(property.getName());
if (property.isArray()) {
isArray = true;
- setValues((Iterable<T>) property.getValue(type.getArrayType()));
+ if (type == Type.DATE) {
+ setValues((Iterable<T>) property.getValue(Type.STRINGS));
+ }
+ else {
+ setValues((Iterable<T>) property.getValue(type.getArrayType()));
+ }
}
else {
isArray = false;
- setValue(property.getValue(type));
+ if (type == Type.DATE) {
+ setValue((T) property.getValue(Type.STRING));
+ }
+ else {
+ setValue(property.getValue(type));
+ }
}
}
return this;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-429_c02ecef8.diff |
bugs-dot-jar_data_OAK-4431_7441a3d5 | ---
BugID: OAK-4431
Summary: Index path property should be considered optional for copy on read logic
Description: "As part of changes done for OAK-4347 logic assumes that indexPath is
always non null. This works fine for fresh setup where the indexPath would have
been set by the initial indexing. However for upgraded setup this assumption would
break as it might happen that index does not get updated with new approach and before
that a read is performed.\n\nCurrently with updated code on upgraded setup following
exception is seen \n\n{noformat}\nCaused by: javax.security.auth.login.LoginException:
java.lang.NullPointerException: Index path property [:indexPath] not found\n at
com.google.common.base.Preconditions.checkNotNull(Preconditions.java:236)\n at
org.apache.jackrabbit.oak.plugins.index.lucene.IndexDefinition.getIndexPathFromConfig(IndexDefinition.java:664)\n
\ at org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier.getSharedWorkingSet(IndexCopier.java:242)\n
\ at org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier.wrapForRead(IndexCopier.java:140)\n
\ at org.apache.jackrabbit.oak.plugins.index.lucene.IndexNode.open(IndexNode.java:53)\n
\ at org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker.findIndexNode(IndexTracker.java:179)\n
\ at org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker.acquireIndexNode(IndexTracker.java:154)\n
\ at org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex.getPlans(LucenePropertyIndex.java:250)\n{noformat}\n\nFor
this specific flow the indexPath can be passed in and not looked up from IndexDefinition"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java
index 09914c6..8eac46e 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java
@@ -137,13 +137,13 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
public Directory wrapForRead(String indexPath, IndexDefinition definition,
Directory remote) throws IOException {
Directory local = createLocalDirForIndexReader(indexPath, definition);
- return new CopyOnReadDirectory(remote, local, prefetchEnabled, indexPath, getSharedWorkingSet(definition));
+ return new CopyOnReadDirectory(remote, local, prefetchEnabled, indexPath, getSharedWorkingSet(indexPath));
}
public Directory wrapForWrite(IndexDefinition definition, Directory remote, boolean reindexMode) throws IOException {
Directory local = createLocalDirForIndexWriter(definition);
return new CopyOnWriteDirectory(remote, local, reindexMode,
- getIndexPathForLogging(definition), getSharedWorkingSet(definition));
+ getIndexPathForLogging(definition), getSharedWorkingSet(definition.getIndexPathFromConfig()));
}
@Override
@@ -238,9 +238,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
* @param defn index definition for which the directory is being created
* @return a set to maintain the state of new files being created by the COW Directory
*/
- private Set<String> getSharedWorkingSet(IndexDefinition defn){
- String indexPath = defn.getIndexPathFromConfig();
-
+ private Set<String> getSharedWorkingSet(String indexPath){
Set<String> sharedSet;
synchronized (sharedWorkingSetMap){
sharedSet = sharedWorkingSetMap.get(indexPath);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4431_7441a3d5.diff |
bugs-dot-jar_data_OAK-3110_d10362c0 | ---
BugID: OAK-3110
Summary: AsyncIndexer fails due to FileNotFoundException thrown by CopyOnWrite logic
Description: "At times the CopyOnWrite reports following exception\n\n{noformat}\n15.07.2015
14:20:35.930 *WARN* [pool-58-thread-1] org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate
The async index update failed\norg.apache.jackrabbit.oak.api.CommitFailedException:
OakLucene0004: Failed to close the Lucene index\n\tat org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditor.leave(LuceneIndexEditor.java:204)\n\tat
org.apache.jackrabbit.oak.plugins.index.IndexUpdate.leave(IndexUpdate.java:219)\n\tat
org.apache.jackrabbit.oak.spi.commit.VisibleEditor.leave(VisibleEditor.java:63)\n\tat
org.apache.jackrabbit.oak.spi.commit.EditorDiff.process(EditorDiff.java:56)\n\tat
org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate.updateIndex(AsyncIndexUpdate.java:366)\n\tat
org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate.run(AsyncIndexUpdate.java:311)\n\tat
org.apache.sling.commons.scheduler.impl.QuartzJobExecutor.execute(QuartzJobExecutor.java:105)\n\tat
org.quartz.core.JobRunShell.run(JobRunShell.java:207)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\tat
java.lang.Thread.run(Thread.java:745)\nCaused by: java.io.FileNotFoundException:
_2s7.fdt\n\tat org.apache.lucene.store.FSDirectory.fileLength(FSDirectory.java:261)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier$CopyOnWriteDirectory$COWLocalFileReference.fileLength(IndexCopier.java:837)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier$CopyOnWriteDirectory.fileLength(IndexCopier.java:607)\n\tat
org.apache.lucene.index.SegmentCommitInfo.sizeInBytes(SegmentCommitInfo.java:141)\n\tat
org.apache.lucene.index.DocumentsWriterPerThread.sealFlushedSegment(DocumentsWriterPerThread.java:529)\n\tat
org.apache.lucene.index.DocumentsWriterPerThread.flush(DocumentsWriterPerThread.java:502)\n\tat
org.apache.lucene.index.DocumentsWriter.doFlush(DocumentsWriter.java:508)\n\tat
org.apache.lucene.index.DocumentsWriter.flushAllThreads(DocumentsWriter.java:618)\n\tat
org.apache.lucene.index.IndexWriter.doFlush(IndexWriter.java:3147)\n\tat org.apache.lucene.index.IndexWriter.flush(IndexWriter.java:3123)\n\tat
org.apache.lucene.index.IndexWriter.closeInternal(IndexWriter.java:988)\n\tat org.apache.lucene.index.IndexWriter.close(IndexWriter.java:932)\n\tat
org.apache.lucene.index.IndexWriter.close(IndexWriter.java:894)\n\tat org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditorContext.closeWriter(LuceneIndexEditorContext.java:192)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditor.leave(LuceneIndexEditor.java:202)\n\t...
10 common frames omitted\n{noformat}"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java
index e1e8a53..bea03c6 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexCopier.java
@@ -75,6 +75,7 @@ import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.Iterables.toArray;
import static com.google.common.collect.Iterables.transform;
import static com.google.common.collect.Maps.newConcurrentMap;
+import static com.google.common.collect.Maps.newHashMap;
import static org.apache.jackrabbit.oak.commons.IOUtils.humanReadableByteCount;
public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
@@ -111,6 +112,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
private final Map<String, String> indexPathMapping = newConcurrentMap();
+ private final Map<String, Set<String>> sharedWorkingSetMap = newHashMap();
private final Map<String, String> indexPathVersionMapping = newConcurrentMap();
private final ConcurrentMap<String, LocalIndexFile> failedToDeleteFiles = newConcurrentMap();
private final Set<LocalIndexFile> copyInProgressFiles = Collections.newSetFromMap(new ConcurrentHashMap<LocalIndexFile, Boolean>());
@@ -131,12 +133,13 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
public Directory wrapForRead(String indexPath, IndexDefinition definition,
Directory remote) throws IOException {
Directory local = createLocalDirForIndexReader(indexPath, definition);
- return new CopyOnReadDirectory(remote, local, prefetchEnabled, indexPath);
+ return new CopyOnReadDirectory(remote, local, prefetchEnabled, indexPath, getSharedWorkingSet(definition));
}
public Directory wrapForWrite(IndexDefinition definition, Directory remote, boolean reindexMode) throws IOException {
Directory local = createLocalDirForIndexWriter(definition);
- return new CopyOnWriteDirectory(remote, local, reindexMode, getIndexPathForLogging(definition));
+ return new CopyOnWriteDirectory(remote, local, reindexMode,
+ getIndexPathForLogging(definition), getSharedWorkingSet(definition));
}
@Override
@@ -238,6 +241,34 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
}
/**
+ * Provide the corresponding shared state to enable COW inform COR
+ * about new files it is creating while indexing. This would allow COR to ignore
+ * such files while determining the deletion candidates.
+ *
+ * @param defn index definition for which the directory is being created
+ * @return a set to maintain the state of new files being created by the COW Directory
+ */
+ private Set<String> getSharedWorkingSet(IndexDefinition defn){
+ String indexPath = defn.getIndexPathFromConfig();
+
+ if (indexPath == null){
+ //With indexPath null the working directory would not
+ //be shared between COR and COW. So just return a new set
+ return new HashSet<String>();
+ }
+
+ Set<String> sharedSet;
+ synchronized (sharedWorkingSetMap){
+ sharedSet = sharedWorkingSetMap.get(indexPath);
+ if (sharedSet == null){
+ sharedSet = Sets.newConcurrentHashSet();
+ sharedWorkingSetMap.put(indexPath, sharedSet);
+ }
+ }
+ return sharedSet;
+ }
+
+ /**
* Creates the workDir. If it exists then it is cleaned
*
* @param indexRootDir root directory under which all indexing related files are managed
@@ -274,12 +305,17 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
*/
private final Set<String> localFileNames = Sets.newConcurrentHashSet();
- public CopyOnReadDirectory(Directory remote, Directory local, boolean prefetch, String indexPath) throws IOException {
+ public CopyOnReadDirectory(Directory remote, Directory local, boolean prefetch,
+ String indexPath, Set<String> sharedWorkingSet) throws IOException {
super(remote);
this.remote = remote;
this.local = local;
this.indexPath = indexPath;
+
this.localFileNames.addAll(Arrays.asList(local.listAll()));
+ //Remove files which are being worked upon by COW
+ this.localFileNames.removeAll(sharedWorkingSet);
+
if (prefetch) {
prefetchIndexFiles();
}
@@ -549,6 +585,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
private final CountDownLatch copyDone = new CountDownLatch(1);
private final boolean reindexMode;
private final String indexPathForLogging;
+ private final Set<String> sharedWorkingSet;
/**
* Current background task
@@ -602,12 +639,13 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
};
public CopyOnWriteDirectory(Directory remote, Directory local, boolean reindexMode,
- String indexPathForLogging) throws IOException {
+ String indexPathForLogging, Set<String> sharedWorkingSet) throws IOException {
super(local);
this.remote = remote;
this.local = local;
this.indexPathForLogging = indexPathForLogging;
this.reindexMode = reindexMode;
+ this.sharedWorkingSet = sharedWorkingSet;
initialize();
}
@@ -647,6 +685,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
}
ref = new COWLocalFileReference(name);
fileMap.put(name, ref);
+ sharedWorkingSet.add(name);
return ref.createOutput(context);
}
@@ -723,6 +762,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
local.close();
remote.close();
+ sharedWorkingSet.clear();
}
@Override
@@ -994,7 +1034,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
} catch (IOException e) {
failedToDelete(file);
log.debug("Error occurred while removing deleted file {} from Local {}. " +
- "Attempt would be maid to delete it on next run ", fileName, dir, e);
+ "Attempt would be made to delete it on next run ", fileName, dir, e);
}
return successFullyDeleted;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3110_d10362c0.diff |
bugs-dot-jar_data_OAK-3105_311e8b33 | ---
BugID: OAK-3105
Summary: SegmentWriter doesn't properly check the length of external blob IDs
Description: "To store the length field of an external binary ID, the following encoding
is used:\n\n{noformat}\n1110 + 4bit + 8bit\n{noformat}\n\nwhich allows to store
numbers between 0 and 2{^}12^ - 1. \n\nThe current implementation of {{SegmentWriter}}
allows the length of binary IDs to range between 0 and 2{^}13^ - 1, writing incorrect
data when the length of the binary ID ranges from 2{^}12^ to 2{^}13^ - 1.\n\nWhen
reading this incorrect data back, an {{IllegalStateException}} is thrown complaining
that the first byte of the length fields has an unexpected value record type. See
OAK-1842 for an example."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
index abab3d4..ab762f7 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
@@ -595,7 +595,12 @@ public class SegmentWriter {
byte[] data = reference.getBytes(Charsets.UTF_8);
int length = data.length;
- checkArgument(length < 8192);
+ // When writing a binary ID, the four most significant bits of the
+ // length field should be "1110", leaving 12 other bits to store the
+ // length itself. This means that the values of the length field can
+ // only range between 0 and 2^12 - 1.
+
+ checkArgument(length < 4096);
RecordId id = prepare(RecordType.VALUE, 2 + length);
int len = length | 0xE000;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3105_311e8b33.diff |
bugs-dot-jar_data_OAK-926_e1ae968c | ---
BugID: OAK-926
Summary: 'MongoMK: split documents when they are too large'
Description: "Currently, the MongoMK stores all revisions of a node in the same document.
Once there are many revisions, the document gets very large.\n\nThe plan is to split
the document when it gets big.\n\nIt looks like this isn't just a \"nice to have\",
but also a problem for some use cases. Example stack trace:\n\n{code}\n21.07.2013
12:35:47.554 *ERROR* ...\nCaused by: java.lang.IllegalArgumentException: 'ok' should
never be null...\n\tat com.mongodb.CommandResult.ok(CommandResult.java:48)\n\tat
com.mongodb.DBCollection.findAndModify(DBCollection.java:375)\n\tat org.apache.jackrabbit.oak.plugins.mongomk.MongoDocumentStore.findAndModify(MongoDocumentStore.java:302)\n\t...
32 more\n{code}\n\nat the same time in the MongoDB log:\n\n{code}\nSun Jul 21 12:35:47.334
[conn7] warning: log line attempted (159k) over max size(10k), \nprinting beginning
and end ... \nAssertion: 10334:BSONObj size: 16795219 (0x53460001) is invalid. \nSize
must be between 0 and 16793600(16MB) \nFirst element: :childOrder: { r1400279f22d-0-1:
\"[]\", ...\n{code}\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java
index 9b6835f..dfe473d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java
@@ -118,6 +118,11 @@ public class NodeDocument extends Document {
final DocumentStore store;
+ /**
+ * Parsed and sorted set of previous revisions.
+ */
+ private SortedMap<Revision, Range> previous;
+
private final long time = System.currentTimeMillis();
NodeDocument(@Nonnull DocumentStore store) {
@@ -151,19 +156,23 @@ public class NodeDocument extends Document {
/**
* Returns <code>true</code> if the given <code>revision</code> is marked
- * committed in <strong>this</strong> document including previous documents.
+ * committed.
*
* @param revision the revision.
* @return <code>true</code> if committed; <code>false</code> otherwise.
*/
public boolean isCommitted(@Nonnull Revision revision) {
+ NodeDocument commitRootDoc = getCommitRoot(checkNotNull(revision));
+ if (commitRootDoc == null) {
+ return false;
+ }
String rev = checkNotNull(revision).toString();
- String value = getLocalRevisions().get(rev);
+ String value = commitRootDoc.getLocalRevisions().get(rev);
if (value != null) {
return Utils.isCommitted(value);
}
// check previous docs
- for (NodeDocument prev : getPreviousDocs(revision, REVISIONS)) {
+ for (NodeDocument prev : commitRootDoc.getPreviousDocs(revision, REVISIONS)) {
if (prev.containsRevision(revision)) {
return prev.isCommitted(revision);
}
@@ -659,20 +668,26 @@ public class NodeDocument extends Document {
main.removeMapEntry(property, r);
old.setMapEntry(property, r, entry.getValue());
}
- splitOps.add(old);
- splitOps.add(main);
}
+ splitOps.add(old);
+ splitOps.add(main);
}
return splitOps;
}
- @Override
+ /**
+ * Returns previous revision ranges for this document. The revision keys are
+ * sorted descending, newest first!
+ *
+ * @return the previous ranges for this document.
+ */
@Nonnull
- protected Map<?, ?> transformAndSeal(@Nonnull Map<Object, Object> map,
- @Nullable String key,
- int level) {
- if (level == 1) {
- if (PREVIOUS.equals(key)) {
+ SortedMap<Revision, Range> getPreviousRanges() {
+ if (previous == null) {
+ Map<String, String> map = getLocalMap(PREVIOUS);
+ if (map.isEmpty()) {
+ previous = EMPTY_RANGE_MAP;
+ } else {
SortedMap<Revision, Range> transformed = new TreeMap<Revision, Range>(
new Comparator<Revision>() {
@Override
@@ -687,30 +702,14 @@ public class NodeDocument extends Document {
return c;
}
});
- for (Map.Entry<Object, Object> entry : map.entrySet()) {
- Revision high = Revision.fromString(entry.getKey().toString());
- Revision low = Revision.fromString(entry.getValue().toString());
+ for (Map.Entry<String, String> entry : map.entrySet()) {
+ Revision high = Revision.fromString(entry.getKey());
+ Revision low = Revision.fromString(entry.getValue());
transformed.put(high, new Range(high, low));
}
- return Collections.unmodifiableSortedMap(transformed);
+ previous = Collections.unmodifiableSortedMap(transformed);
}
}
- return super.transformAndSeal(map, key, level);
- }
-
- /**
- * Returns previous revision ranges for this document. The revision keys are
- * sorted descending, newest first!
- *
- * @return the previous ranges for this document.
- */
- @Nonnull
- SortedMap<Revision, Range> getPreviousRanges() {
- @SuppressWarnings("unchecked")
- SortedMap<Revision, Range> previous = (SortedMap<Revision, Range>) get(PREVIOUS);
- if (previous == null) {
- previous = EMPTY_RANGE_MAP;
- }
return previous;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-926_e1ae968c.diff |
bugs-dot-jar_data_OAK-395_e6c31270 | ---
BugID: OAK-395
Summary: Inconsistency in Node#setProperty in case of null value
Description: "Setting a null value to a single valued property will result\nin 'null'
being returned while executing the same on a multivalued\nproperty will return the
removed property.\n\njr2 returned the removed property in both cases as far as i
\nremember and i would suggest that we don't change that behavior. in\nparticular
since the specification IMO doesn't allow to return\nnull-values for these methods."
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
index 2771c28..a3d9da9 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/NodeImpl.java
@@ -1450,13 +1450,17 @@ public class NodeImpl extends ItemImpl<NodeDelegate> implements Node {
return sessionDelegate.perform(new SessionOperation<Property>() {
@Override
public Property perform() throws RepositoryException {
+ String oakName = sessionDelegate.getOakPathOrThrow(jcrName);
if (value == null) {
- Property property = getProperty(jcrName);
- property.remove();
- return property;
+ if (hasProperty(jcrName)) {
+ Property property = getProperty(jcrName);
+ property.remove();
+ return property;
+ } else {
+ return new PropertyImpl(new PropertyDelegate(
+ sessionDelegate, dlg.getLocation().getChild(oakName)));
+ }
} else {
- String oakName = sessionDelegate.getOakPathOrThrow(jcrName);
-
PropertyDefinition definition;
if (hasProperty(jcrName)) {
definition = getProperty(jcrName).getDefinition();
@@ -1485,13 +1489,17 @@ public class NodeImpl extends ItemImpl<NodeDelegate> implements Node {
return sessionDelegate.perform(new SessionOperation<Property>() {
@Override
public Property perform() throws RepositoryException {
+ String oakName = sessionDelegate.getOakPathOrThrow(jcrName);
if (values == null) {
- Property p = getProperty(jcrName);
- p.remove();
- return p;
+ if (hasProperty(jcrName)) {
+ Property property = getProperty(jcrName);
+ property.remove();
+ return property;
+ } else {
+ return new PropertyImpl(new PropertyDelegate(
+ sessionDelegate, dlg.getLocation().getChild(oakName)));
+ }
} else {
- String oakName = sessionDelegate.getOakPathOrThrow(jcrName);
-
PropertyDefinition definition;
if (hasProperty(jcrName)) {
definition = getProperty(jcrName).getDefinition();
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-395_e6c31270.diff |
bugs-dot-jar_data_OAK-2359_b3071839 | ---
BugID: OAK-2359
Summary: read is inefficient when there are many split documents
Description: "As reported in OAK-2358 there is a potential problem with revisionGC
not cleaning up split documents properly (in 1.0.8.r1644758 at least). \n\nAs a
side-effect, having many garbage-revisions renders the diffImpl algorithm to become
very slow - normally it would take only a few millis, but with nodes that have many
split-documents I can see diffImpl take hundres of millis, sometimes up to a few
seconds. Which causes the observation dequeuing to be slower than the rate in which
observation events are enqueued, which results in observation queue never being
cleaned up and event handling being delayed more and more.\n\nAdding some logging
showed that diffImpl would often read many split-documents, which supports the assumption
that the revisionGC not cleaning up revisions has the diffImpl-slowness as a side-effect.
Having said that - diffImpl should probably still be able to run fast, since all
the revisions it should look at should be in the main document, not in split documents.\n\nI
dont have a test case handy for this at the moment unfortunately - if more is coming
up, I'll add more details here."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 2f23838..8000126 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -798,7 +798,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
min, readRevision, validRevisions, lastRevs);
// check if there may be more recent values in a previous document
- if (value != null && !getPreviousRanges().isEmpty()) {
+ if (!getPreviousRanges().isEmpty()) {
Revision newest = getLocalMap(key).firstKey();
if (isRevisionNewer(nodeStore, newest, value.revision)) {
// not reading the most recent value, we may need to
@@ -894,13 +894,13 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
// check local deleted map first
Value value = getLatestValue(context, getLocalDeleted(),
null, maxRev, validRevisions, lastRevs);
- if (value == null && !getPreviousRanges().isEmpty()) {
+ if (value.value == null && !getPreviousRanges().isEmpty()) {
// need to check complete map
value = getLatestValue(context, getDeleted(),
null, maxRev, validRevisions, lastRevs);
}
- return value != null && "false".equals(value.value) ? value.revision : null;
+ return "false".equals(value.value) ? value.revision : null;
}
/**
@@ -1435,10 +1435,12 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
/**
* Get the latest property value that is larger or equal the min revision,
- * and smaller or equal the readRevision revision. A {@code null} return
- * value indicates that the property was not set or removed within the given
- * range. A non-null value means the the property was either set or removed
- * depending on {@link Value#value}.
+ * and smaller or equal the readRevision revision. The returned value will
+ * provide the revision when the value was set between the {@code min} and
+ * {@code readRevision}. The returned value will have a {@code null} value
+ * contained if there is no valid change within the given range. In this
+ * case the associated revision is {@code min} or {@code readRevision} if
+ * no {@code min} is provided.
*
* @param valueMap the sorted revision-value map
* @param min the minimum revision (null meaning unlimited)
@@ -1446,9 +1448,9 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
* @param validRevisions map of revision to commit value considered valid
* against the given readRevision.
* @param lastRevs to keep track of the most recent modification.
- * @return the value, or null if not found
+ * @return the latest value from the {@code readRevision} point of view.
*/
- @CheckForNull
+ @Nonnull
private Value getLatestValue(@Nonnull RevisionContext context,
@Nonnull Map<Revision, String> valueMap,
@Nullable Revision min,
@@ -1486,7 +1488,9 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
return new Value(commitRev, entry.getValue());
}
}
- return null;
+
+ Revision r = min != null ? min : readRevision;
+ return new Value(r, null);
}
@Override
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java
index 94d21d3..7b1bec6 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ValueMap.java
@@ -120,27 +120,34 @@ class ValueMap {
@Override
public String get(Object key) {
+ Revision r = (Revision) key;
// first check values map of this document
- String value = map.get(key);
- if (value != null) {
- return value;
+ if (map.containsKey(r)) {
+ return map.get(r);
}
- Revision r = (Revision) key;
for (NodeDocument prev : doc.getPreviousDocs(property, r)) {
- value = prev.getValueMap(property).get(key);
+ String value = prev.getValueMap(property).get(r);
if (value != null) {
return value;
}
}
- // not found
+ // not found or null
return null;
}
@Override
public boolean containsKey(Object key) {
- // can use get()
- // the values map does not have null values
- return get(key) != null;
+ // check local map first
+ if (map.containsKey(key)) {
+ return true;
+ }
+ Revision r = (Revision) key;
+ for (NodeDocument prev : doc.getPreviousDocs(property, r)) {
+ if (prev.getValueMap(property).containsKey(key)) {
+ return true;
+ }
+ }
+ return false;
}
};
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2359_b3071839.diff |
bugs-dot-jar_data_OAK-2427_e6d4f9a6 | ---
BugID: OAK-2427
Summary: XPath to SQL-2 conversion fails due to escaping error
Description: "The problem is that the comment is not properly escaped (a C-style comment),
so that \"*/\" in the XPath query accidentally ends the comment in the SQL-2 query.\n\nThe
following query can't be converted to SQL-2, because it contains \"*/\":\n\n{noformat}\n/jcr:root/etc//*[@type
= 'product' \nand ((@size = 'M' or */@size= 'M' or */*/@size = 'M' \nor */*/*/@size
= 'M' or */*/*/*/@size = 'M' or */*/*/*/*/@size = 'M'))]\n{noformat}\n\nI think
this was introduced by OAK-2354 \nhttp://svn.apache.org/viewvc?view=revision&revision=1645616\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
index 6e614b5..66ccb04 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
@@ -179,16 +179,8 @@ public class Statement {
buff.append(orderList.get(i));
}
}
-
// leave original xpath string as a comment
- if (xpathQuery != null) {
- buff.append(" /* xpath: ");
- // the xpath query may not contain the "end comment" marker
- String xpathEscaped = xpathQuery.replaceAll("\\*\\/", "* /");
- buff.append(xpathEscaped);
- buff.append(" */");
- }
-
+ appendXPathAsComment(buff, xpathQuery);
return buff.toString();
}
@@ -251,14 +243,21 @@ public class Statement {
}
}
// leave original xpath string as a comment
- if (xpathQuery != null) {
- buff.append(" /* xpath: ");
- buff.append(xpathQuery);
- buff.append(" */");
- }
+ appendXPathAsComment(buff, xpathQuery);
return buff.toString();
}
}
+
+ private static void appendXPathAsComment(StringBuilder buff, String xpath) {
+ if (xpath == null) {
+ return;
+ }
+ buff.append(" /* xpath: ");
+ // the xpath query may contain the "end comment" marker
+ String xpathEscaped = xpath.replaceAll("\\*\\/", "* /");
+ buff.append(xpathEscaped);
+ buff.append(" */");
+ }
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2427_e6d4f9a6.diff |
bugs-dot-jar_data_OAK-3249_64712735 | ---
BugID: OAK-3249
Summary: Some version copy settings conflicts with the earlyShutdown
Description: |-
The {{RepositoryUpgrade#earlyShutdown}} property causes the source CRX2 repository to shutdown right after copying the content, before the first commit hook is launched. However, the {{VersionableEditor}} hook sometimes needs access to the source repository, to read the version histories that hasn't been copied yet (as the version histories are copied in two stages). As a result, the {{earlyShutdown}} may cause the upgrade process to fail.
{{earlyShutdown}} should be overriden for all cases in which the source repository is still needed in the commit hook phase. In particular, it should be set to {{false}} if:
* orphaned version histories are not copied,
* orphaned version histories are copied, but the copyOrphanedVersion date is set after the copyVersion date.
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
index f4684cc..93b5133 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
@@ -462,7 +462,7 @@ public class RepositoryUpgrade {
createIndexEditorProvider()
)));
- target.merge(builder, new LoggingCompositeHook(hooks, source, earlyShutdown), CommitInfo.EMPTY);
+ target.merge(builder, new LoggingCompositeHook(hooks, source, overrideEarlyShutdown()), CommitInfo.EMPTY);
logger.info("Processing commit hooks completed in {}s ({})", watch.elapsed(TimeUnit.SECONDS), watch);
logger.debug("Repository upgrade completed.");
} catch (Exception e) {
@@ -470,6 +470,24 @@ public class RepositoryUpgrade {
}
}
+ private boolean overrideEarlyShutdown() {
+ if (earlyShutdown == false) {
+ return false;
+ }
+
+ final VersionCopyConfiguration c = this.versionCopyConfiguration;
+ if (c.isCopyVersions() && c.skipOrphanedVersionsCopy()) {
+ logger.info("Overriding early shutdown to false because of the copy versions settings");
+ return false;
+ }
+ if (c.isCopyVersions() && !c.skipOrphanedVersionsCopy()
+ && c.getOrphanedMinDate().after(c.getVersionsMinDate())) {
+ logger.info("Overriding early shutdown to false because of the copy versions settings");
+ return false;
+ }
+ return true;
+ }
+
private static EditorProvider createTypeEditorProvider() {
return new EditorProvider() {
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3249_64712735.diff |
bugs-dot-jar_data_OAK-478_a7f0e808 | ---
BugID: OAK-478
Summary: NPE in the TypeValidator when using the Lucene Index
Description:
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/commit/ValidatingHook.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/commit/ValidatingHook.java
index b3d10b3..7840eca 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/commit/ValidatingHook.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/commit/ValidatingHook.java
@@ -151,7 +151,19 @@ public class ValidatingHook implements CommitHook {
@Override
public void childNodeAdded(String name, NodeState after) {
- childNodeChanged(name, EMPTY_NODE, after);
+ if (NodeStateUtils.isHidden(name)) {
+ return;
+ }
+ if (exception == null) {
+ try {
+ Validator v = validator.childNodeAdded(name, after);
+ if (v != null) {
+ validate(v, EMPTY_NODE, after);
+ }
+ } catch (CommitFailedException e) {
+ exception = e;
+ }
+ }
}
@Override
@@ -175,7 +187,19 @@ public class ValidatingHook implements CommitHook {
@Override
public void childNodeDeleted(String name, NodeState before) {
- childNodeChanged(name, before, EMPTY_NODE);
+ if (NodeStateUtils.isHidden(name)) {
+ return;
+ }
+ if (exception == null) {
+ try {
+ Validator v = validator.childNodeDeleted(name, before);
+ if (v != null) {
+ validate(v, before, EMPTY_NODE);
+ }
+ } catch (CommitFailedException e) {
+ exception = e;
+ }
+ }
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-478_a7f0e808.diff |
bugs-dot-jar_data_OAK-4432_c9765c21 | ---
BugID: OAK-4432
Summary: 'Ignore files in the root directory of the FileDataStore in #getAllIdentifiers'
Description: "The call to OakFileDataStore#getAllIdentifiers should ignore the the
files directly at the root of the DataStore (These files are used for SharedDataStore
etc). This does not cause any functional problems but leads to logging warning in
the logs. \nThere is already a check but it fails when the data store root is specified
as a relative path."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java
index a55d100..5e86881 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/blob/datastore/OakFileDataStore.java
@@ -38,7 +38,6 @@ import com.google.common.io.BaseEncoding;
import com.google.common.io.Closeables;
import com.google.common.io.Files;
-import org.apache.commons.io.FilenameUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.filefilter.FileFilterUtils;
import org.apache.jackrabbit.core.data.DataIdentifier;
@@ -50,6 +49,8 @@ import org.apache.jackrabbit.oak.plugins.blob.SharedDataStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.commons.io.FilenameUtils.normalizeNoEndSeparator;
+
/**
* Oak specific extension of JR2 FileDataStore which enables
* provisioning the signing key via OSGi config
@@ -67,12 +68,13 @@ public class OakFileDataStore extends FileDataStore implements SharedDataStore {
@Override
public Iterator<DataIdentifier> getAllIdentifiers() {
- final String path = FilenameUtils.normalizeNoEndSeparator(getPath());
+ final String path = normalizeNoEndSeparator(getPath());
return Files.fileTreeTraverser().postOrderTraversal(new File(getPath()))
.filter(new Predicate<File>() {
@Override
public boolean apply(File input) {
- return input.isFile() && !input.getParent().equals(path);
+ return input.isFile() &&
+ !normalizeNoEndSeparator(input.getParent()).equals(path);
}
})
.transform(new Function<File, DataIdentifier>() {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4432_c9765c21.diff |
bugs-dot-jar_data_OAK-1916_705ce1d1 | ---
BugID: OAK-1916
Summary: NodeStoreKernel doesn't handle array properties correctly
Description: "{{NodeStoreKernel}} currently only supports array properties of type
long. For other types it will fail with an {{IllegalStateException}}. See also the
FIXME in the code."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
index fe5221b..9eb6cd9 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeState.java
@@ -116,10 +116,14 @@ public class SegmentNodeState extends Record implements NodeState {
public PropertyState getProperty(String name) {
checkNotNull(name);
Template template = getTemplate();
+ PropertyState property = null;
if (JCR_PRIMARYTYPE.equals(name)) {
- return template.getPrimaryType();
+ property = template.getPrimaryType();
} else if (JCR_MIXINTYPES.equals(name)) {
- return template.getMixinTypes();
+ property = template.getMixinTypes();
+ }
+ if (property != null) {
+ return property;
}
PropertyTemplate propertyTemplate =
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1916_705ce1d1.diff |
bugs-dot-jar_data_OAK-3817_2a02a138 | ---
BugID: OAK-3817
Summary: Hidden properties (one prefixed with ':') in lucene's analyzer configuration
fail to construct analyzers
Description: This is similar to OAK-2524 in the sense that lucene doesn't like extra
arguments sent its way while constructing analyzers. In some cases (like node move
adds {{:source-path}}) we have hidden properties added to index definition nodes
and they get passed along to lucene analyzer factories which complain and fail.
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
index 589fc63..a02983f 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
@@ -41,6 +41,7 @@ import org.apache.jackrabbit.oak.plugins.index.lucene.util.ConfigUtil;
import org.apache.jackrabbit.oak.plugins.index.lucene.util.TokenizerChain;
import org.apache.jackrabbit.oak.plugins.tree.TreeFactory;
import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
import org.apache.lucene.analysis.util.CharArraySet;
@@ -201,6 +202,7 @@ final class NodeStateAnalyzerFactory{
String name = ps.getName();
if (ps.getType() != Type.BINARY
&& !ps.isArray()
+ && !(name != null && NodeStateUtils.isHidden(name))
&& !IGNORE_PROP_NAMES.contains(name)) {
result.put(name, ps.getValue(Type.STRING));
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3817_2a02a138.diff |
bugs-dot-jar_data_OAK-1308_69ba2a54 | ---
BugID: OAK-1308
Summary: XPath queries with ISO9075 escaped properties don't work
Description: "XPath queries with ISO9075 escaped properties or relative path don't
work as expected. Example: \n\n{code}\n/jcr:root//*/element(*,rep:User)[_x002e_tokens/@jcr:primaryType]\n{code}\n\nThe
relative property should be converted to \".tokens/@jcr:primaryType\", but is not.\n\nThis
issue is similar to OAK-1000, but for property names or relative properties."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/XPathToSQL2Converter.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/XPathToSQL2Converter.java
index 91a257b..3cf0717 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/XPathToSQL2Converter.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/XPathToSQL2Converter.java
@@ -516,7 +516,7 @@ public class XPathToSQL2Converter {
boolean isPath = false;
while (true) {
if (currentTokenType == IDENTIFIER) {
- String name = readIdentifier();
+ String name = readPathSegment();
buff.append(name);
} else if (readIf("*")) {
// any node
@@ -533,7 +533,7 @@ public class XPathToSQL2Converter {
// xpath supports @*, even thought jackrabbit may not
buff.append('*');
} else {
- buff.append(readIdentifier());
+ buff.append(readPathSegment());
}
return new Expression.Property(currentSelector, buff.toString());
} else {
@@ -649,7 +649,7 @@ public class XPathToSQL2Converter {
if (readIf("*")) {
return new Expression.Property(currentSelector, "*");
}
- return new Expression.Property(currentSelector, readIdentifier());
+ return new Expression.Property(currentSelector, readPathSegment());
}
private void readExcerpt() throws ParseException {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1308_69ba2a54.diff |
bugs-dot-jar_data_OAK-1926_9225a3e2 | ---
BugID: OAK-1926
Summary: UnmergedBranch state growing with empty BranchCommit leading to performance
degradation
Description: "In some cluster deployment cases it has been seen that in memory state
of UnmergedBranches contains large number of empty commits. For e.g. in one of
of the runs there were 750 entries in the UnmergedBranches and each Branch had empty
branch commits.\n\nIf there are large number of UnmergedBranches then read performance
would degrade as for determining revision validity currently logic scans all branches\n\nBelow
is some part of UnmergedBranch state\n\n{noformat}\nBranch 1\n1 -> br146d2edb7a7-0-1
(true) (revision: \"br146d2edb7a7-0-1\", clusterId: 1, time: \"2014-06-25 05:08:52.903\",
branch: true)\n2 -> br146d2f0450b-0-1 (true) (revision: \"br146d2f0450b-0-1\", clusterId:
1, time: \"2014-06-25 05:11:40.171\", branch: true)\nBranch 2\n1 -> br146d2ef1d08-0-1
(true) (revision: \"br146d2ef1d08-0-1\", clusterId: 1, time: \"2014-06-25 05:10:24.392\",
branch: true)\nBranch 3\n1 -> br146d2ed26ca-0-1 (true) (revision: \"br146d2ed26ca-0-1\",
clusterId: 1, time: \"2014-06-25 05:08:15.818\", branch: true)\n2 -> br146d2edfd0e-0-1
(true) (revision: \"br146d2edfd0e-0-1\", clusterId: 1, time: \"2014-06-25 05:09:10.670\",
branch: true)\nBranch 4\n1 -> br146d2ecd85b-0-1 (true) (revision: \"br146d2ecd85b-0-1\",
clusterId: 1, time: \"2014-06-25 05:07:55.739\", branch: true)\nBranch 5\n1 -> br146d2ec21a0-0-1
(true) (revision: \"br146d2ec21a0-0-1\", clusterId: 1, time: \"2014-06-25 05:07:08.960\",
branch: true)\n2 -> br146d2ec8eca-0-1 (true) (revision: \"br146d2ec8eca-0-1\", clusterId:
1, time: \"2014-06-25 05:07:36.906\", branch: true)\nBranch 6\n1 -> br146d2eaf159-1-1
(true) (revision: \"br146d2eaf159-1-1\", clusterId: 1, time: \"2014-06-25 05:05:51.065\",
counter: 1, branch: true)\nBranch 7\n1 -> br146d2e9a513-0-1 (true) (revision: \"br146d2e9a513-0-1\",
clusterId: 1, time: \"2014-06-25 05:04:26.003\", branch: true)\n{noformat}\n\n[~mreutegg]
Suggested that these branch might be for those revision which have resulted in a
collision and upon checking it indeed appears to be the case (value true in brackets
above indicate that). Further given the age of such revision it looks like they
get populated upon startup itself\n\n*Fix*\n* Need to check why we need to populate
the UnermgedBranch\n* Possibly implement some purge job which would remove such
stale entries \n\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 92e0b0a..9e721a0 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -556,30 +556,40 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
}
/**
- * Gets a sorted map of uncommitted revisions of this document with the
+ * Purge the uncommitted revisions of this document with the
* local cluster node id as returned by the {@link RevisionContext}. These
* are the {@link #REVISIONS} entries where {@link Utils#isCommitted(String)}
* returns false.
*
+ * <p>
+ * <bold>Note</bold> - This method should only be invoked upon startup
+ * as then only we can safely assume that these revisions would not be
+ * committed
+ * </p>
+ *
* @param context the revision context.
- * @return the uncommitted revisions of this document.
+ * @return count of the revision entries purged
*/
- public SortedMap<Revision, Revision> getUncommittedRevisions(RevisionContext context) {
+ public int purgeUncommittedRevisions(RevisionContext context) {
// only look at revisions in this document.
// uncommitted revisions are not split off
Map<Revision, String> valueMap = getLocalRevisions();
- SortedMap<Revision, Revision> revisions =
- new TreeMap<Revision, Revision>(context.getRevisionComparator());
+ UpdateOp op = new UpdateOp(getId(), false);
+ int purgeCount = 0;
for (Map.Entry<Revision, String> commit : valueMap.entrySet()) {
if (!Utils.isCommitted(commit.getValue())) {
Revision r = commit.getKey();
if (r.getClusterId() == context.getClusterId()) {
- Revision b = Revision.fromString(commit.getValue());
- revisions.put(r, b);
+ purgeCount++;
+ op.removeMapEntry(REVISIONS, r);
}
}
}
- return revisions;
+
+ if (op.hasChanges()) {
+ store.findAndUpdate(Collection.NODES, op);
+ }
+ return purgeCount;
}
/**
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UnmergedBranches.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UnmergedBranches.java
index b39b8df..9573cb7 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UnmergedBranches.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UnmergedBranches.java
@@ -21,7 +21,6 @@ import static com.google.common.base.Preconditions.checkNotNull;
import java.util.Comparator;
import java.util.List;
-import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -31,6 +30,8 @@ import javax.annotation.CheckForNull;
import javax.annotation.Nonnull;
import org.apache.jackrabbit.oak.plugins.document.util.Utils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* <code>UnmergedBranches</code> contains all un-merged branches of a DocumentMK
@@ -38,6 +39,8 @@ import org.apache.jackrabbit.oak.plugins.document.util.Utils;
*/
class UnmergedBranches {
+ private final Logger log = LoggerFactory.getLogger(getClass());
+
/**
* Map of branches with the head of the branch as key.
*/
@@ -72,17 +75,9 @@ class UnmergedBranches {
if (doc == null) {
return;
}
- SortedMap<Revision, Revision> revisions = doc.getUncommittedRevisions(context);
- while (!revisions.isEmpty()) {
- SortedSet<Revision> commits = new TreeSet<Revision>(comparator);
- Revision head = revisions.lastKey();
- commits.add(head);
- Revision base = revisions.remove(head).asTrunkRevision();
- while (revisions.containsKey(base)) {
- commits.add(base);
- base = revisions.remove(base).asTrunkRevision();
- }
- branches.add(new Branch(commits, base));
+ int purgeCount = doc.purgeUncommittedRevisions(context);
+ if (purgeCount > 0) {
+ log.info("Purged [{}] uncommitted branch revision entries", purgeCount);
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1926_9225a3e2.diff |
bugs-dot-jar_data_OAK-1432_808ac9c0 | ---
BugID: OAK-1432
Summary: 'Query: use "union" for complex XPath queries that use multiple "or"'
Description: "The following XPath query is converted to a union, however there is
still an \"or\" in the converted query, which means the query still can't use all
indexes and has to traverse the whole repository:\n\n{noformat}\n/jcr:root/a/b//element(*,
nt:unstructured)[(\n(@sling:resourceType = 'x' \nor @sling:resourceType = 'dam/collection')
\nor @sling:resourceSuperType = 'dam/collection')] \n{noformat}"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Expression.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Expression.java
index 5c37162..5f79f71 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Expression.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Expression.java
@@ -42,7 +42,7 @@ abstract class Expression {
} else if (add == null) {
return old;
}
- return new Expression.Condition(old, "and", add, Expression.PRECEDENCE_AND);
+ return new Expression.AndCondition(old, add);
}
/**
@@ -55,6 +55,15 @@ abstract class Expression {
}
/**
+ * Pull an OR condition up to the right hand side of an AND condition.
+ *
+ * @return the (possibly rotated) expression
+ */
+ Expression pullOrRight() {
+ return this;
+ }
+
+ /**
* Get the operator / operation precedence. The JCR specification uses:
* 1=OR, 2=AND, 3=condition, 4=operand
*
@@ -255,6 +264,27 @@ abstract class Expression {
super(left, "and", right, Expression.PRECEDENCE_AND);
}
+ @Override
+ AndCondition pullOrRight() {
+ if (right instanceof OrCondition) {
+ return this;
+ } else if (left instanceof OrCondition) {
+ return new AndCondition(right, left);
+ }
+ if (right instanceof AndCondition) {
+ // pull up x:
+ // a and (b and (x)) -> (a and b) and (x)
+ AndCondition r2 = (AndCondition) right;
+ r2 = r2.pullOrRight();
+ AndCondition l2 = new AndCondition(left, r2.left);
+ l2 = l2.pullOrRight();
+ return new AndCondition(l2, r2.right);
+ } else if (left instanceof AndCondition) {
+ return new AndCondition(right, left).pullOrRight();
+ }
+ return this;
+ }
+
}
/**
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
index 03a2438..0504429 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/xpath/Statement.java
@@ -58,8 +58,34 @@ public class Statement {
if (where == null) {
return this;
}
- if (where instanceof OrCondition) {
- OrCondition or = (OrCondition) where;
+ ArrayList<Expression> unionList = new ArrayList<Expression>();
+ addToUnionList(where, unionList);
+ if (unionList.size() == 1) {
+ return this;
+ }
+ Statement union = null;
+ for (int i = 0; i < unionList.size(); i++) {
+ Expression e = unionList.get(i);
+ Statement s = new Statement();
+ s.columnSelector = columnSelector;
+ s.selectors = selectors;
+ s.columnList = columnList;
+ s.where = e;
+ if (i == unionList.size() - 1) {
+ s.xpathQuery = xpathQuery;
+ }
+ if (union == null) {
+ union = s;
+ } else {
+ union = new UnionStatement(union.optimize(), s.optimize());
+ }
+ }
+ return union;
+ }
+
+ private static void addToUnionList(Expression condition, ArrayList<Expression> unionList) {
+ if (condition instanceof OrCondition) {
+ OrCondition or = (OrCondition) condition;
if (or.getCommonLeftPart() != null) {
// @x = 1 or @x = 2
// is automatically converted to
@@ -72,29 +98,17 @@ public class Statement {
// @x = 1 or @y = 2
// or similar are converted to
// (@x = 1) union (@y = 2)
- Statement s1 = new Statement();
- s1.columnSelector = columnSelector;
- s1.selectors = selectors;
- s1.columnList = columnList;
- s1.where = or.left;
- Statement s2 = new Statement();
- s2.columnSelector = columnSelector;
- s2.selectors = selectors;
- s2.columnList = columnList;
- s2.where = or.right;
- s2.xpathQuery = xpathQuery;
- return new UnionStatement(s1.optimize(), s2.optimize());
+ addToUnionList(or.left, unionList);
+ addToUnionList(or.right, unionList);
+ return;
}
- } else if (where instanceof AndCondition) {
+ } else if (condition instanceof AndCondition) {
// conditions of type
// @a = 1 and (@x = 1 or @y = 2)
// are automatically converted to
// (@a = 1 and @x = 1) union (@a = 1 and @y = 2)
- AndCondition and = (AndCondition) where;
- if (and.left instanceof OrCondition && !(and.right instanceof OrCondition)) {
- // swap left and right
- and = new AndCondition(and.right, and.left);
- }
+ AndCondition and = (AndCondition) condition;
+ and = and.pullOrRight();
if (and.right instanceof OrCondition) {
OrCondition or = (OrCondition) and.right;
if (or.getCommonLeftPart() != null) {
@@ -106,23 +120,13 @@ public class Statement {
// do not optimize "contains"
} else {
// same as above, but with the added "and"
- // TODO avoid code duplication if possible
- Statement s1 = new Statement();
- s1.columnSelector = columnSelector;
- s1.selectors = selectors;
- s1.columnList = columnList;
- s1.where = new AndCondition(and.left, or.left);
- Statement s2 = new Statement();
- s2.columnSelector = columnSelector;
- s2.selectors = selectors;
- s2.columnList = columnList;
- s2.where = new AndCondition(and.left, or.right);
- s2.xpathQuery = xpathQuery;
- return new UnionStatement(s1.optimize(), s2.optimize());
+ addToUnionList(new AndCondition(and.left, or.left), unionList);
+ addToUnionList(new AndCondition(and.left, or.right), unionList);
+ return;
}
}
}
- return this;
+ unionList.add(condition);
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1432_808ac9c0.diff |
bugs-dot-jar_data_OAK-2430_be3a9114 | ---
BugID: OAK-2430
Summary: TARMK Cold Standby size increase due to checkpoints copy
Description: The current sync design gets confused by existing checkpoints and tries
to copy them by value, bypassing the current storage optimization where there are
a lot of references to existing content. this can result in a considerable size
increase on the standby.
diff --git a/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java b/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java
index fc055e2..7b32f51 100644
--- a/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java
+++ b/oak-tarmk-standby/src/main/java/org/apache/jackrabbit/oak/plugins/segment/standby/client/StandbyApplyDiff.java
@@ -26,9 +26,9 @@ import java.io.IOException;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
-import org.apache.jackrabbit.oak.commons.PathUtils;
-import org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState;
+import org.apache.jackrabbit.oak.plugins.segment.RecordId;
import org.apache.jackrabbit.oak.plugins.segment.SegmentBlob;
+import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState;
import org.apache.jackrabbit.oak.plugins.segment.SegmentStore;
import org.apache.jackrabbit.oak.plugins.segment.standby.store.RemoteSegmentLoader;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
@@ -50,22 +50,20 @@ class StandbyApplyDiff implements NodeStateDiff {
private final String path;
+ private final boolean logOnly;
+
public StandbyApplyDiff(NodeBuilder builder, SegmentStore store,
RemoteSegmentLoader loader) {
- this(builder, store, loader, "/");
+ this(builder, store, loader, "/", false);
}
private StandbyApplyDiff(NodeBuilder builder, SegmentStore store,
- RemoteSegmentLoader loader, String path) {
+ RemoteSegmentLoader loader, String path, boolean logOnly) {
this.builder = builder;
this.store = store;
this.loader = loader;
this.path = path;
- if (log.isTraceEnabled()) {
- if (PathUtils.getDepth(path) < 5) {
- log.trace("running diff on {}", path);
- }
- }
+ this.logOnly = logOnly;
}
@Override
@@ -73,7 +71,9 @@ class StandbyApplyDiff implements NodeStateDiff {
if (!loader.isRunning()) {
return false;
}
- builder.setProperty(binaryCheck(after));
+ if (!logOnly) {
+ builder.setProperty(binaryCheck(after));
+ }
return true;
}
@@ -82,7 +82,9 @@ class StandbyApplyDiff implements NodeStateDiff {
if (!loader.isRunning()) {
return false;
}
- builder.setProperty(binaryCheck(after));
+ if (!logOnly) {
+ builder.setProperty(binaryCheck(after));
+ }
return true;
}
@@ -91,7 +93,9 @@ class StandbyApplyDiff implements NodeStateDiff {
if (!loader.isRunning()) {
return false;
}
- builder.removeProperty(before.getName());
+ if (!logOnly) {
+ builder.removeProperty(before.getName());
+ }
return true;
}
@@ -143,13 +147,18 @@ class StandbyApplyDiff implements NodeStateDiff {
if (!loader.isRunning()) {
return false;
}
- NodeBuilder child = EmptyNodeState.EMPTY_NODE.builder();
- boolean success = EmptyNodeState.compareAgainstEmptyState(after,
- new StandbyApplyDiff(child, store, loader, path + name + "/"));
- if (success) {
- builder.setChildNode(name, child.getNodeState());
+
+ if (after instanceof SegmentNodeState) {
+ if (log.isTraceEnabled()) {
+ log.trace("childNodeAdded {}, RO:{}", path + name, logOnly);
+ }
+ if (!logOnly) {
+ RecordId id = ((SegmentNodeState) after).getRecordId();
+ builder.setChildNode(name, new SegmentNodeState(id));
+ }
+ return true;
}
- return success;
+ return false;
}
@Override
@@ -159,8 +168,26 @@ class StandbyApplyDiff implements NodeStateDiff {
return false;
}
- return after.compareAgainstBaseState(before, new StandbyApplyDiff(
- builder.getChildNode(name), store, loader, path + name + "/"));
+ if (after instanceof SegmentNodeState) {
+ RecordId id = ((SegmentNodeState) after).getRecordId();
+
+ if (log.isTraceEnabled()) {
+ // if (PathUtils.getDepth(path) < 5) {
+ RecordId oldId = ((SegmentNodeState) before).getRecordId();
+ log.trace("childNodeChanged {}, {} -> {}, RO:{}", path + name,
+ oldId, id, logOnly);
+ // }
+ }
+ if (!logOnly) {
+ builder.setChildNode(name, new SegmentNodeState(id));
+ }
+
+ // return true;
+ return after.compareAgainstBaseState(before, new StandbyApplyDiff(
+ builder.getChildNode(name), store, loader, path + name
+ + "/", true));
+ }
+ return false;
}
@Override
@@ -168,7 +195,10 @@ class StandbyApplyDiff implements NodeStateDiff {
if (!loader.isRunning()) {
return false;
}
- builder.getChildNode(name).remove();
+ log.trace("childNodeDeleted {}, RO:{}", path + name, logOnly);
+ if (!logOnly) {
+ builder.getChildNode(name).remove();
+ }
return true;
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2430_be3a9114.diff |
bugs-dot-jar_data_OAK-1254_25a70439 | ---
BugID: OAK-1254
Summary: Parallel execution of SimpleSearchTest fails with MongoMK
Description: At some point in the benchmark run one MongoMK instance will fail to
read a node created by another instance. The exception is very similar to *E1* reported
in OAK-1204.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java
index ecd48c6..6fee0d6 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java
@@ -1135,8 +1135,15 @@ public final class MongoNodeStore
Revision r = e.getValue();
Revision last = lastKnownRevision.get(machineId);
if (last == null || r.compareRevisionTime(last) > 0) {
- lastKnownRevision.put(machineId, r);
+ if (!hasNewRevisions) {
+ // publish our revision once before any foreign revision
+
+ // the latest revisions of the current cluster node
+ // happened before the latest revisions of other cluster nodes
+ revisionComparator.add(Revision.newRevision(clusterId), headSeen);
+ }
hasNewRevisions = true;
+ lastKnownRevision.put(machineId, r);
revisionComparator.add(r, otherSeen);
}
}
@@ -1144,11 +1151,6 @@ public final class MongoNodeStore
store.invalidateCache();
// TODO only invalidate affected items
docChildrenCache.invalidateAll();
- // add a new revision, so that changes are visible
- Revision r = Revision.newRevision(clusterId);
- // the latest revisions of the current cluster node
- // happened before the latest revisions of other cluster nodes
- revisionComparator.add(r, headSeen);
// the head revision is after other revisions
setHeadRevision(Revision.newRevision(clusterId));
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1254_25a70439.diff |
bugs-dot-jar_data_OAK-3769_306a9e00 | ---
BugID: OAK-3769
Summary: QueryParse exception when fulltext search performed with term having '/'
Description: "Running the below query, results in Exception pointed by [1]\n\n/jcr:root/content/dam//element(*,dam:Asset)[jcr:contains(jcr:content/metadata/@cq:tags,
'stockphotography:business/business_abstract')] order by @jcr:created descending\n\nAlso
if you remove the node at /oak:index/damAssetLucene/indexRules/dam:Asset/properties/cqTags
\ and re-index the /oak:index/damAssetLucene index, the query works.\n\nSeems '/'
is special character and needs to be escaped by Oak.\n\n[1]\n{noformat}\nCaused
by: org.apache.lucene.queryparser.flexible.core.QueryNodeParseException: Syntax
Error, cannot parse stockphotography\\:business/business_abstract: Lexical error
at line 1, column 45. Encountered: <EOF> after : \"/business_abstract\" \nat org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.parse(StandardSyntaxParser.java:74)\nat
org.apache.lucene.queryparser.flexible.core.QueryParserHelper.parse(QueryParserHelper.java:250)\nat
org.apache.lucene.queryparser.flexible.standard.StandardQueryParser.parse(StandardQueryParser.java:168)\nat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex.tokenToQuery(LucenePropertyIndex.java:1260)\n...
138 common frames omitted\nCaused by: org.apache.lucene.queryparser.flexible.standard.parser.TokenMgrError:
Lexical error at line 1, column 45. Encountered: <EOF> after : \"/business_abstract\"\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParserTokenManager.getNextToken(StandardSyntaxParserTokenManager.java:937)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.jj_scan_token(StandardSyntaxParser.java:945)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.jj_3R_4(StandardSyntaxParser.java:827)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.jj_3_2(StandardSyntaxParser.java:739)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.jj_2_2(StandardSyntaxParser.java:730)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.Clause(StandardSyntaxParser.java:318)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.ModClause(StandardSyntaxParser.java:303)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.ConjQuery(StandardSyntaxParser.java:234)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.DisjQuery(StandardSyntaxParser.java:204)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.Query(StandardSyntaxParser.java:166)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.TopLevelQuery(StandardSyntaxParser.java:147)\nat
org.apache.lucene.queryparser.flexible.standard.parser.StandardSyntaxParser.parse(StandardSyntaxParser.java:65)\n...
141 common frames omitted\n{noformat}"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
index e7cec26..e7f29cd 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
@@ -1337,7 +1337,7 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
/**
* Following logic is taken from org.apache.jackrabbit.core.query.lucene.JackrabbitQueryParser#parse(java.lang.String)
*/
- private static String rewriteQueryText(String textsearch) {
+ static String rewriteQueryText(String textsearch) {
// replace escaped ' with just '
StringBuilder rewritten = new StringBuilder();
// the default lucene query parser recognizes 'AND' and 'NOT' as
@@ -1346,27 +1346,30 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
textsearch = textsearch.replaceAll("NOT", "not");
boolean escaped = false;
for (int i = 0; i < textsearch.length(); i++) {
- if (textsearch.charAt(i) == '\\') {
+ char c = textsearch.charAt(i);
+ if (c == '\\') {
if (escaped) {
rewritten.append("\\\\");
escaped = false;
} else {
escaped = true;
}
- } else if (textsearch.charAt(i) == '\'') {
+ } else if (c == '\'') {
if (escaped) {
escaped = false;
}
- rewritten.append(textsearch.charAt(i));
- } else if (textsearch.charAt(i) == ':') {
- // fields as known in lucene are not supported
- rewritten.append("\\:");
+ rewritten.append(c);
+ } else if (c == ':' || c == '/') {
+ //TODO Some other chars are also considered special See OAK-3769 for details
+ //':' fields as known in lucene are not supported
+ //'/' its a special char used for regex search in Lucene
+ rewritten.append('\\').append(c);
} else {
if (escaped) {
rewritten.append('\\');
escaped = false;
}
- rewritten.append(textsearch.charAt(i));
+ rewritten.append(c);
}
}
return rewritten.toString();
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3769_306a9e00.diff |
bugs-dot-jar_data_OAK-1727_26041fe7 | ---
BugID: OAK-1727
Summary: Cross foreign cluster revision comparison may be wrong
Description: Running one of the access control related benchmarks concurrently on
a MongoDB may result in strange conflicts even when DocumentNodeStore retries the
commit. The root cause may be a wrong revision comparison when both revision to
compare are from a foreign cluster node and one of them is not withing the known
seen-at revision ranges.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
index db6a698..79043e9 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
@@ -521,9 +521,14 @@ public class Revision {
if (range1 == FUTURE && range2 == FUTURE) {
return o1.compareRevisionTimeThenClusterId(o2);
}
- if (range1 == null || range2 == null) {
+ if (range1 == null && range2 == null) {
return o1.compareRevisionTimeThenClusterId(o2);
}
+ if (range1 == null) {
+ return -1;
+ } else if (range2 == null) {
+ return 1;
+ }
int comp = range1.compareRevisionTimeThenClusterId(range2);
if (comp != 0) {
return comp;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1727_26041fe7.diff |
bugs-dot-jar_data_OAK-2308_f4d5bbe1 | ---
BugID: OAK-2308
Summary: Incorrect recovery of _lastRev for branch commit
Description: The recovery process for _lastRevs is incorrect for branch commits. It
propagates the revision of the commit to the branch up to the root node instead
of the revision of the merge for the changes.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java
index 899995a..cc57477 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/LastRevRecoveryAgent.java
@@ -289,7 +289,8 @@ public class LastRevRecoveryAgent {
// if found then lastRev needs to be fixed
for (Revision rev : revs) {
if (rev.compareRevisionTime(currentLastRev) > 0) {
- if (doc.isCommitted(rev)) {
+ rev = doc.getCommitRevision(rev);
+ if (rev != null) {
return rev;
}
} else {
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 388cca2..912237b 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -545,6 +545,26 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
}
/**
+ * Returns the commit revision for the change with the given revision.
+ *
+ * @param revision the revision of a change.
+ * @return the commit revision of the change or {@code null} if the change
+ * is not committed or unknown.
+ */
+ @CheckForNull
+ public Revision getCommitRevision(@Nonnull Revision revision) {
+ NodeDocument commitRoot = getCommitRoot(checkNotNull(revision));
+ if (commitRoot == null) {
+ return null;
+ }
+ String value = commitRoot.getCommitValue(revision);
+ if (Utils.isCommitted(value)) {
+ return Utils.resolveCommitRevision(revision, value);
+ }
+ return null;
+ }
+
+ /**
* Returns <code>true</code> if this document contains an entry for the
* given <code>revision</code> in the {@link #REVISIONS} map. Please note
* that an entry in the {@link #REVISIONS} map does not necessarily mean
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2308_f4d5bbe1.diff |
bugs-dot-jar_data_OAK-3763_ab1a0cc2 | ---
BugID: OAK-3763
Summary: EmptyNodeState.equals() broken
Description: EmptyNodeState.equals() returns incorrect results when the other node
state is not of type EmptyNodeState and the two states have differing exists() flags.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/EmptyNodeState.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/EmptyNodeState.java
index 62c757d..54415d4 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/EmptyNodeState.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/EmptyNodeState.java
@@ -186,7 +186,8 @@ public final class EmptyNodeState implements NodeState {
} else if (object instanceof NodeState) {
NodeState that = (NodeState) object;
return that.getPropertyCount() == 0
- && that.getChildNodeCount(1) == 0;
+ && that.getChildNodeCount(1) == 0
+ && (exists == that.exists());
} else {
return false;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3763_ab1a0cc2.diff |
bugs-dot-jar_data_OAK-596_9b268da0 | ---
BugID: OAK-596
Summary: Microkernel.diff returns empty diff when there are differences
Description: |-
{code}
String rev1 = mk.commit("/", "+\"node1\":{\"node2\":{\"prop1\":\"val1\",\"prop2\":\"val2\"}}", null, null);
String rev2 = mk.commit("/", "^\"node1/node2/prop1\":\"val1 new\" ^\"node1/node2/prop2\":null", null, null);
String diff = mk.diff(rev1, rev2, "/node1/node2", 0);
{code}
Here {{diff}} is empty although there are clearly differences between {{rev1}} and {{rev2}} at depth 0.
diff --git a/oak-mongomk/src/main/java/org/apache/jackrabbit/mongomk/impl/model/MongoNode.java b/oak-mongomk/src/main/java/org/apache/jackrabbit/mongomk/impl/model/MongoNode.java
index f58a597..4bbd8dc 100644
--- a/oak-mongomk/src/main/java/org/apache/jackrabbit/mongomk/impl/model/MongoNode.java
+++ b/oak-mongomk/src/main/java/org/apache/jackrabbit/mongomk/impl/model/MongoNode.java
@@ -16,6 +16,7 @@
*/
package org.apache.jackrabbit.mongomk.impl.model;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
@@ -148,6 +149,11 @@ public class MongoNode extends BasicDBObject {
public MongoNode copy() {
MongoNode copy = new MongoNode();
copy.putAll((Map) super.copy());
+ List<String> children = getChildren();
+ if (children != null) {
+ copy.put(KEY_CHILDREN, new ArrayList<String>(children));
+ }
+ copy.put(KEY_PROPERTIES, new HashMap<String, Object>(getProperties()));
return copy;
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-596_9b268da0.diff |
bugs-dot-jar_data_OAK-2799_3979fa8d | ---
BugID: OAK-2799
Summary: OakIndexInput cloned instances are not closed
Description: |-
Related to the inspections I was doing for OAK-2798 I also noticed that we don't fully comply with the {{IndexInput}} javadoc [1] as the cloned instances should throw the given exception if original is closed, but I also think that the original instance should close the cloned instances, see also [ByteBufferIndexInput#close|https://github.com/apache/lucene-solr/blob/lucene_solr_4_7_1/lucene/core/src/java/org/apache/lucene/store/ByteBufferIndexInput.java#L271].
[1] : {code}
/** Abstract base class for input from a file in a {@link Directory}. A
* random-access input stream. Used for all Lucene index input operations.
*
* <p>{@code IndexInput} may only be used from one thread, because it is not
* thread safe (it keeps internal state like file position). To allow
* multithreaded use, every {@code IndexInput} instance must be cloned before
* used in another thread. Subclasses must therefore implement {@link #clone()},
* returning a new {@code IndexInput} which operates on the same underlying
* resource, but positioned independently. Lucene never closes cloned
* {@code IndexInput}s, it will only do this on the original one.
* The original instance must take care that cloned instances throw
* {@link AlreadyClosedException} when the original one is closed.
{code}
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectory.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectory.java
index 946d0c1..056ddf7 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectory.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakDirectory.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
+import java.util.Iterator;
import java.util.List;
import java.util.Set;
@@ -33,6 +34,7 @@ import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.util.PerfLogger;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
@@ -40,6 +42,7 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NoLockFactory;
+import org.apache.lucene.util.WeakIdentityMap;
import org.slf4j.LoggerFactory;
import static com.google.common.base.Preconditions.checkArgument;
@@ -364,29 +367,41 @@ class OakDirectory extends Directory {
private static class OakIndexInput extends IndexInput {
private final OakIndexFile file;
+ private boolean isClone = false;
+ private final WeakIdentityMap<OakIndexInput, Boolean> clones;
public OakIndexInput(String name, NodeBuilder file) {
super(name);
this.file = new OakIndexFile(name, file);
+ clones = WeakIdentityMap.newConcurrentHashMap();
}
private OakIndexInput(OakIndexInput that) {
super(that.toString());
this.file = new OakIndexFile(that.file);
+ clones = null;
}
@Override
public OakIndexInput clone() {
- return new OakIndexInput(this);
+ // TODO : shouldn't we call super#clone ?
+ OakIndexInput clonedIndexInput = new OakIndexInput(this);
+ clonedIndexInput.isClone = true;
+ if (clones != null) {
+ clones.put(clonedIndexInput, Boolean.TRUE);
+ }
+ return clonedIndexInput;
}
@Override
public void readBytes(byte[] b, int o, int n) throws IOException {
+ checkNotClosed();
file.readBytes(b, o, n);
}
@Override
public byte readByte() throws IOException {
+ checkNotClosed();
byte[] b = new byte[1];
readBytes(b, 0, 1);
return b[0];
@@ -394,16 +409,19 @@ class OakDirectory extends Directory {
@Override
public void seek(long pos) throws IOException {
+ checkNotClosed();
file.seek(pos);
}
@Override
public long length() {
+ checkNotClosed();
return file.length;
}
@Override
public long getFilePointer() {
+ checkNotClosed();
return file.position;
}
@@ -411,6 +429,20 @@ class OakDirectory extends Directory {
public void close() {
file.blob = null;
file.data = null;
+
+ if (clones != null) {
+ for (Iterator<OakIndexInput> it = clones.keyIterator(); it.hasNext();) {
+ final OakIndexInput clone = it.next();
+ assert clone.isClone;
+ clone.close();
+ }
+ }
+ }
+
+ private void checkNotClosed() {
+ if (file.blob == null && file.data == null) {
+ throw new AlreadyClosedException("Already closed: " + this);
+ }
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2799_3979fa8d.diff |
bugs-dot-jar_data_OAK-4420_d645112f | ---
BugID: OAK-4420
Summary: 'RepositorySidegrade: oak-segment to oak-segment-tar should migrate checkpoint
info'
Description: The sidegrade from {{oak-segment}} to {{oak-segment-tar}} should also
take care of moving the checkpoint data and meta. This will save a very expensive
full-reindex.
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositorySidegrade.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositorySidegrade.java
index 48bc82c..386d010 100755
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositorySidegrade.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositorySidegrade.java
@@ -18,23 +18,29 @@ package org.apache.jackrabbit.oak.upgrade;
import java.util.ArrayList;
import java.util.Calendar;
+import java.util.Comparator;
import java.util.List;
import java.util.Set;
import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
import javax.jcr.RepositoryException;
+import com.google.common.base.Function;
import org.apache.jackrabbit.oak.Oak;
import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.commons.PathUtils;
import org.apache.jackrabbit.oak.plugins.nodetype.write.InitialContent;
import org.apache.jackrabbit.oak.spi.commit.CommitHook;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.commit.EditorHook;
import org.apache.jackrabbit.oak.spi.lifecycle.RepositoryInitializer;
+import org.apache.jackrabbit.oak.spi.state.ChildNodeEntry;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeState;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
import org.apache.jackrabbit.oak.upgrade.RepositoryUpgrade.LoggingCompositeHook;
+import org.apache.jackrabbit.oak.upgrade.cli.node.TarNodeStore;
import org.apache.jackrabbit.oak.upgrade.nodestate.NameFilteringNodeState;
import org.apache.jackrabbit.oak.upgrade.nodestate.report.LoggingReporter;
import org.apache.jackrabbit.oak.upgrade.nodestate.report.ReportingNodeState;
@@ -47,7 +53,10 @@ import org.slf4j.LoggerFactory;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.ImmutableSet.copyOf;
import static com.google.common.collect.ImmutableSet.of;
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Lists.transform;
import static com.google.common.collect.Sets.union;
+import static java.util.Collections.sort;
import static org.apache.jackrabbit.oak.upgrade.RepositoryUpgrade.DEFAULT_EXCLUDE_PATHS;
import static org.apache.jackrabbit.oak.upgrade.RepositoryUpgrade.DEFAULT_INCLUDE_PATHS;
import static org.apache.jackrabbit.oak.upgrade.RepositoryUpgrade.DEFAULT_MERGE_PATHS;
@@ -256,14 +265,19 @@ public class RepositorySidegrade {
}
private void removeCheckpointReferences(NodeBuilder builder) throws CommitFailedException {
- // removing references to the checkpoints,
+ // removing references to the checkpoints,
// which don't exist in the new repository
builder.setChildNode(":async");
}
private void copyState(NodeState sourceRoot, NodeBuilder targetRoot) throws CommitFailedException {
copyWorkspace(sourceRoot, targetRoot);
- removeCheckpointReferences(targetRoot);
+
+ if (!copyCheckpoints(targetRoot)) {
+ LOG.info("Copying checkpoints is not supported for this combination of node stores");
+ removeCheckpointReferences(targetRoot);
+ }
+
if (!versionCopyConfiguration.skipOrphanedVersionsCopy()) {
copyVersionStorage(sourceRoot, targetRoot, versionCopyConfiguration);
}
@@ -296,4 +310,77 @@ public class RepositorySidegrade {
copyProperties(sourceRoot, targetRoot);
}
}
-}
\ No newline at end of file
+
+ private boolean copyCheckpoints(NodeBuilder targetRoot) {
+ if (!(source instanceof TarNodeStore && target instanceof TarNodeStore)) {
+ return false;
+ }
+
+ TarNodeStore sourceTarNS = (TarNodeStore) source;
+ TarNodeStore targetTarNS = (TarNodeStore) target;
+
+ NodeState srcSuperRoot = sourceTarNS.getSuperRoot();
+ NodeBuilder builder = targetTarNS.getSuperRoot().builder();
+
+ String previousRoot = null;
+ for (String checkpoint : getCheckpointPaths(srcSuperRoot)) {
+ // copy the checkpoint without the root
+ NodeStateCopier.builder()
+ .include(checkpoint)
+ .exclude(checkpoint + "/root")
+ .copy(srcSuperRoot, builder);
+
+ // reference the previousRoot or targetRoot as a new checkpoint root
+ NodeState baseRoot;
+ if (previousRoot == null) {
+ baseRoot = targetRoot.getNodeState();
+ } else {
+ baseRoot = getBuilder(builder, previousRoot).getNodeState();
+ }
+ NodeBuilder targetParent = getBuilder(builder, checkpoint);
+ targetParent.setChildNode("root", baseRoot);
+ previousRoot = checkpoint + "/root";
+
+ // apply diff changes
+ NodeStateCopier.builder()
+ .include(checkpoint + "/root")
+ .copy(srcSuperRoot, builder);
+ }
+
+ targetTarNS.setSuperRoot(builder);
+ return true;
+ }
+
+ /**
+ * Return all checkpoint paths, sorted by their "created" property, descending.
+ *
+ * @param superRoot
+ * @return
+ */
+ private static List<String> getCheckpointPaths(NodeState superRoot) {
+ List<ChildNodeEntry> checkpoints = newArrayList(superRoot.getChildNode("checkpoints").getChildNodeEntries().iterator());
+ sort(checkpoints, new Comparator<ChildNodeEntry>() {
+ @Override
+ public int compare(ChildNodeEntry o1, ChildNodeEntry o2) {
+ long c1 = o1.getNodeState().getLong("created");
+ long c2 = o1.getNodeState().getLong("created");
+ return -Long.compare(c1, c2);
+ }
+ });
+ return transform(checkpoints, new Function<ChildNodeEntry, String>() {
+ @Nullable
+ @Override
+ public String apply(@Nullable ChildNodeEntry input) {
+ return "/checkpoints/" + input.getName();
+ }
+ });
+ }
+
+ private static NodeBuilder getBuilder(NodeBuilder root, String path) {
+ NodeBuilder builder = root;
+ for (String element : PathUtils.elements(path)) {
+ builder = builder.child(element);
+ }
+ return builder;
+ }
+}
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentFactory.java
index 43a26b5..94aeb49 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentFactory.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentFactory.java
@@ -20,14 +20,20 @@ import java.io.Closeable;
import java.io.File;
import java.io.IOException;
+import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeBuilder;
+import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeState;
import org.apache.jackrabbit.oak.plugins.segment.SegmentNodeStore;
import org.apache.jackrabbit.oak.plugins.segment.file.FileStore;
import org.apache.jackrabbit.oak.plugins.segment.file.FileStore.Builder;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
import com.google.common.io.Closer;
+import static com.google.common.base.Preconditions.checkArgument;
+
public class SegmentFactory implements NodeStoreFactory {
private final File dir;
@@ -49,9 +55,27 @@ public class SegmentFactory implements NodeStoreFactory {
builder.withBlobStore(blobStore);
}
builder.withMaxFileSize(256).withMemoryMapping(mmap);
- FileStore fs = builder.build();
+ final FileStore fs = builder.build();
closer.register(asCloseable(fs));
- return SegmentNodeStore.builder(fs).build();
+ return new TarNodeStore(SegmentNodeStore.builder(fs).build(), new TarNodeStore.SuperRootProvider() {
+ @Override
+ public void setSuperRoot(NodeBuilder builder) {
+ checkArgument(builder instanceof SegmentNodeBuilder);
+ SegmentNodeBuilder segmentBuilder = (SegmentNodeBuilder) builder;
+ SegmentNodeState lastRoot = (SegmentNodeState) getSuperRoot();
+
+ if (!lastRoot.getRecordId().equals(((SegmentNodeState) segmentBuilder.getBaseState()).getRecordId())) {
+ throw new IllegalArgumentException("The new head is out of date");
+ }
+
+ fs.setHead(lastRoot, ((SegmentNodeBuilder) builder).getNodeState());
+ }
+
+ @Override
+ public NodeState getSuperRoot() {
+ return fs.getHead();
+ }
+ });
}
public File getRepositoryDir() {
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentTarFactory.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentTarFactory.java
index caa53dd..7714b51 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentTarFactory.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/SegmentTarFactory.java
@@ -16,6 +16,7 @@
*/
package org.apache.jackrabbit.oak.upgrade.cli.node;
+import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.jackrabbit.oak.segment.file.FileStoreBuilder.fileStoreBuilder;
import java.io.Closeable;
@@ -23,10 +24,14 @@ import java.io.File;
import java.io.IOException;
import com.google.common.io.Closer;
+import org.apache.jackrabbit.oak.segment.SegmentNodeBuilder;
+import org.apache.jackrabbit.oak.segment.SegmentNodeState;
import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
import org.apache.jackrabbit.oak.segment.file.FileStore;
import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
public class SegmentTarFactory implements NodeStoreFactory {
@@ -45,14 +50,33 @@ public class SegmentTarFactory implements NodeStoreFactory {
@Override
public NodeStore create(BlobStore blobStore, Closer closer) throws IOException {
- FileStoreBuilder builder = fileStoreBuilder(new File(dir, "segmentstore"));
+ final FileStoreBuilder builder = fileStoreBuilder(new File(dir, "segmentstore"));
if (blobStore != null) {
builder.withBlobStore(blobStore);
}
builder.withMaxFileSize(256).withMemoryMapping(mmap);
- FileStore fs = builder.build();
+ final FileStore fs = builder.build();
closer.register(asCloseable(fs));
- return SegmentNodeStoreBuilders.builder(fs).build();
+
+ return new TarNodeStore(SegmentNodeStoreBuilders.builder(fs).build(), new TarNodeStore.SuperRootProvider() {
+ @Override
+ public void setSuperRoot(NodeBuilder builder) {
+ checkArgument(builder instanceof SegmentNodeBuilder);
+ SegmentNodeBuilder segmentBuilder = (SegmentNodeBuilder) builder;
+ SegmentNodeState lastRoot = (SegmentNodeState) getSuperRoot();
+
+ if (!lastRoot.getRecordId().equals(((SegmentNodeState) segmentBuilder.getBaseState()).getRecordId())) {
+ throw new IllegalArgumentException("The new head is out of date");
+ }
+
+ fs.getRevisions().setHead(lastRoot.getRecordId(), segmentBuilder.getNodeState().getRecordId());
+ }
+
+ @Override
+ public NodeState getSuperRoot() {
+ return fs.getReader().readHeadState();
+ }
+ });
}
public File getRepositoryDir() {
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/TarNodeStore.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/TarNodeStore.java
new file mode 100644
index 0000000..e83cfcc
--- /dev/null
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/cli/node/TarNodeStore.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.upgrade.cli.node;
+
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.spi.commit.CommitHook;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Map;
+
+public class TarNodeStore implements NodeStore {
+
+ private final NodeStore ns;
+
+ private final SuperRootProvider superRootProvider;
+
+ public TarNodeStore(NodeStore ns, SuperRootProvider superRootProvider) {
+ this.ns = ns;
+ this.superRootProvider = superRootProvider;
+ }
+
+ public void setSuperRoot(NodeBuilder builder) {
+ superRootProvider.setSuperRoot(builder);
+ }
+
+ public NodeState getSuperRoot() {
+ return superRootProvider.getSuperRoot();
+ }
+
+ @Nonnull
+ @Override
+ public NodeState getRoot() {
+ return ns.getRoot();
+ }
+
+ @Nonnull
+ @Override
+ public NodeState merge(@Nonnull NodeBuilder builder, @Nonnull CommitHook commitHook, @Nonnull CommitInfo info) throws CommitFailedException {
+ return ns.merge(builder, commitHook, info);
+ }
+
+ @Nonnull
+ @Override
+ public NodeState rebase(@Nonnull NodeBuilder builder) {
+ return ns.rebase(builder);
+ }
+
+ @Override
+ public NodeState reset(@Nonnull NodeBuilder builder) {
+ return ns.reset(builder);
+ }
+
+ @Nonnull
+ @Override
+ public Blob createBlob(InputStream inputStream) throws IOException {
+ return ns.createBlob(inputStream);
+ }
+
+ @Override
+ public Blob getBlob(@Nonnull String reference) {
+ return ns.getBlob(reference);
+ }
+
+ @Nonnull
+ @Override
+ public String checkpoint(long lifetime, @Nonnull Map<String, String> properties) {
+ return checkpoint(lifetime, properties);
+ }
+
+ @Nonnull
+ @Override
+ public String checkpoint(long lifetime) {
+ return checkpoint(lifetime);
+ }
+
+ @Nonnull
+ @Override
+ public Map<String, String> checkpointInfo(@Nonnull String checkpoint) {
+ return checkpointInfo(checkpoint);
+ }
+
+ @Override
+ public NodeState retrieve(@Nonnull String checkpoint) {
+ return retrieve(checkpoint);
+ }
+
+ @Override
+ public boolean release(@Nonnull String checkpoint) {
+ return release(checkpoint);
+ }
+
+ interface SuperRootProvider {
+
+ void setSuperRoot(NodeBuilder builder);
+
+ NodeState getSuperRoot();
+
+ }
+}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4420_d645112f.diff |
bugs-dot-jar_data_OAK-2330_408a566e | ---
BugID: OAK-2330
Summary: Field boost not working if the property for indexing is picked using aggregate
index rules
Description: "For below index definition - \n{code}\n{ \n jcr:primaryType:\"oak:QueryIndexDefinition\",\n
\ compatVersion:2,\n type:\"lucene\",\n async:\"async\",\n reindex:false,\n
\ reindexCount:12,\n aggregates:{ \n jcr:primaryType:\"oak:Unstructured\",\n
\ app:Asset:{ \n jcr:primaryType:\"oak:Unstructured\",\n include0:{
\ \n jcr:primaryType:\"oak:Unstructured\",\n path:\"jcr:content/metadata/*\"\n
\ }\n }\n },\n indexRules:{ \n jcr:primaryType:\"nt:unstructured\",\n
\ app:Asset:{ \n jcr:primaryType:\"nt:unstructured\",\n properties:{
\ \n jcr:primaryType:\"nt:unstructured\",\n foo:{ \n jcr:primaryType:\"nt:unstructured\",\n
\ nodeScopeIndex:true,\n ordered:true,\n propertyIndex:true,\n
\ name:\"jcr:content/metadata/foo\",\n type:\"Long\",\n
\ boost:3,\n nodeName:\"foo\"\n }\n }\n
\ }\n }\n}\n{code}\n\nOn executing query of form - \n\n{code}\n//element(*,
app:Asset) \n[\n jcr:contains(., 'bar' )\n]\n{code}\n\nshould boost the results
containing property - 'jcr:content/metadata/foo', but its ignoring index time boosting
for it.\n\n"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java
index 447e41f..44408aa 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexEditor.java
@@ -291,7 +291,7 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
dirty |= addTypedOrderedFields(fields, property, pname, pd);
}
- dirty |= indexProperty(path, fields, state, property, pname, false, pd);
+ dirty |= indexProperty(path, fields, state, property, pname, pd);
}
dirty |= indexAggregates(path, fields, state);
@@ -335,7 +335,6 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
NodeState state,
PropertyState property,
String pname,
- boolean aggregateMode,
PropertyDefinition pd) throws CommitFailedException {
boolean includeTypeForFullText = indexingRule.includePropertyType(property.getType().tag());
if (Type.BINARY.tag() == property.getType().tag()
@@ -358,7 +357,7 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
fields.add(newPropertyField(analyzedPropName, value, !pd.skipTokenization(pname), pd.stored));
}
- if (pd.nodeScopeIndex && !aggregateMode) {
+ if (pd.nodeScopeIndex) {
Field field = newFulltextField(value);
field.setBoost(pd.boost);
fields.add(field);
@@ -533,7 +532,7 @@ public class LuceneIndexEditor implements IndexEditor, Aggregate.AggregateRoot {
result.propertyPath, result.pd);
}
dirty |= indexProperty(path, fields, state, result.propertyState,
- result.propertyPath, true, result.pd);
+ result.propertyPath, result.pd);
if (dirty) {
dirtyFlag.set(true);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2330_408a566e.diff |
bugs-dot-jar_data_OAK-1363_69b68890 | ---
BugID: OAK-1363
Summary: TokenLoginModule does not set userId on auth info
Description: |-
the token login module does not set the userid in the authinfo (because it does not know it). and the LoginModuleImpl does not overwrite the AuthInfo if it already exists.
the consequence: {{Session.getUserID()}} returns {{NULL}} for logins that create a token.
I think that the authinfos should be added even if they already exist. and all users of the public credentials need to be aware that authinfos can exist that are not complete.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenLoginModule.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenLoginModule.java
index 79a05df..74f5281 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenLoginModule.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenLoginModule.java
@@ -170,7 +170,8 @@ public final class TokenLoginModule extends AbstractLoginModule {
for (String name : attributes.keySet()) {
tc.setAttribute(name, attributes.get(name));
}
- updateSubject(tc, getAuthInfo(ti), null);
+ sharedState.put(SHARED_KEY_ATTRIBUTES, attributes);
+ updateSubject(tc, null, null);
} else {
// failed to create token -> fail commit()
log.debug("TokenProvider failed to create a login token for user " + userId);
@@ -236,19 +237,21 @@ public final class TokenLoginModule extends AbstractLoginModule {
* @param tokenInfo The tokenInfo to retrieve attributes from.
* @return The {@code AuthInfo} resulting from the successful login.
*/
- @Nonnull
- private AuthInfo getAuthInfo(TokenInfo tokenInfo) {
- Map<String, Object> attributes = new HashMap<String, Object>();
- if (tokenProvider != null && tokenInfo != null) {
+ @CheckForNull
+ private AuthInfo getAuthInfo(@Nullable TokenInfo tokenInfo) {
+ if (tokenInfo != null) {
+ Map<String, Object> attributes = new HashMap<String, Object>();
Map<String, String> publicAttributes = tokenInfo.getPublicAttributes();
for (String attrName : publicAttributes.keySet()) {
attributes.put(attrName, publicAttributes.get(attrName));
}
+ return new AuthInfoImpl(tokenInfo.getUserId(), attributes, principals);
+ } else {
+ return null;
}
- return new AuthInfoImpl(userId, attributes, principals);
}
- private void updateSubject(@Nonnull TokenCredentials tc, @Nonnull AuthInfo authInfo,
+ private void updateSubject(@Nonnull TokenCredentials tc, @Nullable AuthInfo authInfo,
@Nullable Set<? extends Principal> principals) {
if (!subject.isReadOnly()) {
subject.getPublicCredentials().add(tc);
@@ -257,12 +260,9 @@ public final class TokenLoginModule extends AbstractLoginModule {
subject.getPrincipals().addAll(principals);
}
- // replace all existing auth-info
- Set<AuthInfo> ais = subject.getPublicCredentials(AuthInfo.class);
- if (!ais.isEmpty()) {
- subject.getPublicCredentials().removeAll(ais);
+ if (authInfo != null) {
+ setAuthInfo(authInfo, subject);
}
- subject.getPublicCredentials().add(authInfo);
}
}
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java
index 6434c4b..489b5d2 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/user/LoginModuleImpl.java
@@ -33,9 +33,9 @@ import javax.security.auth.login.LoginException;
import org.apache.jackrabbit.oak.api.AuthInfo;
import org.apache.jackrabbit.oak.spi.security.ConfigurationParameters;
-import org.apache.jackrabbit.oak.spi.security.authentication.AuthInfoImpl;
import org.apache.jackrabbit.oak.spi.security.SecurityProvider;
import org.apache.jackrabbit.oak.spi.security.authentication.AbstractLoginModule;
+import org.apache.jackrabbit.oak.spi.security.authentication.AuthInfoImpl;
import org.apache.jackrabbit.oak.spi.security.authentication.Authentication;
import org.apache.jackrabbit.oak.spi.security.authentication.ImpersonationCredentials;
import org.apache.jackrabbit.oak.spi.security.user.UserConfiguration;
@@ -143,10 +143,7 @@ public final class LoginModuleImpl extends AbstractLoginModule {
if (!subject.isReadOnly()) {
subject.getPrincipals().addAll(principals);
subject.getPublicCredentials().add(credentials);
- Set<AuthInfo> ais = subject.getPublicCredentials(AuthInfo.class);
- if (ais.isEmpty()) {
- subject.getPublicCredentials().add(createAuthInfo());
- }
+ setAuthInfo(createAuthInfo(), subject);
} else {
log.debug("Could not add information to read only subject {}", subject);
}
@@ -213,14 +210,19 @@ public final class LoginModuleImpl extends AbstractLoginModule {
}
private AuthInfo createAuthInfo() {
- Map<String, Object> attributes = new HashMap<String, Object>();
Credentials creds;
if (credentials instanceof ImpersonationCredentials) {
creds = ((ImpersonationCredentials) credentials).getBaseCredentials();
} else {
creds = credentials;
}
- if (creds instanceof SimpleCredentials) {
+ Map<String, Object> attributes = new HashMap<String, Object>();
+ Object shared = sharedState.get(SHARED_KEY_ATTRIBUTES);
+ if (shared instanceof Map) {
+ for (Object key : ((Map) shared).keySet()) {
+ attributes.put(key.toString(), ((Map) shared).get(key));
+ }
+ } else if (creds instanceof SimpleCredentials) {
SimpleCredentials sc = (SimpleCredentials) creds;
for (String attrName : sc.getAttributeNames()) {
attributes.put(attrName, sc.getAttribute(attrName));
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/security/authentication/AbstractLoginModule.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/security/authentication/AbstractLoginModule.java
index 5a73a5c..e9a2d8d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/security/authentication/AbstractLoginModule.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/security/authentication/AbstractLoginModule.java
@@ -35,6 +35,7 @@ import javax.security.auth.login.LoginException;
import javax.security.auth.spi.LoginModule;
import org.apache.jackrabbit.api.security.user.UserManager;
+import org.apache.jackrabbit.oak.api.AuthInfo;
import org.apache.jackrabbit.oak.api.ContentRepository;
import org.apache.jackrabbit.oak.api.ContentSession;
import org.apache.jackrabbit.oak.api.Root;
@@ -154,6 +155,12 @@ public abstract class AbstractLoginModule implements LoginModule {
*/
public static final String SHARED_KEY_LOGIN_NAME = "javax.security.auth.login.name";
+ /**
+ * Key of the sharedState entry referring to public attributes that are shared
+ * between multiple login modules.
+ */
+ public static final String SHARED_KEY_ATTRIBUTES = "javax.security.auth.login.attributes";
+
protected Subject subject;
protected CallbackHandler callbackHandler;
protected Map sharedState;
@@ -441,4 +448,12 @@ public abstract class AbstractLoginModule implements LoginModule {
return principalProvider.getPrincipals(userId);
}
}
+
+ static protected void setAuthInfo(@Nonnull AuthInfo authInfo, @Nonnull Subject subject) {
+ Set<AuthInfo> ais = subject.getPublicCredentials(AuthInfo.class);
+ if (!ais.isEmpty()) {
+ subject.getPublicCredentials().removeAll(ais);
+ }
+ subject.getPublicCredentials().add(authInfo);
+ }
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1363_69b68890.diff |
bugs-dot-jar_data_OAK-2250_08b25cb0 | ---
BugID: OAK-2250
Summary: Lucene Index property definition is ignored if its not in includePropertyNames
config
Description: "Lucene index property definition will not be used unless that property
is in includePropertyNames config. This enforces including that property in includePropertyNames.\nincludePropertyNames
restricts all properties from getting indexed, so user is now enforced to include
all properties in includePropertyNames to be indexed.\n "
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
index 9dd4b00..a89b6ae 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
@@ -285,7 +285,8 @@ class IndexDefinition {
private Map<String, PropertyDefinition> collectPropertyDefns(NodeBuilder defn) {
Map<String, PropertyDefinition> propDefns = newHashMap();
NodeBuilder propNode = defn.getChildNode(LuceneIndexConstants.PROP_NODE);
- for (String propName : Iterables.concat(includes, orderedProps)) {
+ //Include all immediate child nodes to 'properties' node by default
+ for (String propName : Iterables.concat(includes, orderedProps, propNode.getChildNodeNames())) {
NodeBuilder propDefnNode;
if (relativeProps.containsKey(propName)) {
propDefnNode = relativeProps.get(propName).getPropDefnNode(propNode);
@@ -293,7 +294,7 @@ class IndexDefinition {
propDefnNode = propNode.getChildNode(propName);
}
- if (propDefnNode.exists()) {
+ if (propDefnNode.exists() && !propDefns.containsKey(propName)) {
propDefns.put(propName, new PropertyDefinition(this, propName, propDefnNode));
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2250_08b25cb0.diff |
bugs-dot-jar_data_OAK-3474_ff81ef72 | ---
BugID: OAK-3474
Summary: NodeDocument.getNodeAtRevision can go into property history traversal when
latest rev on current doc isn't committed
Description: |-
{{NodeDocument.getNodeAtRevision}} tried to look at latest revisions entries for each property in current document. But it just looks at the *last* entry for a given property. In case this last entry isn't committed, the code would go into previous documents to look for a committed value.
(cc [~mreutegg])
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index f78ffed..b1b7de7 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -966,8 +966,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
// check if there may be more recent values in a previous document
if (!getPreviousRanges().isEmpty()) {
- Revision newest = local.firstKey();
- if (isRevisionNewer(nodeStore, newest, value.revision)) {
+ if (!isMostRecentCommitted(nodeStore, local, value.revision)) {
// not reading the most recent value, we may need to
// consider previous documents as well
Revision newestPrev = getPreviousRanges().firstKey();
@@ -1709,6 +1708,39 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
//----------------------------< internal >----------------------------------
/**
+ * Returns {@code true} if the given {@code revision} is more recent or
+ * equal to the committed revision in {@code valueMap}. This method assumes
+ * the given {@code revision} is committed.
+ *
+ * @param context the revision context.
+ * @param valueMap the value map sorted most recent first.
+ * @param revision a committed revision.
+ * @return if {@code revision} is the most recent committed revision in the
+ * {@code valueMap}.
+ */
+ private boolean isMostRecentCommitted(RevisionContext context,
+ SortedMap<Revision, String> valueMap,
+ Revision revision) {
+ if (valueMap.isEmpty()) {
+ return true;
+ }
+ // shortcut when revision is the first key
+ Revision first = valueMap.firstKey();
+ if (!isRevisionNewer(context, first, revision)) {
+ return true;
+ }
+ // need to check commit status
+ for (Revision r : valueMap.keySet()) {
+ Revision c = getCommitRevision(r);
+ if (c != null) {
+ return !isRevisionNewer(context, c, revision);
+ }
+ }
+ // no committed revision found in valueMap
+ return true;
+ }
+
+ /**
* Returns {@code true} if the two revisions are ambiguous. That is, they
* are from different cluster nodes and the comparison of the two revision
* depends on the seen at revision and is different when just comparing the
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3474_ff81ef72.diff |
bugs-dot-jar_data_OAK-2318_1d08cbd3 | ---
BugID: OAK-2318
Summary: DocumentNodeStore.diffManyChildren() reads too many nodes
Description: |-
DocumentNodeStore.diffManyChildren() compares too many nodes when running in a non-clustered setup and there are many changes below a location with 'many' children.
This is a regression introduced by OAK-2232. The fix changed the way how the minimum revision is calculated based on the two revisions to compare. The seen-at revision of the RevisionComparator is taken into account. However, in a single cluster node setup, the revision range for the current clusterId is never updated. This means the minimum revision is calculated too far back and causes queries with too many nodes than necessary.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
index a9cff7e..3079d88 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/Revision.java
@@ -554,7 +554,14 @@ public class Revision {
return timestamp;
}
// go through all known cluster nodes
- for (List<RevisionRange> list : map.values()) {
+ for (Map.Entry<Integer, List<RevisionRange>> e : map.entrySet()) {
+ if (revision.getClusterId() == currentClusterNodeId
+ && e.getKey() == currentClusterNodeId) {
+ // range and revision is for current cluster node
+ // no need to adjust timestamp
+ continue;
+ }
+ List<RevisionRange> list = e.getValue();
RevisionRange range;
for (int i = list.size() - 1; i >= 0; i--) {
range = list.get(i);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2318_1d08cbd3.diff |
bugs-dot-jar_data_OAK-2695_0598498e | ---
BugID: OAK-2695
Summary: DocumentNodeStore.dispatch() may pass null to NodeStateDiff
Description: This is a regression introduced by OAK-2562. The dispatch method passes
a null state if the node does not exist at a given revision.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
index 4b83da9..9cf769b 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
@@ -168,18 +168,7 @@ public class DocumentNodeState extends AbstractNodeState implements CacheValue {
@Nonnull
@Override
public NodeState getChildNode(@Nonnull String name) {
- if (!hasChildren) {
- checkValidName(name);
- return EmptyNodeState.MISSING_NODE;
- }
- String p = PathUtils.concat(getPath(), name);
- DocumentNodeState child = store.getNode(p, lastRevision);
- if (child == null) {
- checkValidName(name);
- return EmptyNodeState.MISSING_NODE;
- } else {
- return child;
- }
+ return getChildNode(name, lastRevision);
}
@Override
@@ -282,6 +271,23 @@ public class DocumentNodeState extends AbstractNodeState implements CacheValue {
return super.compareAgainstBaseState(base, diff);
}
+ @Nonnull
+ NodeState getChildNode(@Nonnull String name,
+ @Nonnull Revision revision) {
+ if (!hasChildren) {
+ checkValidName(name);
+ return EmptyNodeState.MISSING_NODE;
+ }
+ String p = PathUtils.concat(getPath(), name);
+ DocumentNodeState child = store.getNode(p, checkNotNull(revision));
+ if (child == null) {
+ checkValidName(name);
+ return EmptyNodeState.MISSING_NODE;
+ } else {
+ return child;
+ }
+ }
+
void setProperty(String propertyName, String value) {
if (value == null) {
properties.remove(propertyName);
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 1087f53..1ed7072 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -1905,14 +1905,14 @@ public final class DocumentNodeStore
while (t.read() != '}') {
// skip properties
}
- NodeState child = getNode(concat(node.getPath(), name), nodeRev);
- continueComparison = diff.childNodeAdded(name, child);
+ continueComparison = diff.childNodeAdded(name,
+ node.getChildNode(name, nodeRev));
break;
}
case '-': {
String name = unshareString(t.readString());
- NodeState child = getNode(concat(base.getPath(), name), baseRev);
- continueComparison = diff.childNodeDeleted(name, child);
+ continueComparison = diff.childNodeDeleted(name,
+ base.getChildNode(name, baseRev));
break;
}
case '^': {
@@ -1920,10 +1920,9 @@ public final class DocumentNodeStore
t.read(':');
if (t.matches('{')) {
t.read('}');
- NodeState nodeChild = getNode(concat(node.getPath(), name), nodeRev);
- NodeState baseChild = getNode(concat(base.getPath(), name), baseRev);
- continueComparison = diff.childNodeChanged(
- name, baseChild, nodeChild);
+ continueComparison = diff.childNodeChanged(name,
+ base.getChildNode(name, baseRev),
+ node.getChildNode(name, nodeRev));
} else if (t.matches('[')) {
// ignore multi valued property
while (t.read() != ']') {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2695_0598498e.diff |
bugs-dot-jar_data_OAK-782_45b110e1 | ---
BugID: OAK-782
Summary: 'MemoryNodeBuilder.setNode() loses property values '
Description: |-
{code}
builder.setNode("a", nodeA);
builder.child("a").setProperty(...);
{code}
After the 2nd line executed, properties initially present on {{nodeA}} are gone on {{builder.getNodeState()}}.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java
index 3854466..9faa23f 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java
@@ -254,7 +254,9 @@ public class MemoryNodeBuilder implements NodeBuilder {
writeState = parent.getWriteState(name);
if (writeState == null) {
if (exists()) {
- writeState = new MutableNodeState(baseState);
+ NodeState writeBase =
+ parent.writeState.base.getChildNode(name);
+ writeState = new MutableNodeState(writeBase);
}
else {
writeState = new MutableNodeState(null);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-782_45b110e1.diff |
bugs-dot-jar_data_OAK-2439_beaca1a4 | ---
BugID: OAK-2439
Summary: IndexPlanner returning plan for queries involving jcr:score
Description: "Consider a query like \n\n{noformat}\n/jcr:root//element(*, cq:Taggable)[
(@cq:tags = 'geometrixx-outdoors:activity/biking' or @cq:tags = '/etc/tags/geometrixx-outdoors/activity/biking')
] order by @jcr:score descending\n\n{noformat}\n\nAnd a seemingly non related index
like\n\n{noformat}\n/oak:index/assetType\n ...\n - type = \"lucene\"\n + indexRules\n
\ + nt:base\n + properties\n + assetType\n - propertyIndex
= true\n - name = \"assetType\"\n{noformat}\n\nThen currently {{IndexPlanner}}
would return a plan because even when it cannot evaluate any of property restrictions
because it thinks it can sort on {{jcr:score}}. This later results in an exception
like\n\n{noformat}\n14.01.2015 16:16:35.866 *ERROR* [0:0:0:0:0:0:0:1 [1421248595863]
POST /bin/tagcommand HTTP/1.1] org.apache.sling.engine.impl.SlingRequestProcessorImpl
service: Uncaught Throwable\njava.lang.IllegalStateException: No query created for
filter Filter(query=select [jcr:path], [jcr:score], * from [cq:Taggable] as a where
[cq:tags] in('geometrixx-outdoors:activity/swimming', '/etc/tags/geometrixx-outdoors/activity/swimming')
and isdescendantnode(a, '/') order by [jcr:score] desc /* xpath: /jcr:root//element(*,
cq:Taggable)[ (@cq:tags = 'geometrixx-outdoors:activity/swimming' or @cq:tags =
'/etc/tags/geometrixx-outdoors/activity/swimming') ] order by @jcr:score descending
*/, path=//*, property=[cq:tags=in(geometrixx-outdoors:activity/swimming, /etc/tags/geometrixx-outdoors/activity/swimming)])\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex.getQuery(LucenePropertyIndex.java:505)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex.access$200(LucenePropertyIndex.java:158)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex$1.loadDocs(LucenePropertyIndex.java:303)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex$1.computeNext(LucenePropertyIndex.java:261)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex$1.computeNext(LucenePropertyIndex.java:253)\n\tat
com.google.common.collect.AbstractIterator.tryToComputeNext(AbstractIterator.java:143)\n{noformat}"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexPlanner.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexPlanner.java
index a2189a0..9a413dc 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexPlanner.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexPlanner.java
@@ -36,6 +36,7 @@ import org.apache.jackrabbit.oak.query.fulltext.FullTextExpression;
import org.apache.jackrabbit.oak.query.fulltext.FullTextTerm;
import org.apache.jackrabbit.oak.query.fulltext.FullTextVisitor;
import org.apache.jackrabbit.oak.spi.query.Filter;
+import org.apache.jackrabbit.oak.spi.query.QueryIndex;
import org.apache.lucene.index.IndexReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -43,6 +44,7 @@ import org.slf4j.LoggerFactory;
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Lists.newArrayListWithCapacity;
import static com.google.common.collect.Maps.newHashMap;
+import static org.apache.jackrabbit.JcrConstants.JCR_SCORE;
import static org.apache.jackrabbit.oak.commons.PathUtils.getAncestorPath;
import static org.apache.jackrabbit.oak.commons.PathUtils.getDepth;
import static org.apache.jackrabbit.oak.commons.PathUtils.getParentPath;
@@ -156,7 +158,8 @@ class IndexPlanner {
//Fulltext expression can also be like jcr:contains(jcr:content/metadata/@format, 'image')
List<OrderEntry> sortOrder = createSortOrder(indexingRule);
- if (!indexedProps.isEmpty() || !sortOrder.isEmpty() || ft != null || evalPathRestrictions) {
+ boolean canSort = canHandleSorting(sortOrder);
+ if (!indexedProps.isEmpty() || canSort || ft != null || evalPathRestrictions) {
//TODO Need a way to have better cost estimate to indicate that
//this index can evaluate more propertyRestrictions natively (if more props are indexed)
//For now we reduce cost per entry
@@ -191,6 +194,20 @@ class IndexPlanner {
return null;
}
+ private boolean canHandleSorting(List<OrderEntry> sortOrder) {
+ if (sortOrder.isEmpty()){
+ return false;
+ }
+
+ //If jcr:score is the only sort order then opt out
+ if (sortOrder.size() == 1
+ && JCR_SCORE.equals(sortOrder.get(0).getPropertyName())){
+ return false;
+ }
+
+ return true;
+ }
+
private boolean canEvalAllFullText(final IndexingRule indexingRule, FullTextExpression ft) {
if (ft == null){
return false;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2439_beaca1a4.diff |
bugs-dot-jar_data_OAK-1208_cb3ac20d | ---
BugID: OAK-1208
Summary: Lucene Index should ignore property existence checks
Description: |-
Some optimizations on the query engine transform certain clauses in property existence checks. ie (p = 'somevalue' turns into 'p is not null').
This doesn't play well with lucene as it can not effectively build a 'not null' query, even worse the query doesn't return any results.
As a fix I'll just skip the existence constraints from the generated lucene query.
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
index 8be3395..8889094 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
@@ -475,6 +475,13 @@ public class LuceneIndex implements FulltextQueryIndex {
}
for (PropertyRestriction pr : filter.getPropertyRestrictions()) {
+
+ if (pr.first == null && pr.last == null) {
+ // ignore property existence checks, Lucene can't to 'property
+ // is not null' queries (OAK-1208)
+ continue;
+ }
+
String name = pr.propertyName;
if (name.contains("/")) {
// lucene cannot handle child-level property restrictions
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1208_cb3ac20d.diff |
bugs-dot-jar_data_OAK-1761_f37ce716 | ---
BugID: OAK-1761
Summary: DocumentNodeStore does not make use of References while serializing Blob
Description: 'The BlobSerializer in DocumentNodeStore does not make use of Blob references
which results in copying the blobs by value hence significantly slowing down any
migration '
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 36b242a..cc5ea66 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -280,7 +280,17 @@ public final class DocumentNodeStore
if (blob instanceof BlobStoreBlob) {
return ((BlobStoreBlob) blob).getBlobId();
}
+
String id;
+
+ String reference = blob.getReference();
+ if(reference != null){
+ id = blobStore.getBlobId(reference);
+ if(id != null){
+ return id;
+ }
+ }
+
try {
id = createBlob(blob.getNewStream()).getBlobId();
} catch (IOException e) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1761_f37ce716.diff |
bugs-dot-jar_data_OAK-1348_bc7b7e8c | ---
BugID: OAK-1348
Summary: ACE merging not behaving correctly if not using managed principals
Description: "{{org.apache.jackrabbit.api.security.JackrabbitAccessControlList#addEntry()}}
does not work correctly, if the given principal is not retrieved from the PrincipalManager.\n\nException:\n{noformat}\nCaused
by: org.apache.jackrabbit.oak.api.CommitFailedException: OakAccessControl0013: Duplicate
ACE found in policy\n\tat org.apache.jackrabbit.oak.security.authorization.accesscontrol.AccessControlValidator.accessViolation(AccessControlValidator.java:278)\n\tat
org.apache.jackrabbit.oak.security.authorization.accesscontrol.AccessControlValidator.checkValidPolicy(AccessControlValidator.java:188)\n{noformat}\n\nthis
used to work in jackrabbit 2.x.\n\nthe problem is probably in {{org.apache.jackrabbit.oak.security.authorization.accesscontrol.ACL#internalAddEntry}}
where the principals are \"equalled\" instead of comparing their names.\n\nnote,
that adding an ACE with such a principal works, just the merging/overwriting detection
doesn't.\n\ntest:\n{code}\n Principal p1 = new Principal() { getName(){return \"foo\"}};\n
\ Principal p2 = new Principal() { getName(){return \"foo\"}};\n acl.addEntry(p1,
privileges, true);\n acl.addEntry(p2, privileges, false);\n ...\n save(); //
throws\n{code}"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/ACL.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/ACL.java
index a868b62..9b0afaa 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/ACL.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authorization/accesscontrol/ACL.java
@@ -190,7 +190,7 @@ abstract class ACL extends AbstractAccessControlList {
List<ACE> subList = Lists.newArrayList(Iterables.filter(entries, new Predicate<ACE>() {
@Override
public boolean apply(@Nullable ACE ace) {
- return (ace != null) && ace.getPrincipal().equals(principal);
+ return (ace != null) && ace.getPrincipal().getName().equals(principal.getName());
}
}));
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1348_bc7b7e8c.diff |
bugs-dot-jar_data_OAK-614_6feacf6b | ---
BugID: OAK-614
Summary: AssertionError in MemoryNodeBuilder
Description: "{code}\n NodeBuilder root = ...\n NodeBuilder child = root.child(\"new\");\n\n
\ root.removeNode(\"new\");\n child.getChildNodeCount();\n{code}\n\nThe last
line throws an {{AssertionError}} when no node named \"new\" existed initially.
It throws an {{IllegalStateException}} as expected otherwise. "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java
index e514790..fac27d2 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/MemoryNodeBuilder.java
@@ -188,20 +188,19 @@ public class MemoryNodeBuilder implements NodeBuilder {
}
/**
- * Determine whether this child has been removed.
+ * Determine whether this child exists.
* Assumes {@code read()}, {@code write()} needs not be called.
- * @return {@code true} iff this child has been removed
+ * @return {@code true} iff this child exists
*/
- private boolean removed() {
- return !isRoot() && parent.writeState != null &&
- parent.hasBaseState(name) && !parent.writeState.hasChildNode(name);
+ private boolean exists() {
+ return isRoot() || parent.writeState == null || parent.writeState.hasChildNode(name);
}
@Nonnull
private NodeState read() {
if (revision != root.revision) {
assert(!isRoot()); // root never gets here since revision == root.revision
- checkState(!removed(), "This node has already been removed");
+ checkState(exists(), "This node has already been removed");
parent.read();
// The builder could have been reset, need to re-get base state
@@ -231,7 +230,7 @@ public class MemoryNodeBuilder implements NodeBuilder {
private MutableNodeState write(long newRevision, boolean skipRemovedCheck) {
// make sure that all revision numbers up to the root gets updated
if (!isRoot()) {
- checkState(skipRemovedCheck || !removed());
+ checkState(skipRemovedCheck || exists());
parent.write(newRevision, skipRemovedCheck);
}
@@ -243,7 +242,7 @@ public class MemoryNodeBuilder implements NodeBuilder {
writeState = parent.getWriteState(name);
if (writeState == null) {
- if (removed()) {
+ if (!exists()) {
writeState = new MutableNodeState(null);
}
else {
@@ -385,7 +384,7 @@ public class MemoryNodeBuilder implements NodeBuilder {
MutableNodeState childState = getWriteState(name);
if (childState == null) {
writeState.nodes.remove(name);
- childState = createChildBuilder(name).write();
+ childState = createChildBuilder(name).write(root.revision + 1, true);
}
childState.reset(state);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-614_6feacf6b.diff |
bugs-dot-jar_data_OAK-4353_b0014b7d | ---
BugID: OAK-4353
Summary: IndexOutOfBoundsException in FileStore.writeStream
Description: "When writing streams of specific length I get \n{code}\njava.lang.IndexOutOfBoundsException\nat
java.nio.Buffer.checkIndex(Buffer.java:538)\nat java.nio.HeapByteBuffer.getInt(HeapByteBuffer.java:359)\nat
org.apache.jackrabbit.oak.segment.Segment.getGcGen(Segment.java:318)\nat org.apache.jackrabbit.oak.segment.file.FileStore.writeSegment(FileStore.java:1371)\nat
org.apache.jackrabbit.oak.segment.SegmentWriter$SegmentWriteOperation.internalWriteStream(SegmentWriter.java:661)\n{code}\n"
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Segment.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Segment.java
index d1c0f0c..2f38496 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Segment.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/Segment.java
@@ -26,6 +26,7 @@ import static com.google.common.collect.Lists.newArrayListWithCapacity;
import static com.google.common.collect.Maps.newConcurrentMap;
import static java.lang.Boolean.getBoolean;
import static org.apache.jackrabbit.oak.commons.IOUtils.closeQuietly;
+import static org.apache.jackrabbit.oak.segment.SegmentId.isDataSegmentId;
import static org.apache.jackrabbit.oak.segment.SegmentVersion.isValid;
import static org.apache.jackrabbit.oak.segment.SegmentWriter.BLOCK_SIZE;
@@ -38,6 +39,7 @@ import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.util.Arrays;
import java.util.List;
+import java.util.UUID;
import java.util.concurrent.ConcurrentMap;
import javax.annotation.CheckForNull;
@@ -314,12 +316,27 @@ public class Segment {
return data.getShort(ROOT_COUNT_OFFSET) & 0xffff;
}
- public static int getGcGen(ByteBuffer data) {
- return data.getInt(GC_GEN_OFFSET);
+ /**
+ * Determine the gc generation a segment from its data. Note that bulk segments don't have
+ * generations (i.e. stay at 0).
+ *
+ * @param data the date of the segment
+ * @param segmentId the id of the segment
+ * @return the gc generation of this segment or 0 if this is bulk segment.
+ */
+ public static int getGcGen(ByteBuffer data, UUID segmentId) {
+ return isDataSegmentId(segmentId.getLeastSignificantBits())
+ ? data.getInt(GC_GEN_OFFSET)
+ : 0;
}
+ /**
+ * Determine the gc generation of this segment. Note that bulk segments don't have
+ * generations (i.e. stay at 0).
+ * @return the gc generation of this segment or 0 if this is bulk segment.
+ */
public int getGcGen() {
- return getGcGen(data);
+ return getGcGen(data, id.asUUID());
}
public RecordType getRootType(int index) {
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
index 84d61a7..e8954f4 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/FileStore.java
@@ -1368,7 +1368,7 @@ public class FileStore implements SegmentStore {
public void writeSegment(SegmentId id, byte[] data, int offset, int length) throws IOException {
fileStoreLock.writeLock().lock();
try {
- int generation = Segment.getGcGen(wrap(data, offset, length));
+ int generation = Segment.getGcGen(wrap(data, offset, length), id.asUUID());
long size = writer.writeEntry(
id.getMostSignificantBits(),
id.getLeastSignificantBits(),
diff --git a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TarReader.java b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TarReader.java
index 8fcdcc2..5c7ee7d 100644
--- a/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TarReader.java
+++ b/oak-segment-tar/src/main/java/org/apache/jackrabbit/oak/segment/file/TarReader.java
@@ -218,7 +218,7 @@ class TarReader implements Closeable {
for (Map.Entry<UUID, byte[]> entry : entries.entrySet()) {
UUID uuid = entry.getKey();
byte[] data = entry.getValue();
- int generation = getGcGen(wrap(data));
+ int generation = getGcGen(wrap(data), uuid);
writer.writeEntry(
uuid.getMostSignificantBits(),
uuid.getLeastSignificantBits(),
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4353_b0014b7d.diff |
bugs-dot-jar_data_OAK-1227_117b0a3d | ---
BugID: OAK-1227
Summary: Node.hasNode("foo[2]") must not throw PathNotFoundException
Description: similar to OAK-1225, Node.hasNode("foo[2]") should return false
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java
index 2dbd284..21a32d0 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java
@@ -797,24 +797,32 @@ public class NodeImpl<T extends NodeDelegate> extends ItemImpl<T> implements Nod
@Override
public boolean hasNode(String relPath) throws RepositoryException {
- final String oakPath = getOakPathOrThrow(relPath);
- return perform(new NodeOperation<Boolean>(dlg) {
- @Override
- public Boolean perform() throws RepositoryException {
- return node.getChild(oakPath) != null;
- }
- });
+ try {
+ final String oakPath = getOakPathOrThrow(relPath);
+ return perform(new NodeOperation<Boolean>(dlg) {
+ @Override
+ public Boolean perform() throws RepositoryException {
+ return node.getChild(oakPath) != null;
+ }
+ });
+ } catch (PathNotFoundException e) {
+ return false;
+ }
}
@Override
public boolean hasProperty(String relPath) throws RepositoryException {
- final String oakPath = getOakPathOrThrow(relPath);
- return perform(new NodeOperation<Boolean>(dlg) {
- @Override
- public Boolean perform() throws RepositoryException {
- return node.getPropertyOrNull(oakPath) != null;
- }
- });
+ try {
+ final String oakPath = getOakPathOrThrow(relPath);
+ return perform(new NodeOperation<Boolean>(dlg) {
+ @Override
+ public Boolean perform() throws RepositoryException {
+ return node.getPropertyOrNull(oakPath) != null;
+ }
+ });
+ } catch (PathNotFoundException e) {
+ return false;
+ }
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1227_117b0a3d.diff |
bugs-dot-jar_data_OAK-1225_3535afe2 | ---
BugID: OAK-1225
Summary: Session.nodeExists("/foo/bar[2]") must not throw PathNotFoundException
Description: similar to OAK-1216, Session.nodeExists() of an SNS path with indexes
> 1 should return false.
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionImpl.java
index 13ade55..7d6e5d2 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/SessionImpl.java
@@ -161,7 +161,11 @@ public class SessionImpl implements JackrabbitSession {
return perform(new ReadOperation<Node>() {
@Override
public Node perform() throws RepositoryException {
- return NodeImpl.createNodeOrNull(sd.getNode(getOakPathOrThrow(absPath)), sessionContext);
+ try {
+ return NodeImpl.createNodeOrNull(sd.getNode(getOakPathOrThrow(absPath)), sessionContext);
+ } catch (PathNotFoundException e) {
+ return null;
+ }
}
});
}
@@ -179,7 +183,12 @@ public class SessionImpl implements JackrabbitSession {
if (absPath.equals("/")) {
return null;
} else {
- final String oakPath = getOakPathOrThrow(absPath);
+ final String oakPath;
+ try {
+ oakPath = getOakPathOrThrow(absPath);
+ } catch (PathNotFoundException e) {
+ return null;
+ }
return perform(new ReadOperation<Property>() {
@Override
public Property perform() throws RepositoryException {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1225_3535afe2.diff |
bugs-dot-jar_data_OAK-1883_9c2421ed | ---
BugID: OAK-1883
Summary: Unnecessary invocations of LastRevRecovery when recovery already done.
Description: 'Even after _lastRev recovery executed on a cluster node, there are unnecessary invocations
of recovery happening on that cluster node, till that cluster node comes online
again.
'
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/MissingLastRevSeeker.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/MissingLastRevSeeker.java
index eef7c82..7659ec4 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/MissingLastRevSeeker.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/MissingLastRevSeeker.java
@@ -94,6 +94,7 @@ public class MissingLastRevSeeker {
public void releaseRecoveryLock(int clusterId){
UpdateOp update = new UpdateOp(Integer.toString(clusterId), true);
update.set(ClusterNodeInfo.REV_RECOVERY_LOCK, null);
+ update.set(ClusterNodeInfo.STATE, null);
store.createOrUpdate(Collection.CLUSTER_NODES, update);
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1883_9c2421ed.diff |
bugs-dot-jar_data_OAK-3798_2ac1dccd | ---
BugID: OAK-3798
Summary: NodeDocument.getNewestRevision() incorrect when there are previous documents
Description: |-
The method may incorrectly return null when there are previous documents and the base revision is lower than all local changes.
This is most likely caused by changes done for OAK-3388.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index fbcb646..f4006d4 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -757,6 +757,15 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
clusterIds.add(prevRev.getClusterId());
}
}
+ if (!clusterIds.isEmpty()) {
+ // add clusterIds of local changes as well
+ for (Revision r : getLocalCommitRoot().keySet()) {
+ clusterIds.add(r.getClusterId());
+ }
+ for (Revision r : getLocalRevisions().keySet()) {
+ clusterIds.add(r.getClusterId());
+ }
+ }
}
// if we don't have clusterIds, we can use the local changes only
boolean fullScan = true;
@@ -786,7 +795,8 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
}
if (!fullScan) {
// check if we can stop going through changes
- if (clusterIds.contains(r.getClusterId())) {
+ if (isRevisionNewer(context, lower, r)
+ && newestRevs.containsKey(r.getClusterId())) {
if (isRevisionNewer(context, lower, r)) {
clusterIds.remove(r.getClusterId());
if (clusterIds.isEmpty()) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3798_2ac1dccd.diff |
bugs-dot-jar_data_OAK-3019_5135cf4b | ---
BugID: OAK-3019
Summary: VersionablePathHook must not process hidden nodes
Description: The VersionablePathHook also processes hidden nodes, e.g. index data,
which adds considerable overhead to the merge phase.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/version/VersionablePathHook.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/version/VersionablePathHook.java
index 9369dde..132296a 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/version/VersionablePathHook.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/version/VersionablePathHook.java
@@ -37,6 +37,7 @@ import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.state.DefaultNodeStateDiff;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
@@ -108,6 +109,10 @@ public class VersionablePathHook implements CommitHook {
@Override
public boolean childNodeChanged(
String name, NodeState before, NodeState after) {
+ if (NodeStateUtils.isHidden(name)) {
+ // stop comparison
+ return false;
+ }
Node node = new Node(nodeAfter, name);
return after.compareAgainstBaseState(
before, new Diff(versionManager, node, exceptions));
@@ -117,7 +122,8 @@ public class VersionablePathHook implements CommitHook {
if (JcrConstants.JCR_VERSIONHISTORY.equals(after.getName()) && nodeAfter.isVersionable(versionManager)) {
NodeBuilder vhBuilder;
try {
- vhBuilder = versionManager.getOrCreateVersionHistory(nodeAfter.builder, Collections.EMPTY_MAP);
+ vhBuilder = versionManager.getOrCreateVersionHistory(
+ nodeAfter.builder, Collections.<String, Object>emptyMap());
} catch (CommitFailedException e) {
exceptions.add(e);
// stop further comparison
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3019_5135cf4b.diff |
bugs-dot-jar_data_OAK-1822_016df669 | ---
BugID: OAK-1822
Summary: NodeDocument _modified may go back in time
Description: In a cluster with multiple DocumentMK instances the _modified field of
a NodeDocument may go back in time. This will result in incorrect diff calculations
when the DocumentNodeStore uses the _modified field to find changed nodes for a
given revision range.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
index 0433392..b49f72e 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/NodeDocument.java
@@ -1205,7 +1205,7 @@ public final class NodeDocument extends Document implements CachedNodeDocument{
public static void setModified(@Nonnull UpdateOp op,
@Nonnull Revision revision) {
- checkNotNull(op).set(MODIFIED_IN_SECS, getModifiedInSecs(checkNotNull(revision).getTimestamp()));
+ checkNotNull(op).max(MODIFIED_IN_SECS, getModifiedInSecs(checkNotNull(revision).getTimestamp()));
}
public static void setRevision(@Nonnull UpdateOp op,
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java
index 56b0698..0517fe1 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateOp.java
@@ -131,9 +131,7 @@ public final class UpdateOp {
* @param value the value
*/
void setMapEntry(@Nonnull String property, @Nonnull Revision revision, Object value) {
- Operation op = new Operation();
- op.type = Operation.Type.SET_MAP_ENTRY;
- op.value = value;
+ Operation op = new Operation(Operation.Type.SET_MAP_ENTRY, value);
changes.put(new Key(property, checkNotNull(revision)), op);
}
@@ -145,8 +143,7 @@ public final class UpdateOp {
* @param revision the revision
*/
public void removeMapEntry(@Nonnull String property, @Nonnull Revision revision) {
- Operation op = new Operation();
- op.type = Operation.Type.REMOVE_MAP_ENTRY;
+ Operation op = new Operation(Operation.Type.REMOVE_MAP_ENTRY, null);
changes.put(new Key(property, checkNotNull(revision)), op);
}
@@ -157,9 +154,23 @@ public final class UpdateOp {
* @param value the value
*/
void set(String property, Object value) {
- Operation op = new Operation();
- op.type = Operation.Type.SET;
- op.value = value;
+ Operation op = new Operation(Operation.Type.SET, value);
+ changes.put(new Key(property, null), op);
+ }
+
+ /**
+ * Set the property to the given value if the new value is higher than the
+ * existing value. The property is also set to the given value if the
+ * property does not yet exist.
+ * <p>
+ * The result of a max operation with different types of values is
+ * undefined.
+ *
+ * @param property the name of the property to set.
+ * @param value the new value for the property.
+ */
+ <T> void max(String property, Comparable<T> value) {
+ Operation op = new Operation(Operation.Type.MAX, value);
changes.put(new Key(property, null), op);
}
@@ -187,9 +198,7 @@ public final class UpdateOp {
if (isNew) {
throw new IllegalStateException("Cannot use containsMapEntry() on new document");
}
- Operation op = new Operation();
- op.type = Operation.Type.CONTAINS_MAP_ENTRY;
- op.value = exists;
+ Operation op = new Operation(Operation.Type.CONTAINS_MAP_ENTRY, exists);
changes.put(new Key(property, checkNotNull(revision)), op);
}
@@ -200,9 +209,7 @@ public final class UpdateOp {
* @param value the increment
*/
public void increment(@Nonnull String property, long value) {
- Operation op = new Operation();
- op.type = Operation.Type.INCREMENT;
- op.value = value;
+ Operation op = new Operation(Operation.Type.INCREMENT, value);
changes.put(new Key(property, null), op);
}
@@ -239,6 +246,14 @@ public final class UpdateOp {
SET,
/**
+ * Set the value if the new value is higher than the existing value.
+ * The new value is also considered higher, when there is no
+ * existing value.
+ * The sub-key is not used.
+ */
+ MAX,
+
+ /**
* Increment the Long value with the provided Long value.
* The sub-key is not used.
*/
@@ -267,12 +282,17 @@ public final class UpdateOp {
/**
* The operation type.
*/
- public Type type;
+ public final Type type;
/**
* The value, if any.
*/
- public Object value;
+ public final Object value;
+
+ Operation(Type type, Object value) {
+ this.type = checkNotNull(type);
+ this.value = value;
+ }
@Override
public String toString() {
@@ -283,18 +303,16 @@ public final class UpdateOp {
Operation reverse = null;
switch (type) {
case INCREMENT:
- reverse = new Operation();
- reverse.type = Type.INCREMENT;
- reverse.value = -(Long) value;
+ reverse = new Operation(Type.INCREMENT, -(Long) value);
break;
case SET:
+ case MAX:
case REMOVE_MAP_ENTRY:
case CONTAINS_MAP_ENTRY:
// nothing to do
break;
case SET_MAP_ENTRY:
- reverse = new Operation();
- reverse.type = Type.REMOVE_MAP_ENTRY;
+ reverse = new Operation(Type.REMOVE_MAP_ENTRY, null);
break;
}
return reverse;
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateUtils.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateUtils.java
index b8015ff..240665d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateUtils.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/UpdateUtils.java
@@ -44,7 +44,9 @@ public class UpdateUtils {
* @param comparator
* the revision comparator.
*/
- public static void applyChanges(@Nonnull Document doc, @Nonnull UpdateOp update, @Nonnull Comparator<Revision> comparator) {
+ public static void applyChanges(@Nonnull Document doc,
+ @Nonnull UpdateOp update,
+ @Nonnull Comparator<Revision> comparator) {
for (Entry<Key, Operation> e : checkNotNull(update).getChanges().entrySet()) {
Key k = e.getKey();
Operation op = e.getValue();
@@ -53,6 +55,15 @@ public class UpdateUtils {
doc.put(k.toString(), op.value);
break;
}
+ case MAX: {
+ Comparable newValue = (Comparable) op.value;
+ Object old = doc.get(k.toString());
+ //noinspection unchecked
+ if (old == null || newValue.compareTo(old) > 0) {
+ doc.put(k.toString(), op.value);
+ }
+ break;
+ }
case INCREMENT: {
Object old = doc.get(k.toString());
Long x = (Long) op.value;
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
index 0266e38..99db8d1 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
@@ -570,6 +570,7 @@ public class MongoDocumentStore implements CachingDocumentStore {
Operation op = entry.getValue();
switch (op.type) {
case SET:
+ case MAX:
case INCREMENT: {
inserts[i].put(k.toString(), op.value);
break;
@@ -965,6 +966,7 @@ public class MongoDocumentStore implements CachingDocumentStore {
@Nonnull
private static DBObject createUpdate(UpdateOp updateOp) {
BasicDBObject setUpdates = new BasicDBObject();
+ BasicDBObject maxUpdates = new BasicDBObject();
BasicDBObject incUpdates = new BasicDBObject();
BasicDBObject unsetUpdates = new BasicDBObject();
@@ -980,16 +982,17 @@ public class MongoDocumentStore implements CachingDocumentStore {
}
Operation op = entry.getValue();
switch (op.type) {
- case SET: {
+ case SET:
+ case SET_MAP_ENTRY: {
setUpdates.append(k.toString(), op.value);
break;
}
- case INCREMENT: {
- incUpdates.append(k.toString(), op.value);
+ case MAX: {
+ maxUpdates.append(k.toString(), op.value);
break;
}
- case SET_MAP_ENTRY: {
- setUpdates.append(k.toString(), op.value);
+ case INCREMENT: {
+ incUpdates.append(k.toString(), op.value);
break;
}
case REMOVE_MAP_ENTRY: {
@@ -1003,6 +1006,9 @@ public class MongoDocumentStore implements CachingDocumentStore {
if (!setUpdates.isEmpty()) {
update.append("$set", setUpdates);
}
+ if (!maxUpdates.isEmpty()) {
+ update.append("$max", maxUpdates);
+ }
if (!incUpdates.isEmpty()) {
update.append("$inc", incUpdates);
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1822_016df669.diff |
bugs-dot-jar_data_OAK-1874_3ae276c1 | ---
BugID: OAK-1874
Summary: 'Indexes: re-index automatically when adding an index'
Description: |-
When adding an index via import of content, the index is not automatically re-built. This is problematic, because subsequent queries will return no data because of that. Currently, the only way to re-index is to set the "reindex" property to "true".
I suggest that indexes are automatically re-indexes if the hidden child node (":data" I believe) is missing. This is in addition to the "reindex" property.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java
index 2116cee..75f0f31 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java
@@ -19,6 +19,7 @@ package org.apache.jackrabbit.oak.plugins.index;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Lists.newArrayListWithCapacity;
+import static org.apache.jackrabbit.oak.api.Type.BOOLEAN;
import static org.apache.jackrabbit.oak.commons.PathUtils.concat;
import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.ASYNC_PROPERTY_NAME;
import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.ASYNC_REINDEX_VALUE;
@@ -108,7 +109,7 @@ public class IndexUpdate implements Editor {
@Override
public void enter(NodeState before, NodeState after)
throws CommitFailedException {
- collectIndexEditors(builder.getChildNode(INDEX_DEFINITIONS_NAME));
+ collectIndexEditors(builder.getChildNode(INDEX_DEFINITIONS_NAME), before);
// no-op when reindex is empty
CommitFailedException exception = EditorDiff.process(
@@ -122,17 +123,30 @@ public class IndexUpdate implements Editor {
}
}
- private void collectIndexEditors(NodeBuilder definitions)
- throws CommitFailedException {
+ private boolean shouldReindex(NodeBuilder definition, NodeState before,
+ String name) {
+ PropertyState ps = definition.getProperty(REINDEX_PROPERTY_NAME);
+ if (ps != null && ps.getValue(BOOLEAN)) {
+ return true;
+ }
+ // reindex in the case this is a new node, even though the reindex flag
+ // might be set to 'false' (possible via content import)
+ return !before.getChildNode(INDEX_DEFINITIONS_NAME).hasChildNode(name);
+ }
+
+ private void collectIndexEditors(NodeBuilder definitions,
+ NodeState before) throws CommitFailedException {
for (String name : definitions.getChildNodeNames()) {
NodeBuilder definition = definitions.getChildNode(name);
if (Objects.equal(async, definition.getString(ASYNC_PROPERTY_NAME))) {
String type = definition.getString(TYPE_PROPERTY_NAME);
+ boolean shouldReindex = shouldReindex(definition,
+ before, name);
Editor editor = provider.getIndexEditor(type, definition, root, updateCallback);
if (editor == null) {
// trigger reindexing when an indexer becomes available
definition.setProperty(REINDEX_PROPERTY_NAME, true);
- } else if (definition.getBoolean(REINDEX_PROPERTY_NAME)) {
+ } else if (shouldReindex) {
if (definition.getBoolean(REINDEX_ASYNC_PROPERTY_NAME)
&& definition.getString(ASYNC_PROPERTY_NAME) == null) {
// switch index to an async update mode
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1874_3ae276c1.diff |
bugs-dot-jar_data_OAK-1024_e39b4d96 | ---
BugID: OAK-1024
Summary: Full-text search on the traversing index fails if the condition contains
a slash
Description: "A full-text search on the traversing index falls back to a sort of manual
evaluation of results. \nThis is handled by the _FullTextTerm_ class, and it appears
that it passes the constraint text through a cleanup process where it strips most
of the characters that are neither _Character.isLetterOrDigit(c)_ not in the list
_+-:&_\n\nI'm not exactly sure where this list comes from, but I see the '/' character
is missing which causes a certain type of query to fail.\n\nExample:\n{code}\n//*[jcr:contains(.,
'text/plain')]\n{code}\n\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java
index 78a672f..cd974c2 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java
@@ -67,7 +67,7 @@ public class FullTextTerm extends FullTextExpression {
} else if (c == '_') {
buff.append("\\_");
pattern = true;
- } else if (Character.isLetterOrDigit(c) || " +-:&/".indexOf(c) >= 0) {
+ } else if (Character.isLetterOrDigit(c) || " +-:&/.".indexOf(c) >= 0) {
buff.append(c);
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1024_e39b4d96.diff |
bugs-dot-jar_data_OAK-2117_c7669f31 | ---
BugID: OAK-2117
Summary: Reindex removes all nodes under index definition node
Description: Reindex logic in {{IndexUpdate}} removes all child node from index definition
node thus removing valid nodes which might be part of index defintion. It should
only remove hidden nodes
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java
index f68c213..47cdfd1 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/IndexUpdate.java
@@ -45,6 +45,7 @@ import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.spi.commit.Editor;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -170,7 +171,9 @@ public class IndexUpdate implements Editor {
// as we don't know the index content node name
// beforehand, we'll remove all child nodes
for (String rm : definition.getChildNodeNames()) {
- definition.getChildNode(rm).remove();
+ if (NodeStateUtils.isHidden(rm)) {
+ definition.getChildNode(rm).remove();
+ }
}
reindex.put(concat(getPath(), INDEX_DEFINITIONS_NAME, name), editor);
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2117_c7669f31.diff |
bugs-dot-jar_data_OAK-3310_4416a9f8 | ---
BugID: OAK-3310
Summary: Write operations on Property do not check checked-out state of Node
Description: Write operations on Property do not check the checked-out state. The
same is true for Node.setProperty(..., null).
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java
index 71f12f9..07e0ae3 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/NodeImpl.java
@@ -1424,6 +1424,14 @@ public class NodeImpl<T extends NodeDelegate> extends ItemImpl<T> implements Nod
throws RepositoryException {
final String oakName = getOakName(checkNotNull(jcrName));
return perform(new ItemWriteOperation<Property>("internalRemoveProperty") {
+ @Override
+ public void checkPreconditions() throws RepositoryException {
+ super.checkPreconditions();
+ if (!isCheckedOut()) {
+ throw new VersionException(
+ "Cannot remove property. Node is checked in.");
+ }
+ }
@Nonnull
@Override
public Property perform() throws RepositoryException {
diff --git a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/PropertyImpl.java b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/PropertyImpl.java
index 52ca344..7a54197 100644
--- a/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/PropertyImpl.java
+++ b/oak-jcr/src/main/java/org/apache/jackrabbit/oak/jcr/session/PropertyImpl.java
@@ -37,6 +37,7 @@ import javax.jcr.Value;
import javax.jcr.ValueFactory;
import javax.jcr.ValueFormatException;
import javax.jcr.nodetype.PropertyDefinition;
+import javax.jcr.version.VersionException;
import org.apache.jackrabbit.oak.api.Tree.Status;
import org.apache.jackrabbit.oak.api.Type;
@@ -110,6 +111,15 @@ public class PropertyImpl extends ItemImpl<PropertyDelegate> implements Property
public void remove() throws RepositoryException {
sessionDelegate.performVoid(new ItemWriteOperation("remove") {
@Override
+ public void checkPreconditions() throws RepositoryException {
+ super.checkPreconditions();
+ if (!getParent().isCheckedOut()) {
+ throw new VersionException(
+ "Cannot set property. Node is checked in.");
+ }
+ }
+
+ @Override
public void performVoid() {
dlg.remove();
}
@@ -451,6 +461,15 @@ public class PropertyImpl extends ItemImpl<PropertyDelegate> implements Property
throws RepositoryException {
sessionDelegate.performVoid(new ItemWriteOperation("internalSetValue") {
@Override
+ public void checkPreconditions() throws RepositoryException {
+ super.checkPreconditions();
+ if (!getParent().isCheckedOut()) {
+ throw new VersionException(
+ "Cannot set property. Node is checked in.");
+ }
+ }
+
+ @Override
public void performVoid() throws RepositoryException {
Type<?> type = dlg.getPropertyState().getType();
if (type.isArray()) {
@@ -478,6 +497,15 @@ public class PropertyImpl extends ItemImpl<PropertyDelegate> implements Property
sessionDelegate.performVoid(new ItemWriteOperation("internalSetValue") {
@Override
+ public void checkPreconditions() throws RepositoryException {
+ super.checkPreconditions();
+ if (!getParent().isCheckedOut()) {
+ throw new VersionException(
+ "Cannot set property. Node is checked in.");
+ }
+ }
+
+ @Override
public void performVoid() throws RepositoryException {
Type<?> type = dlg.getPropertyState().getType();
if (!type.isArray()) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3310_4416a9f8.diff |
bugs-dot-jar_data_OAK-3089_ba38c380 | ---
BugID: OAK-3089
Summary: 'LIRS cache: zero size cache causes IllegalArgumentException'
Description: |-
The LIRS cache does not support a zero size cache currently. Such a configuration causes an IllegalArgumentException.
Instead, no exception should be thrown, and no or a minimum size cache should be used.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
index 6eb74dc..26e44e7 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
@@ -409,8 +409,8 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
* @param maxMemory the maximum size (1 or larger)
*/
public void setMaxMemory(long maxMemory) {
- if (maxMemory <= 0) {
- throw new IllegalArgumentException("Max memory must be larger than 0");
+ if (maxMemory < 0) {
+ throw new IllegalArgumentException("Max memory must not be negative");
}
this.maxMemory = maxMemory;
if (segments != null) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3089_ba38c380.diff |
bugs-dot-jar_data_OAK-1186_52372042 | ---
BugID: OAK-1186
Summary: Parallel execution of ConcurrentReadAccessControlledTreeTest fails with MongoMK
Description: |-
The is caused by concurrent creation of test content and the conflict it creates in the index. Every Oak test instance tries to create {{/oak:index/nodetype/:index/nt%3Afile}}, but only one will succeed. AFAICS there are two options how to handle this:
- Implement conflict annotation (OAK-1185), though I'm not sure this will really work. On commit, the rebase happens first, when changes from the other Oak instance may not be visible yet. Then, the commit hook runs and perform another branch commit with the changes, which works fine. Only the last step fails, when MongoMK tries to merge the branch. This is the point when the conflict may be detected.
- Implement a retry logic in MongoMK/NS
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java
index ac9ee2f..8581ecb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoNodeStore.java
@@ -28,6 +28,7 @@ import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -50,6 +51,7 @@ import javax.annotation.Nullable;
import com.google.common.base.Function;
import com.google.common.cache.Cache;
import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.jackrabbit.mk.api.MicroKernelException;
import org.apache.jackrabbit.mk.blobs.BlobStore;
@@ -786,28 +788,57 @@ public final class MongoNodeStore
if (b == null) {
throw new MicroKernelException("Empty branch cannot be reset");
}
+ if (!b.getCommits().last().equals(branchHead)) {
+ throw new MicroKernelException(branchHead + " is not the head " +
+ "of a branch");
+ }
if (!b.containsCommit(ancestor)) {
throw new MicroKernelException(ancestor + " is not " +
"an ancestor revision of " + branchHead);
}
- Revision rev;
+ if (branchHead.equals(ancestor)) {
+ // trivial
+ return branchHead;
+ }
boolean success = false;
Commit commit = newCommit(branchHead);
try {
- // apply reverse diff
- getRoot(ancestor).compareAgainstBaseState(getRoot(branchHead),
- new CommitDiff(commit, getBlobSerializer()));
- UpdateOp rootOp = commit.getUpdateOperationForNode("/");
- // clear collisions
Iterator<Revision> it = b.getCommits().tailSet(ancestor).iterator();
// first revision is the ancestor (tailSet is inclusive)
- // do not clear collision for this revision
- it.next();
+ // do not undo changes for this revision
+ Revision base = it.next();
+ Map<String, UpdateOp> operations = new HashMap<String, UpdateOp>();
while (it.hasNext()) {
- NodeDocument.removeCollision(rootOp, it.next());
+ Revision reset = it.next();
+ getRoot(reset).compareAgainstBaseState(getRoot(base),
+ new ResetDiff(reset.asTrunkRevision(), operations));
+ UpdateOp rootOp = operations.get("/");
+ if (rootOp == null) {
+ rootOp = new UpdateOp(Utils.getIdFromPath("/"), false);
+ NodeDocument.setModified(rootOp, commit.getRevision());
+ operations.put("/", rootOp);
+ }
+ NodeDocument.removeCollision(rootOp, reset.asTrunkRevision());
+ NodeDocument.removeRevision(rootOp, reset.asTrunkRevision());
+ }
+ // update root document first
+ if (store.findAndUpdate(Collection.NODES, operations.get("/")) != null) {
+ // clean up in-memory branch data
+ // first revision is the ancestor (tailSet is inclusive)
+ List<Revision> revs = Lists.newArrayList(b.getCommits().tailSet(ancestor));
+ for (Revision r : revs.subList(1, revs.size())) {
+ b.removeCommit(r);
+ }
+ // successfully updating the root document can be considered
+ // as success because the changes are not marked as committed
+ // anymore
+ success = true;
+ }
+ operations.remove("/");
+ // update remaining documents
+ for (UpdateOp op : operations.values()) {
+ store.findAndUpdate(Collection.NODES, op);
}
- rev = apply(commit);
- success = true;
} finally {
if (!success) {
canceled(commit);
@@ -815,7 +846,7 @@ public final class MongoNodeStore
done(commit, true, null);
}
}
- return rev;
+ return ancestor;
}
@Nonnull
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java
index de0d062..aba6a98 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/NodeDocument.java
@@ -843,6 +843,11 @@ public class NodeDocument extends Document {
checkNotNull(op).unsetMapEntry(REVISIONS, checkNotNull(revision));
}
+ public static void removeRevision(@Nonnull UpdateOp op,
+ @Nonnull Revision revision) {
+ checkNotNull(op).removeMapEntry(REVISIONS, checkNotNull(revision));
+ }
+
public static void removeCollision(@Nonnull UpdateOp op,
@Nonnull Revision revision) {
checkNotNull(op).removeMapEntry(COLLISIONS, checkNotNull(revision));
@@ -872,6 +877,11 @@ public class NodeDocument extends Document {
String.valueOf(commitRootDepth));
}
+ public static void removeCommitRoot(@Nonnull UpdateOp op,
+ @Nonnull Revision revision) {
+ checkNotNull(op).removeMapEntry(COMMIT_ROOT, revision);
+ }
+
public static void setDeleted(@Nonnull UpdateOp op,
@Nonnull Revision revision,
boolean deleted) {
@@ -879,6 +889,11 @@ public class NodeDocument extends Document {
String.valueOf(deleted));
}
+ public static void removeDeleted(@Nonnull UpdateOp op,
+ @Nonnull Revision revision) {
+ checkNotNull(op).removeMapEntry(DELETED, revision);
+ }
+
//----------------------------< internal >----------------------------------
/**
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/ResetDiff.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/ResetDiff.java
new file mode 100644
index 0000000..9559887
--- /dev/null
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/ResetDiff.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.mongomk;
+
+import java.util.Map;
+
+import javax.annotation.Nonnull;
+
+import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.commons.PathUtils;
+import org.apache.jackrabbit.oak.plugins.mongomk.util.Utils;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
+import org.apache.jackrabbit.oak.spi.state.NodeStateDiff;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
+import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.MISSING_NODE;
+
+/**
+ * Implementation of a node state diff, which translates a diff into reset
+ * operations on a branch.
+ */
+class ResetDiff implements NodeStateDiff {
+
+ private final Revision revision;
+ private final String path;
+ private final Map<String, UpdateOp> operations;
+ private UpdateOp update;
+
+ ResetDiff(@Nonnull Revision revision,
+ @Nonnull Map<String, UpdateOp> operations) {
+ this(revision, "/", operations);
+ }
+
+ private ResetDiff(@Nonnull Revision revision,
+ @Nonnull String path,
+ @Nonnull Map<String, UpdateOp> operations) {
+ this.revision = checkNotNull(revision);
+ this.path = checkNotNull(path);
+ this.operations = checkNotNull(operations);
+ }
+
+ @Override
+ public boolean propertyAdded(PropertyState after) {
+ getUpdateOp().removeMapEntry(after.getName(), revision);
+ return true;
+ }
+
+ @Override
+ public boolean propertyChanged(PropertyState before, PropertyState after) {
+ getUpdateOp().removeMapEntry(after.getName(), revision);
+ return true;
+ }
+
+ @Override
+ public boolean propertyDeleted(PropertyState before) {
+ getUpdateOp().removeMapEntry(before.getName(), revision);
+ return true;
+ }
+
+ @Override
+ public boolean childNodeAdded(String name, NodeState after) {
+ String p = PathUtils.concat(path, name);
+ ResetDiff diff = new ResetDiff(revision, p, operations);
+ UpdateOp op = diff.getUpdateOp();
+ NodeDocument.removeDeleted(op, revision);
+ return after.compareAgainstBaseState(EMPTY_NODE, diff);
+ }
+
+ @Override
+ public boolean childNodeChanged(String name,
+ NodeState before,
+ NodeState after) {
+ String p = PathUtils.concat(path, name);
+ return after.compareAgainstBaseState(before,
+ new ResetDiff(revision, p, operations));
+ }
+
+ @Override
+ public boolean childNodeDeleted(String name, NodeState before) {
+ String p = PathUtils.concat(path, name);
+ ResetDiff diff = new ResetDiff(revision, p, operations);
+ NodeDocument.removeDeleted(diff.getUpdateOp(), revision);
+ return MISSING_NODE.compareAgainstBaseState(before, diff);
+ }
+
+ Map<String, UpdateOp> getOperations() {
+ return operations;
+ }
+
+ private UpdateOp getUpdateOp() {
+ if (update == null) {
+ update = operations.get(path);
+ if (update == null) {
+ String id = Utils.getIdFromPath(path);
+ update = new UpdateOp(id, false);
+ operations.put(path, update);
+ }
+ NodeDocument.removeRevision(update, revision);
+ NodeDocument.removeCommitRoot(update, revision);
+ }
+ return update;
+ }
+}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1186_52372042.diff |
bugs-dot-jar_data_OAK-4300_06c367af | ---
BugID: OAK-4300
Summary: Cost per entry for Lucene index of type v1 should be higher than that of
v2
Description: |-
Currently default cost per entry for Lucene index of type
# v1 - which uses query time aggregation
# v2 - which uses index time aggregation
Are same. However given that query time aggregation would require more effort it should result in a higher cost per entry.
This fact impacts the result in cases like OAK-2081 (see last few comments) where with usage of limits both index are currently considered equals
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
index 99018c5..ed96808 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/IndexDefinition.java
@@ -270,7 +270,7 @@ class IndexDefinition implements Aggregate.AggregateMapper {
}
this.maxFieldLength = getOptionalValue(defn, LuceneIndexConstants.MAX_FIELD_LENGTH, DEFAULT_MAX_FIELD_LENGTH);
- this.costPerEntry = getOptionalValue(defn, LuceneIndexConstants.COST_PER_ENTRY, 1.0);
+ this.costPerEntry = getOptionalValue(defn, LuceneIndexConstants.COST_PER_ENTRY, getDefaultCostPerEntry(version));
this.costPerExecution = getOptionalValue(defn, LuceneIndexConstants.COST_PER_EXECUTION, 1.0);
this.indexesAllTypes = areAllTypesIndexed();
this.analyzers = collectAnalyzers(defn);
@@ -1512,4 +1512,10 @@ class IndexDefinition implements Aggregate.AggregateMapper {
return activeDelete >= 0;
}
+ private static double getDefaultCostPerEntry(IndexFormatVersion version) {
+ //For older format cost per entry would be higher as it does a runtime
+ //aggregation
+ return version == IndexFormatVersion.V1 ? 1.5 : 1.0;
+ }
+
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4300_06c367af.diff |
bugs-dot-jar_data_OAK-1364_05c89637 | ---
BugID: OAK-1364
Summary: CacheLIRS concurrency issue
Description: "Some of the methods of the cache can throw a NullPointerException when
the cache is used concurrently. Example stack trace:\n\n{code}\njava.lang.NullPointerException:
null\norg.apache.jackrabbit.oak.cache.CacheLIRS.values(CacheLIRS.java:470) \norg.apache.jackrabbit.oak.cache.CacheLIRS$1.values(CacheLIRS.java:1432)\norg.apache.jackrabbit.oak.plugins.segment.file.FileStore.flush(FileStore.java:205)\n{code}\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
index 3356131..431d1f3 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/cache/CacheLIRS.java
@@ -154,11 +154,6 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
}
}
- private Entry<K, V> find(Object key) {
- int hash = getHash(key);
- return getSegment(hash).find(key, hash);
- }
-
/**
* Check whether there is a resident entry for the given key. This
* method does not adjust the internal state of the cache.
@@ -179,7 +174,8 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
* @return the value, or null if there is no resident entry
*/
public V peek(K key) {
- Entry<K, V> e = find(key);
+ int hash = getHash(key);
+ Entry<K, V> e = getSegment(hash).find(key, hash);
return e == null ? null : e.value;
}
@@ -459,7 +455,10 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
public synchronized Set<Map.Entry<K, V>> entrySet() {
HashMap<K, V> map = new HashMap<K, V>();
for (K k : keySet()) {
- map.put(k, find(k).value);
+ V v = peek(k);
+ if (v != null) {
+ map.put(k, v);
+ }
}
return map.entrySet();
}
@@ -467,7 +466,7 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
protected Collection<V> values() {
ArrayList<V> list = new ArrayList<V>();
for (K k : keySet()) {
- V v = find(k).value;
+ V v = peek(k);
if (v != null) {
list.add(v);
}
@@ -478,7 +477,7 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
boolean containsValue(Object value) {
for (Segment<K, V> s : segments) {
for (K k : s.keySet()) {
- V v = find(k).value;
+ V v = peek(k);
if (v != null && v.equals(value)) {
return true;
}
@@ -860,6 +859,9 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
synchronized V get(K key, int hash, CacheLoader<K, V> loader) throws ExecutionException {
V value = get(key, hash);
if (value == null) {
+ if (loader == null) {
+ return null;
+ }
long start = System.nanoTime();
try {
value = loader.load(key);
@@ -1394,7 +1396,7 @@ public class CacheLIRS<K, V> implements LoadingCache<K, V> {
@SuppressWarnings("unchecked")
@Override
public V get(Object key) {
- return CacheLIRS.this.getUnchecked((K) key);
+ return CacheLIRS.this.peek((K) key);
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1364_05c89637.diff |
bugs-dot-jar_data_OAK-748_503451c1 | ---
BugID: OAK-748
Summary: 'ContentMirrorStoreStrategy #insert fails to enforce uniqueness and is slow'
Description: |-
Following OAK-734 I've noticed that the _ContentMirrorStoreStrategy_ fails to enforce the uniqueness constraints assumed on the #insert method.
It is also responsible for a slowdown on the #insert method because of the behavior change of the Property2Index (very frequent saves instead of a bulk one).
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/p2/strategy/ContentMirrorStoreStrategy.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/p2/strategy/ContentMirrorStoreStrategy.java
index 0d3b259..f3d4804 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/p2/strategy/ContentMirrorStoreStrategy.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/index/p2/strategy/ContentMirrorStoreStrategy.java
@@ -42,7 +42,26 @@ import com.google.common.collect.Queues;
import com.google.common.collect.Sets;
/**
- * TODO document
+ * An IndexStoreStrategy implementation that saves the nodes under a hierarchy
+ * that mirrors the repository tree. <br>
+ * This should minimize the chance that concurrent updates overlap on the same
+ * content node.<br>
+ * <br>
+ * For example for a node that is under <code>/test/node</code>, the index
+ * structure will be <code>/oak:index/index/test/node</code>:
+ *
+ * <pre>
+ * <code>
+ * /
+ * test
+ * node
+ * oak:index
+ * index
+ * test
+ * node
+ * </code>
+ * </pre>
+ *
*/
public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
@@ -118,6 +137,12 @@ public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
public void insert(NodeBuilder index, String key, boolean unique,
Iterable<String> values) throws CommitFailedException {
NodeBuilder child = index.child(key);
+ if (unique
+ && (child.getProperty("match") != null || child
+ .getChildNodeCount() > 0)) {
+ throw new CommitFailedException(
+ "Uniqueness constraint violated for key " + key);
+ }
for (String add : values) {
NodeBuilder indexEntry = child;
@@ -126,16 +151,8 @@ public class ContentMirrorStoreStrategy implements IndexStoreStrategy {
}
indexEntry.setProperty("match", true);
}
- CountingNodeVisitor v = new CountingNodeVisitor(2);
- v.visit(child.getNodeState());
- int matchCount = v.getCount();
- if (matchCount == 0) {
- index.removeNode(key);
- } else if (unique && matchCount > 1) {
- throw new CommitFailedException("Uniqueness constraint violated for key " + key);
- }
}
-
+
@Override
public Iterable<String> query(final Filter filter, final String indexName,
final NodeState index, final Iterable<String> values) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-748_503451c1.diff |
bugs-dot-jar_data_OAK-554_3f51fb09 | ---
BugID: OAK-554
Summary: PropertyStates#createProperty ignores namespace mappings when creating states
of type NAME and PATH
Description: "as far as i saw we use PropertyStates#createProperty to create and\nset
an OAK property from a given JCR value or a list of JCR values.\n\nthis works well
for all types of values except for NAME, PATH which \nmay contain values with remapped
namespaces which will not be converted\nback to oak-values during the state creation:\n\n{code}\n
\ List<String> vals = Lists.newArrayList();\n for (Value value : values)
{\n vals.add(value.getString());\n }\n return new MultiGenericPropertyState(name,
vals, Type.fromTag(type, true));\n{code}\n\nif am not mistaken {code}value.getString(){code}
will return the JCR\nrepresentation of the value instead of the oak representation
as it\nwould be needed here.\n\npossible solutions include:\n- passing namepathmapper
to the create method\n- only accept oak Value implementation that allows to retrieve
the\n internal representation, which is present in the ValueImpl afaik."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/PropertyStates.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/PropertyStates.java
index 3837e96..372ca23 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/PropertyStates.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/memory/PropertyStates.java
@@ -32,6 +32,7 @@ import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
import org.apache.jackrabbit.oak.plugins.value.Conversions;
+import org.apache.jackrabbit.oak.plugins.value.ValueImpl;
import static org.apache.jackrabbit.oak.api.Type.STRINGS;
@@ -52,10 +53,11 @@ public final class PropertyStates {
*/
@Nonnull
public static PropertyState createProperty(String name, Value value) throws RepositoryException {
+
int type = value.getType();
switch (type) {
case PropertyType.STRING:
- return StringPropertyState.stringProperty(name, value.getString());
+ return StringPropertyState.stringProperty(name, getString(value, type));
case PropertyType.BINARY:
return BinaryPropertyState.binaryProperty(name, value);
case PropertyType.LONG:
@@ -69,7 +71,7 @@ public final class PropertyStates {
case PropertyType.DECIMAL:
return DecimalPropertyState.decimalProperty(name, value.getDecimal());
default:
- return new GenericPropertyState(name, value.getString(), Type.fromTag(type, false));
+ return new GenericPropertyState(name, getString(value, type), Type.fromTag(type, false));
}
}
@@ -96,7 +98,7 @@ public final class PropertyStates {
case PropertyType.STRING:
List<String> strings = Lists.newArrayList();
for (Value value : values) {
- strings.add(value.getString());
+ strings.add(getString(value, type));
}
return MultiStringPropertyState.stringProperty(name, strings);
case PropertyType.BINARY:
@@ -138,12 +140,24 @@ public final class PropertyStates {
default:
List<String> vals = Lists.newArrayList();
for (Value value : values) {
- vals.add(value.getString());
+ vals.add(getString(value, type));
}
return new MultiGenericPropertyState(name, vals, Type.fromTag(type, true));
}
}
+ private static String getString(Value value, int type) throws RepositoryException {
+ if (value instanceof ValueImpl) {
+ return ((ValueImpl) value).getOakString();
+ }
+ else if (type == PropertyType.NAME || type == PropertyType.PATH) {
+ throw new IllegalArgumentException("Cannot create name of path property state from Value " +
+ "of class '" + value.getClass() + '\'');
+ } else {
+ return value.getString();
+ }
+ }
+
/**
* Create a {@code PropertyState} from a string.
* @param name The name of the property state
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/value/ValueImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/value/ValueImpl.java
index 59ea846..4f733eb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/value/ValueImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/value/ValueImpl.java
@@ -73,6 +73,15 @@ public class ValueImpl implements Value {
this(checkSingleValued(property), 0, namePathMapper);
}
+ /**
+ * Same as {@link #getString()} unless that names and paths are returned in their
+ * Oak representation instead of being mapped to their JCR representation.
+ * @return A String representation of the value of this property.
+ */
+ public String getOakString() {
+ return propertyState.getValue(Type.STRING, index);
+ }
+
private static PropertyState checkSingleValued(PropertyState property) {
checkArgument(!property.isArray());
return property;
@@ -208,9 +217,9 @@ public class ValueImpl implements Value {
switch (getType()) {
case PropertyType.NAME:
- return namePathMapper.getJcrName(propertyState.getValue(Type.STRING, index));
+ return namePathMapper.getJcrName(getOakString());
case PropertyType.PATH:
- String s = propertyState.getValue(Type.STRING, index);
+ String s = getOakString();
if (s.startsWith("[") && s.endsWith("]")) {
// identifier paths are returned as-is (JCR 2.0, 3.4.3.1)
return s;
@@ -218,7 +227,7 @@ public class ValueImpl implements Value {
return namePathMapper.getJcrPath(s);
}
default:
- return propertyState.getValue(Type.STRING, index);
+ return getOakString();
}
}
@@ -273,13 +282,13 @@ public class ValueImpl implements Value {
return propertyState.getValue(Type.BINARY, index).hashCode();
}
else {
- return propertyState.getValue(Type.STRING, index).hashCode();
+ return getOakString().hashCode();
}
}
@Override
public String toString() {
- return propertyState.getValue(Type.STRING, index);
+ return getOakString();
}
private static int compare(PropertyState p1, int i1, PropertyState p2, int i2) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-554_3f51fb09.diff |
bugs-dot-jar_data_OAK-2389_0fa892b3 | ---
BugID: OAK-2389
Summary: issues with JsopBuilder.encode and .escape
Description: |-
1) escape() escapes many characters that do not need to be escaped (>127)
2) encode() does not encode many control characters that would need to be escaped when read through a JSON parser.
diff --git a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java
index 8489197..bffa237 100644
--- a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java
+++ b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/json/JsopBuilder.java
@@ -254,7 +254,7 @@ public class JsopBuilder implements JsopWriter {
}
for (int i = 0; i < length; i++) {
char c = s.charAt(i);
- if (c == '\"' || c == '\\' || c < ' ') {
+ if (c == '\"' || c == '\\' || c < ' ' || (c >= 0xd800 && c <= 0xdbff)) {
StringBuilder buff = new StringBuilder(length + 2 + length / 8);
buff.append('\"');
escape(s, length, buff);
@@ -285,7 +285,6 @@ public class JsopBuilder implements JsopWriter {
private static void escape(String s, int length, StringBuilder buff) {
for (int i = 0; i < length; i++) {
char c = s.charAt(i);
- int ic = (int)c;
switch (c) {
case '"':
// quotation mark
@@ -317,8 +316,8 @@ public class JsopBuilder implements JsopWriter {
break;
default:
if (c < ' ') {
- buff.append(String.format("\\u%04x", ic));
- } else if (ic >= 0xD800 && ic <= 0xDBFF) {
+ buff.append(String.format("\\u%04x", (int) c));
+ } else if (c >= 0xd800 && c <= 0xdbff) {
// isSurrogate(), only available in Java 7
if (i < length - 1 && Character.isSurrogatePair(c, s.charAt(i + 1))) {
// ok surrogate
@@ -327,7 +326,7 @@ public class JsopBuilder implements JsopWriter {
i += 1;
} else {
// broken surrogate -> escape
- buff.append(String.format("\\u%04x", ic));
+ buff.append(String.format("\\u%04x", (int) c));
}
} else {
buff.append(c);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2389_0fa892b3.diff |
bugs-dot-jar_data_OAK-1789_9f7c1df0 | ---
BugID: OAK-1789
Summary: Upgraded version history has UUIDs as jcr:frozenUuid of non-referenceable
nodes
Description: |-
In Jackrabbit Classic each node, even non-referenceable ones, has a UUID as its identifier, and thus the {{jcr:frozenUuid}} properties of frozen nodes are always UUIDs. In contrast Oak uses path identifiers for non-referenceable frozen nodes (see OAK-1009), which presents a problem when dealing with version histories migrated from Jackrabbit Classic.
To avoid this mismatch, the upgrade code should check each frozen node for referenceability and replace the frozen UUID with a path identifier if needed.
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java
index c063a26..3752b91 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/JackrabbitNodeState.java
@@ -32,6 +32,7 @@ import static org.apache.jackrabbit.JcrConstants.JCR_MIXINTYPES;
import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE;
import static org.apache.jackrabbit.JcrConstants.JCR_UUID;
import static org.apache.jackrabbit.JcrConstants.MIX_REFERENCEABLE;
+import static org.apache.jackrabbit.JcrConstants.NT_BASE;
import static org.apache.jackrabbit.JcrConstants.NT_FROZENNODE;
import static org.apache.jackrabbit.JcrConstants.NT_UNSTRUCTURED;
import static org.apache.jackrabbit.oak.api.Type.NAME;
@@ -131,6 +132,8 @@ class JackrabbitNodeState extends AbstractNodeState {
this.uriToPrefix = parent.uriToPrefix;
this.properties = createProperties(bundle);
this.nodes = createNodes(bundle);
+ setChildOrder();
+ fixFrozenUuid();
this.useBinaryReferences = parent.useBinaryReferences;
logNewNode(this);
}
@@ -151,6 +154,7 @@ class JackrabbitNodeState extends AbstractNodeState {
NodePropBundle bundle = loader.loadBundle(id);
this.properties = createProperties(bundle);
this.nodes = createNodes(bundle);
+ setChildOrder();
} catch (ItemStateException e) {
throw new IllegalStateException("Unable to access node " + id, e);
}
@@ -239,6 +243,13 @@ class JackrabbitNodeState extends AbstractNodeState {
//-----------------------------------------------------------< private >--
+ private void setChildOrder() {
+ if (isOrderable.apply(this)) {
+ properties.put(OAK_CHILD_ORDER, PropertyStates.createProperty(
+ OAK_CHILD_ORDER, nodes.keySet(), Type.NAMES));
+ }
+ }
+
private Map<String, NodeId> createNodes(NodePropBundle bundle) {
Map<String, NodeId> children = newLinkedHashMap();
for (ChildNodeEntry entry : bundle.getChildNodeEntries()) {
@@ -282,11 +293,6 @@ class JackrabbitNodeState extends AbstractNodeState {
JCR_UUID, bundle.getId().toString()));
}
- if (isOrderable.apply(primary, mixins)) {
- properties.put(OAK_CHILD_ORDER, PropertyStates.createProperty(
- OAK_CHILD_ORDER, nodes.keySet(), Type.NAMES));
- }
-
for (PropertyEntry property : bundle.getPropertyEntries()) {
String name = createName(property.getName());
try {
@@ -303,18 +309,22 @@ class JackrabbitNodeState extends AbstractNodeState {
}
}
+ return properties;
+ }
+
+ private void fixFrozenUuid() {
// OAK-1789: Convert the jcr:frozenUuid of a non-referenceable
// frozen node from UUID to a path identifier
PropertyState frozenUuid = properties.get(JCR_FROZENUUID);
if (frozenUuid != null
&& frozenUuid.getType() == STRING
- && isFrozenNode.apply(primary, mixins)) {
- String frozenPrimary = NT_UNSTRUCTURED;
+ && isFrozenNode.apply(this)) {
+ String frozenPrimary = NT_BASE;
Set<String> frozenMixins = newHashSet();
PropertyState property = properties.get(JCR_FROZENPRIMARYTYPE);
if (property != null && property.getType() == NAME) {
- primary = property.getValue(NAME);
+ frozenPrimary = property.getValue(NAME);
}
property = properties.get(JCR_FROZENMIXINTYPES);
if (property != null && property.getType() == NAMES) {
@@ -328,8 +338,6 @@ class JackrabbitNodeState extends AbstractNodeState {
properties.put(JCR_FROZENUUID, frozenUuid);
}
}
-
- return properties;
}
private org.apache.jackrabbit.oak.api.PropertyState createProperty(
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1789_9f7c1df0.diff |
bugs-dot-jar_data_OAK-296_5449bf39 | ---
BugID: OAK-296
Summary: PathUtils.isAncestor("/", "/") should return false but returns true
Description:
diff --git a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/PathUtils.java b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/PathUtils.java
index 6201804..883f197 100644
--- a/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/PathUtils.java
+++ b/oak-commons/src/main/java/org/apache/jackrabbit/oak/commons/PathUtils.java
@@ -16,10 +16,11 @@
*/
package org.apache.jackrabbit.oak.commons;
-import javax.annotation.Nonnull;
import java.util.Iterator;
import java.util.NoSuchElementException;
+import javax.annotation.Nonnull;
+
/**
* Utility methods to parse a path.
* <p/>
@@ -288,7 +289,12 @@ public class PathUtils {
if (ancestor.isEmpty() || path.isEmpty()) {
return false;
}
- if (!denotesRoot(ancestor)) {
+ if (denotesRoot(ancestor)) {
+ if (denotesRoot(path)) {
+ return false;
+ }
+ }
+ else {
ancestor += "/";
}
return path.startsWith(ancestor);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-296_5449bf39.diff |
bugs-dot-jar_data_OAK-1174_342809f7 | ---
BugID: OAK-1174
Summary: Inconsistent handling of invalid names/paths
Description: "Passing an invalid name to a JCR method might or might not throw a {{RepositoryException}}
depending on whether name re-mappings exist or not:\n\n{code}\nsession.itemExists(\"/jcr:cont]ent\");\n{code}\n\nreturns
{{false}} if no name re-mappings exist but throws a {{RepositoryException}} otherwise. "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrNameParser.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrNameParser.java
index 205a754..ebc7c08 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrNameParser.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrNameParser.java
@@ -92,7 +92,6 @@ public final class JcrNameParser {
String prefix;
int nameStart = 0;
int state = STATE_PREFIX_START;
- boolean trailingSpaces = false;
for (int i = 0; i < len; i++) {
char c = jcrName.charAt(i);
@@ -101,10 +100,6 @@ public final class JcrNameParser {
listener.error("Prefix must not be empty");
return false;
} else if (state == STATE_PREFIX) {
- if (trailingSpaces) {
- listener.error("Trailing spaces not allowed");
- return false;
- }
prefix = jcrName.substring(0, i);
if (!XMLChar.isValidNCName(prefix)) {
listener.error("Invalid name prefix: "+ prefix);
@@ -117,14 +112,7 @@ public final class JcrNameParser {
listener.error("'" + c + "' not allowed in name");
return false;
}
- trailingSpaces = false;
- } else if (c == ' ') {
- if (state == STATE_PREFIX_START || state == STATE_NAME_START) {
- listener.error("'" + c + "' not valid name start");
- return false;
- }
- trailingSpaces = true;
- } else if (Character.isWhitespace(c) || c == '[' || c == ']' || c == '*' || c == '|') {
+ } else if (c == '[' || c == ']' || c == '*' || c == '|') {
listener.error("'" + c + "' not allowed in name");
return false;
} else if (c == '/') {
@@ -134,7 +122,6 @@ public final class JcrNameParser {
listener.error("'" + c + "' not allowed in name");
return false;
}
- trailingSpaces = false;
} else if (c == '{') {
if (state == STATE_PREFIX_START) {
state = STATE_URI_START;
@@ -147,7 +134,6 @@ public final class JcrNameParser {
state = STATE_NAME;
nameStart = i;
}
- trailingSpaces = false;
} else if (c == '}') {
if (state == STATE_URI_START || state == STATE_URI) {
String tmp = jcrName.substring(1, i);
@@ -178,7 +164,6 @@ public final class JcrNameParser {
state = STATE_NAME;
nameStart = i;
}
- trailingSpaces = false;
} else {
if (state == STATE_PREFIX_START) {
state = STATE_PREFIX; // prefix start
@@ -188,7 +173,6 @@ public final class JcrNameParser {
} else if (state == STATE_URI_START) {
state = STATE_URI;
}
- trailingSpaces = false;
}
}
@@ -203,10 +187,6 @@ public final class JcrNameParser {
listener.error("Local name must not be empty");
return false;
}
- if (trailingSpaces) {
- listener.error("Trailing spaces not allowed");
- return false;
- }
return listener.name(jcrName, index);
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrPathParser.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrPathParser.java
index b63958a..54c1c9e 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrPathParser.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/namepath/JcrPathParser.java
@@ -80,11 +80,7 @@ public final class JcrPathParser {
while (pos <= len) {
char c = pos == len ? EOF : jcrPath.charAt(pos);
pos++;
- // special check for whitespace
- if (c != ' ' && Character.isWhitespace(c)) {
- c = '\t';
- }
-
+
switch (c) {
case '/':
case EOF:
@@ -205,24 +201,6 @@ public final class JcrPathParser {
}
break;
- case ' ':
- if (state == STATE_PREFIX_START || state == STATE_NAME_START) {
- listener.error('\'' + jcrPath + "' is not a valid path. '" + c +
- "' not valid name start");
- return false;
- } else if (state == STATE_INDEX_END) {
- listener.error('\'' + jcrPath + "' is not a valid path. '" + c +
- "' not valid after index. '/' expected.");
- return false;
- } else if (state == STATE_DOT || state == STATE_DOTDOT) {
- state = STATE_PREFIX;
- }
- break;
-
- case '\t':
- listener.error('\'' + jcrPath + "' is not a valid path. " +
- "Whitespace not a allowed in name.");
- return false;
case '*':
case '|':
listener.error('\'' + jcrPath + "' is not a valid path. '" + c +
@@ -257,7 +235,7 @@ public final class JcrPathParser {
return false;
}
}
- wasSlash = c == ' ';
+ wasSlash = c == '/';
}
return true;
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/name/Namespaces.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/name/Namespaces.java
index 3b4219c..d0d1e26 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/name/Namespaces.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/name/Namespaces.java
@@ -244,10 +244,14 @@ public class Namespaces implements NamespaceConstants {
for (int i = 0; i < local.length(); i++) {
char ch = local.charAt(i);
- if (i == 0 && Character.isWhitespace(ch)) {
- return false; // leading whitespace
- } else if (i == local.length() - 1 && Character.isWhitespace(ch)) {
- return false; // trailing whitespace
+ if (Character.isSpaceChar(ch)) {
+ if (i == 0) {
+ return false; // leading whitespace
+ } else if (i == local.length() - 1) {
+ return false; // trailing whitespace
+ } else if (ch != ' ') {
+ return false; // only spaces are allowed as whitespace
+ }
} else if ("/:[]|*".indexOf(ch) != -1) { // TODO: XMLChar check
return false; // invalid name character
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1174_342809f7.diff |
bugs-dot-jar_data_OAK-2603_77d2d3b0 | ---
BugID: OAK-2603
Summary: Failure in one of the batch in VersionGC might lead to orphaned nodes
Description: "VersionGC logic currently performs deletion of nodes in batches. For
GC to work properly NodeDocument should always be removed in bottom-up mode i.e.
parent node should be removed *after* child has been removed\n\nCurrently the GC
logic deletes the NodeDocument in undefined order. In such mode if one of the batch
fails then its possible that parent might have got deleted but the child was not
deleted. \n\nNow in next run the child node would not be recognized as a deleted
node because the commit root would not be found."
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
index 2f816d1..9b12714 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/VersionGarbageCollector.java
@@ -20,6 +20,7 @@
package org.apache.jackrabbit.oak.plugins.document;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
@@ -109,6 +110,8 @@ public class VersionGarbageCollector {
Utils.closeIfCloseable(itr);
}
+ Collections.sort(docIdsToDelete, PathComparator.INSTANCE);
+
if(log.isDebugEnabled()) {
StringBuilder sb = new StringBuilder("Deleted document with following ids were deleted as part of GC \n");
Joiner.on(StandardSystemProperty.LINE_SEPARATOR.value()).appendTo(sb, docIdsToDelete);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2603_77d2d3b0.diff |
bugs-dot-jar_data_OAK-2524_977a31d8 | ---
BugID: OAK-2524
Summary: Error while configuring analyzer by composition
Description: "Error while creating analyzer by composition from osgi due to an illegal
argument {{jcr:primaryType}} passed to {{TokenizerFactory.forName(clazz, args)}}
in {{NodeStateAnalyzerFactory.loadTokenizer()}}\n\n{noformat}\nCaused by: java.lang.IllegalArgumentException:
Unknown parameters: {jcr:primaryType=nt:unstructured}\n\tat org.apache.lucene.analysis.core.LowerCaseFilterFactory.<init>(LowerCaseFilterFactory.java:45)\n{noformat}"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
index dce4d4f..589fc63 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/NodeStateAnalyzerFactory.java
@@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
+import org.apache.jackrabbit.JcrConstants;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Tree;
@@ -67,7 +68,11 @@ import static com.google.common.collect.Lists.newArrayList;
final class NodeStateAnalyzerFactory{
private static final AtomicBoolean versionWarningAlreadyLogged = new AtomicBoolean(false);
- private static final Set<String> IGNORE_PROP_NAMES = ImmutableSet.of(LuceneIndexConstants.ANL_CLASS, LuceneIndexConstants.ANL_NAME);
+ private static final Set<String> IGNORE_PROP_NAMES = ImmutableSet.of(
+ LuceneIndexConstants.ANL_CLASS,
+ LuceneIndexConstants.ANL_NAME,
+ JcrConstants.JCR_PRIMARYTYPE
+ );
private static final Logger log = LoggerFactory.getLogger(NodeStateAnalyzerFactory.class);
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2524_977a31d8.diff |
bugs-dot-jar_data_OAK-1933_2e16a983 | ---
BugID: OAK-1933
Summary: 'Query: UnsupportedOperationException for some combinations of "or" and "and"
conditions'
Description: "The following query throws an UnsupportedOperationException:\n\n{noformat}\nselect
* from [nt:base] \n where [a] = 1 and [b] = 2 and [b] = 3 or [c] = 4\n{noformat}"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/AndImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/AndImpl.java
index d0b9245..090c265 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/AndImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/ast/AndImpl.java
@@ -110,7 +110,13 @@ public class AndImpl extends ConstraintImpl {
for (Entry<DynamicOperandImpl, Set<StaticOperandImpl>> e2 : m2.entrySet()) {
Set<StaticOperandImpl> s = result.get(e2.getKey());
if (s != null) {
- s.retainAll(e2.getValue());
+ // OAK-1933
+ // a property can have multiple values at the same time,
+ // so that "where a=1 and a=2" needs to be kept and can not
+ // be reduced to "where false" - in fact, we could
+ // extend it to "where a in (1, 2)" so that an index can be used,
+ // but we might as well keep it at "where a = 1" as that would
+ // also use an index
} else {
result.put(e2.getKey(), e2.getValue());
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/QueryIndex.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/QueryIndex.java
index a5cfe10..f3cf5ef 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/QueryIndex.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/spi/query/QueryIndex.java
@@ -194,8 +194,8 @@ public interface QueryIndex {
double getCostPerEntry();
/**
- * The estimated number of entries. This value does not have to be
- * accurate.
+ * The estimated number of entries in the cursor that is returned by the query method,
+ * when using this plan. This value does not have to be accurate.
*
* @return the estimated number of entries
*/
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1933_2e16a983.diff |
bugs-dot-jar_data_OAK-2691_d2da7499 | ---
BugID: OAK-2691
Summary: Blob GC throws NPE
Description: |-
Blob GC when registered without a shared data store throws NPE.
The {{ClusterRepositoryInfo#getId}} method should check if clusterId is registered or not.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java
index 7b5f4da..d2755eb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/identifier/ClusterRepositoryInfo.java
@@ -23,8 +23,11 @@ import org.apache.jackrabbit.oak.api.Type;
import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeState;
import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import javax.annotation.CheckForNull;
+
/**
* Utility class to manage a unique cluster/repository id for the cluster.
*/
@@ -57,8 +60,13 @@ public class ClusterRepositoryInfo {
* @param store the NodeStore instance
* @return the repository id
*/
+ @CheckForNull
public static String getId(NodeStore store) {
- return store.getRoot().getChildNode(CLUSTER_CONFIG_NODE).getProperty(CLUSTER_ID_PROP).getValue(Type.STRING);
+ NodeState state = store.getRoot().getChildNode(CLUSTER_CONFIG_NODE);
+ if (state.hasProperty(CLUSTER_ID_PROP)) {
+ return state.getProperty(CLUSTER_ID_PROP).getValue(Type.STRING);
+ }
+ return null;
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2691_d2da7499.diff |
bugs-dot-jar_data_OAK-1614_86edbffb | ---
BugID: OAK-1614
Summary: Oak Analyzer can't tokenize chinese phrases
Description: |-
It looks like the _WhitespaceTokenizer_ cannot properly split Chinese phrases, for example '美女衬衫'.
I could not find a reference to this issue other than LUCENE-5096.
The fix is to switch to the _ClassicTokenizer_ which seems better equipped for this kind of task.
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java
index cfdf7c4..fb0d8f3 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndexConstants.java
@@ -25,7 +25,7 @@ public interface LuceneIndexConstants {
String INDEX_DATA_CHILD_NAME = ":data";
- Version VERSION = Version.LUCENE_46;
+ Version VERSION = Version.LUCENE_47;
Analyzer ANALYZER = new OakAnalyzer(VERSION);
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakAnalyzer.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakAnalyzer.java
index 3fbc602..6368a85 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakAnalyzer.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/OakAnalyzer.java
@@ -21,8 +21,8 @@ import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
+import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.util.Version;
public class OakAnalyzer extends Analyzer {
@@ -43,13 +43,12 @@ public class OakAnalyzer extends Analyzer {
@Override
protected TokenStreamComponents createComponents(final String fieldName,
final Reader reader) {
- WhitespaceTokenizer src = new WhitespaceTokenizer(matchVersion, reader);
+ ClassicTokenizer src = new ClassicTokenizer(matchVersion, reader);
TokenStream tok = new LowerCaseFilter(matchVersion, src);
tok = new WordDelimiterFilter(tok,
WordDelimiterFilter.GENERATE_WORD_PARTS
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE
| WordDelimiterFilter.GENERATE_NUMBER_PARTS, null);
-
return new TokenStreamComponents(src, tok);
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1614_86edbffb.diff |
bugs-dot-jar_data_OAK-2434_8159fc21 | ---
BugID: OAK-2434
Summary: 'Lucene AND query with a complex OR phrase returns incorrect result '
Description: |-
Queries like this {noformat}/jcr:root/content//element(*, test:Asset)[(jcr:contains(., 'cube')) and (jcr:contains(jcr:content/@foo, '"a" OR "b"'))]
{noformat} returns wrong results.
This get converted to {noformat}+:fulltext:cube full:jcr:content/foo:"a" full:jcr:content/foo:"b"
{noformat}
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
index d35915c..7b0934e 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LuceneIndex.java
@@ -724,15 +724,7 @@ public class LuceneIndex implements AdvanceFulltextQueryIndex {
BooleanQuery q = new BooleanQuery();
for (FullTextExpression e : and.list) {
Query x = getFullTextQuery(e, analyzer, reader);
- // Lucene can't deal with "must(must_not(x))"
- if (x instanceof BooleanQuery) {
- BooleanQuery bq = (BooleanQuery) x;
- for (BooleanClause c : bq.clauses()) {
- q.add(c);
- }
- } else {
- q.add(x, MUST);
- }
+ q.add(x, MUST);
}
result.set(q);
return true;
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
index 136f973..e0bbf79 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/LucenePropertyIndex.java
@@ -836,15 +836,7 @@ public class LucenePropertyIndex implements AdvancedQueryIndex, QueryIndex, Nati
BooleanQuery q = new BooleanQuery();
for (FullTextExpression e : and.list) {
Query x = getFullTextQuery(plan, e, analyzer);
- // Lucene can't deal with "must(must_not(x))"
- if (x instanceof BooleanQuery) {
- BooleanQuery bq = (BooleanQuery) x;
- for (BooleanClause c : bq.clauses()) {
- q.add(c);
- }
- } else {
- q.add(x, MUST);
- }
+ q.add(x, MUST);
}
result.set(q);
return true;
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2434_8159fc21.diff |
bugs-dot-jar_data_OAK-3897_94c6c575 | ---
BugID: OAK-3897
Summary: Branch reset does not revert all changes
Description: This is caused by recent changes done for OAK-3646.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
index 8643bf7..7fb76bb 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
@@ -335,7 +335,7 @@ public class DocumentMK {
throw new DocumentStoreException("Not a branch revision: " + ancestorRevisionId);
}
try {
- return nodeStore.reset(branch, ancestor, null).toString();
+ return nodeStore.reset(branch, ancestor).toString();
} catch (DocumentStoreException e) {
throw new DocumentStoreException(e);
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index fcc6bf9..14e608f 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -22,6 +22,8 @@ import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.toArray;
import static com.google.common.collect.Iterables.transform;
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Lists.reverse;
import static java.util.Collections.singletonList;
import static org.apache.jackrabbit.oak.commons.PathUtils.concat;
import static org.apache.jackrabbit.oak.plugins.document.Collection.JOURNAL;
@@ -81,6 +83,7 @@ import org.apache.jackrabbit.oak.commons.jmx.AnnotatedStandardMBean;
import org.apache.jackrabbit.oak.plugins.blob.BlobStoreBlob;
import org.apache.jackrabbit.oak.plugins.blob.MarkSweepGarbageCollector;
import org.apache.jackrabbit.oak.plugins.blob.ReferencedBlob;
+import org.apache.jackrabbit.oak.plugins.document.Branch.BranchCommit;
import org.apache.jackrabbit.oak.plugins.document.cache.CacheInvalidationStats;
import org.apache.jackrabbit.oak.plugins.document.persistentCache.PersistentCache;
import org.apache.jackrabbit.oak.plugins.document.persistentCache.broadcast.DynamicBroadcastConfig;
@@ -1249,8 +1252,7 @@ public final class DocumentNodeStore
@Nonnull
RevisionVector reset(@Nonnull RevisionVector branchHead,
- @Nonnull RevisionVector ancestor,
- @Nullable DocumentNodeStoreBranch branch) {
+ @Nonnull RevisionVector ancestor) {
checkNotNull(branchHead);
checkNotNull(ancestor);
Branch b = getBranches().getBranch(branchHead);
@@ -1261,61 +1263,44 @@ public final class DocumentNodeStore
throw new DocumentStoreException(branchHead + " is not the head " +
"of a branch");
}
- if (!b.containsCommit(ancestor.getBranchRevision())) {
+ if (!b.containsCommit(ancestor.getBranchRevision())
+ && !b.getBase().asBranchRevision(getClusterId()).equals(ancestor)) {
throw new DocumentStoreException(ancestor + " is not " +
"an ancestor revision of " + branchHead);
}
- if (branchHead.equals(ancestor)) {
+ // tailSet is inclusive -> use an ancestorRev with a
+ // counter incremented by one to make the call exclusive
+ Revision ancestorRev = ancestor.getBranchRevision();
+ ancestorRev = new Revision(ancestorRev.getTimestamp(),
+ ancestorRev.getCounter() + 1, ancestorRev.getClusterId(), true);
+ List<Revision> revs = newArrayList(b.getCommits().tailSet(ancestorRev));
+ if (revs.isEmpty()) {
// trivial
return branchHead;
}
- boolean success = false;
- Commit commit = newCommit(branchHead, branch);
- try {
- Iterator<Revision> it = b.getCommits().tailSet(ancestor.getBranchRevision()).iterator();
- // first revision is the ancestor (tailSet is inclusive)
- // do not undo changes for this revision
- it.next();
- Map<String, UpdateOp> operations = Maps.newHashMap();
- if (it.hasNext()) {
- Revision reset = it.next();
- // TODO: correct?
- getRoot(b.getCommit(reset).getBase().update(reset))
- .compareAgainstBaseState(getRoot(ancestor),
- new ResetDiff(reset.asTrunkRevision(), operations));
- UpdateOp rootOp = operations.get("/");
- if (rootOp == null) {
- rootOp = new UpdateOp(Utils.getIdFromPath("/"), false);
- NodeDocument.setModified(rootOp, commit.getRevision());
- operations.put("/", rootOp);
- }
- NodeDocument.removeCollision(rootOp, reset.asTrunkRevision());
- NodeDocument.removeRevision(rootOp, reset.asTrunkRevision());
- }
- // update root document first
- if (store.findAndUpdate(Collection.NODES, operations.get("/")) != null) {
- // clean up in-memory branch data
- // first revision is the ancestor (tailSet is inclusive)
- List<Revision> revs = Lists.newArrayList(b.getCommits().tailSet(ancestor.getBranchRevision()));
- for (Revision r : revs.subList(1, revs.size())) {
- b.removeCommit(r);
- }
- // successfully updating the root document can be considered
- // as success because the changes are not marked as committed
- // anymore
- success = true;
+ UpdateOp rootOp = new UpdateOp(Utils.getIdFromPath("/"), false);
+ // reset each branch commit in reverse order
+ Map<String, UpdateOp> operations = Maps.newHashMap();
+ for (Revision r : reverse(revs)) {
+ NodeDocument.removeCollision(rootOp, r.asTrunkRevision());
+ NodeDocument.removeRevision(rootOp, r.asTrunkRevision());
+ operations.clear();
+ BranchCommit bc = b.getCommit(r);
+ if (bc.isRebase()) {
+ continue;
}
- operations.remove("/");
- // update remaining documents
+ getRoot(bc.getBase().update(r))
+ .compareAgainstBaseState(getRoot(bc.getBase()),
+ new ResetDiff(r.asTrunkRevision(), operations));
+ // apply reset operations
for (UpdateOp op : operations.values()) {
store.findAndUpdate(Collection.NODES, op);
}
- } finally {
- if (!success) {
- canceled(commit);
- } else {
- done(commit, true, null);
- }
+ }
+ store.findAndUpdate(Collection.NODES, rootOp);
+ // clean up in-memory branch data
+ for (Revision r : revs) {
+ b.removeCommit(r);
}
return ancestor;
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java
index 4a02213..6e99422 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStoreBranch.java
@@ -620,8 +620,7 @@ class DocumentNodeStoreBranch implements NodeStoreBranch {
try {
head = store.getRoot(
store.reset(branchHead.getRevision(),
- ancestor.getRevision(),
- DocumentNodeStoreBranch.this));
+ ancestor.getRevision()));
} catch (Exception e) {
CommitFailedException ex = new CommitFailedException(
OAK, 100, "Branch reset failed", e);
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ResetDiff.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ResetDiff.java
index 239928f..cc08635 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ResetDiff.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/ResetDiff.java
@@ -74,6 +74,7 @@ class ResetDiff implements NodeStateDiff {
@Override
public boolean childNodeAdded(String name, NodeState after) {
+ NodeDocument.removeCommitRoot(getUpdateOp(), revision);
String p = PathUtils.concat(path, name);
ResetDiff diff = new ResetDiff(revision, p, operations);
UpdateOp op = diff.getUpdateOp();
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3897_94c6c575.diff |
bugs-dot-jar_data_OAK-3123_f3c9c818 | ---
BugID: OAK-3123
Summary: NPE in RecordIdMap
Description: "{{RecordIdMap}} is not properly guarded against NPEs when calling accessors
on an empty map (which is represented by {{keys == null}}. \n\n{noformat}\ntestRecordIdMap(org.apache.jackrabbit.oak.plugins.segment.RecordIdMapTest)
\ Time elapsed: 0.019 sec <<< ERROR!\njava.lang.NullPointerException\nat org.apache.jackrabbit.oak.plugins.segment.RecordIdMap.size(RecordIdMap.java:100)\nat
org.apache.jackrabbit.oak.plugins.segment.RecordIdMapTest.testRecordIdMap(RecordIdMapTest.java:64)\n{noformat}"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java
index 0b6660c..fe62ffd 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/RecordIdMap.java
@@ -29,8 +29,11 @@ import javax.annotation.Nonnull;
* A memory optimised map of {@code short} key to {@link RecordId} values.
*/
public class RecordIdMap {
- private short[] keys;
- private RecordId[] values;
+ private static final short[] NO_KEYS = new short[0];
+ private static final RecordId[] NO_VALUES = new RecordId[0];
+
+ private short[] keys = NO_KEYS;
+ private RecordId[] values = NO_VALUES;
/**
* Associates {@code key} with {@code value} if not already present
@@ -39,7 +42,7 @@ public class RecordIdMap {
* @return {@code true} if added, {@code false} if already present
*/
public boolean put(short key, @Nonnull RecordId value) {
- if (keys == null) {
+ if (keys.length == 0) {
keys = new short[1];
values = new RecordId[1];
keys[0] = key;
@@ -90,7 +93,7 @@ public class RecordIdMap {
* @return {@code true} iff {@code key} is present.
*/
public boolean containsKey(short key) {
- return keys != null && binarySearch(keys, key) >= 0;
+ return binarySearch(keys, key) >= 0;
}
/**
@@ -105,6 +108,7 @@ public class RecordIdMap {
* the natural ordering of shorts.
* @param index
* @return the key at {@code index}
+ * @throws ArrayIndexOutOfBoundsException if not {@code 0 <= index < size()}
*/
public short getKey(int index) {
return keys[index];
@@ -115,6 +119,7 @@ public class RecordIdMap {
* the natural ordering of shorts.
* @param index
* @return the value at {@code index}
+ * @throws ArrayIndexOutOfBoundsException if not {@code 0 <= index < size()}
*/
@Nonnull
public RecordId getRecordId(int index) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3123_f3c9c818.diff |
bugs-dot-jar_data_OAK-3634_90ad50da | ---
BugID: OAK-3634
Summary: RDB/MongoDocumentStore may return stale documents
Description: |-
It appears that the implementations of the {{update}} method sometimes populate the memory cache with documents that do not reflect any current or previous state in the persistence (that is, miss changes made by another node).
(will attach test)
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
index 63304e7..ced74db 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/mongo/MongoDocumentStore.java
@@ -21,7 +21,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -83,6 +82,8 @@ import com.mongodb.WriteConcern;
import com.mongodb.WriteResult;
import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Predicates.notNull;
+import static com.google.common.collect.Maps.filterValues;
/**
* A document store that uses MongoDB as the backend.
@@ -284,22 +285,9 @@ public class MongoDocumentStore implements DocumentStore {
LOG.trace("invalidateCache: batch size: {} of total so far {}",
ids.size(), size);
}
-
- QueryBuilder query = QueryBuilder.start(Document.ID).in(ids);
- // Fetch only the modCount and id
- final BasicDBObject fields = new BasicDBObject(Document.ID, 1);
- fields.put(Document.MOD_COUNT, 1);
-
- DBCursor cursor = nodes.find(query.get(), fields);
- cursor.setReadPreference(ReadPreference.primary());
- result.queryCount++;
- Map<String, Number> modCounts = new HashMap<String, Number>();
- for (DBObject obj : cursor) {
- String id = (String) obj.get(Document.ID);
- Number modCount = (Number) obj.get(Document.MOD_COUNT);
- modCounts.put(id, modCount);
- }
+ Map<String, Number> modCounts = getModCounts(ids);
+ result.queryCount++;
int invalidated = nodesCache.invalidateOutdated(modCounts);
result.cacheEntriesProcessedCount += modCounts.size();
@@ -906,18 +894,26 @@ public class MongoDocumentStore implements DocumentStore {
try {
dbCollection.update(query.get(), update, false, true);
if (collection == Collection.NODES) {
+ Map<String, Number> modCounts = getModCounts(filterValues(cachedDocs, notNull()).keySet());
// update cache
for (Entry<String, NodeDocument> entry : cachedDocs.entrySet()) {
// the cachedDocs is not empty, so the collection = NODES
Lock lock = nodeLocks.acquire(entry.getKey());
try {
- if (entry.getValue() == null || entry.getValue() == NodeDocument.NULL) {
+ Number postUpdateModCount = modCounts.get(entry.getKey());
+ if (postUpdateModCount != null
+ && entry.getValue() != null
+ && entry.getValue() != NodeDocument.NULL
+ && (postUpdateModCount.longValue() - 1) == entry.getValue().getModCount()) {
+ // post update modCount is one higher than
+ // what we currently see in the cache. we can
+ // replace the cached document
+ NodeDocument newDoc = applyChanges(Collection.NODES, entry.getValue(), updateOp.shallowCopy(entry.getKey()));
+ nodesCache.replaceCachedDocument(entry.getValue(), newDoc);
+ } else {
// make sure concurrently loaded document is
// invalidated
nodesCache.invalidate(entry.getKey());
- } else {
- NodeDocument newDoc = applyChanges(Collection.NODES, entry.getValue(), updateOp.shallowCopy(entry.getKey()));
- nodesCache.replaceCachedDocument(entry.getValue(), newDoc);
}
} finally {
lock.unlock();
@@ -925,6 +921,11 @@ public class MongoDocumentStore implements DocumentStore {
}
}
} catch (MongoException e) {
+ // some documents may still have been updated
+ // invalidate all documents affected by this update call
+ for (String k : keys) {
+ nodesCache.invalidate(k);
+ }
throw DocumentStoreException.convert(e);
}
} finally {
@@ -932,6 +933,35 @@ public class MongoDocumentStore implements DocumentStore {
}
}
+ /**
+ * Returns the {@link Document#MOD_COUNT} value of the documents with the
+ * given {@code keys}. The returned map will only contain entries for
+ * existing documents.
+ *
+ * @param keys the keys of the documents.
+ * @return map with key to {@link Document#MOD_COUNT} value mapping.
+ * @throws MongoException if the call fails
+ */
+ @Nonnull
+ private Map<String, Number> getModCounts(Iterable<String> keys)
+ throws MongoException {
+ QueryBuilder query = QueryBuilder.start(Document.ID).in(keys);
+ // Fetch only the modCount and id
+ final BasicDBObject fields = new BasicDBObject(Document.ID, 1);
+ fields.put(Document.MOD_COUNT, 1);
+
+ DBCursor cursor = nodes.find(query.get(), fields);
+ cursor.setReadPreference(ReadPreference.primary());
+
+ Map<String, Number> modCounts = Maps.newHashMap();
+ for (DBObject obj : cursor) {
+ String id = (String) obj.get(Document.ID);
+ Number modCount = (Number) obj.get(Document.MOD_COUNT);
+ modCounts.put(id, modCount);
+ }
+ return modCounts;
+ }
+
DocumentReadPreference getReadPreference(int maxCacheAge){
if(maxCacheAge >= 0 && maxCacheAge < maxReplicationLagMillis) {
return DocumentReadPreference.PRIMARY;
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
index f81e155..f07a827 100755
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
@@ -1261,6 +1261,9 @@ public class RDBDocumentStore implements DocumentStore {
qc.addKeys(chunkedIds);
seenQueryContext.add(qc);
}
+ for (String id : chunkedIds) {
+ nodesCache.invalidate(id);
+ }
}
Connection connection = null;
@@ -1285,23 +1288,8 @@ public class RDBDocumentStore implements DocumentStore {
qc.addKeys(chunkedIds);
}
}
- }
- for (Entry<String, NodeDocument> entry : cachedDocs.entrySet()) {
- T oldDoc = castAsT(entry.getValue());
- String id = entry.getKey();
- Lock lock = locks.acquire(id);
- try {
- if (oldDoc == null) {
- // make sure concurrently loaded document is
- // invalidated
- nodesCache.invalidate(id);
- } else {
- addUpdateCounters(update);
- T newDoc = createNewDocument(collection, oldDoc, update);
- nodesCache.replaceCachedDocument((NodeDocument) oldDoc, (NodeDocument) newDoc);
- }
- } finally {
- lock.unlock();
+ for (String id : chunkedIds) {
+ nodesCache.invalidate(id);
}
}
} else {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3634_90ad50da.diff |
bugs-dot-jar_data_OAK-1320_64045631 | ---
BugID: OAK-1320
Summary: Inconsistent state in Mongo/KernelRootBuilder
Description: The state of Kernel- and MongoRootBuilder may turn inconsistent when
a NodeStoreBranch.merge() performs a rebase followed by a failed merge on the underlying
storage. The head and base are not properly updated to reflect the successful rebase.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/kernel/KernelRootBuilder.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/kernel/KernelRootBuilder.java
index c7da536..440e47b 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/kernel/KernelRootBuilder.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/kernel/KernelRootBuilder.java
@@ -145,7 +145,19 @@ class KernelRootBuilder extends MemoryNodeBuilder implements FastCopyMove {
*/
NodeState merge(CommitHook hook, CommitInfo info) throws CommitFailedException {
purge();
- branch.merge(hook, info);
+ boolean success = false;
+ try {
+ branch.merge(hook, info);
+ success = true;
+ } finally {
+ if (!success) {
+ // need to adjust base and head of this builder
+ // in case branch.merge() did a rebase and then
+ // a commit hook failed the merge
+ super.reset(branch.getHead());
+ this.base = branch.getBase();
+ }
+ }
return reset();
}
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoRootBuilder.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoRootBuilder.java
index f4ac698..268f3df 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoRootBuilder.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/mongomk/MongoRootBuilder.java
@@ -142,7 +142,19 @@ class MongoRootBuilder extends MemoryNodeBuilder {
*/
NodeState merge(CommitHook hook, CommitInfo info) throws CommitFailedException {
purge();
- branch.merge(hook, info);
+ boolean success = false;
+ try {
+ branch.merge(hook, info);
+ success = true;
+ } finally {
+ if (!success) {
+ // need to adjust base and head of this builder
+ // in case branch.merge() did a rebase and then
+ // a commit hook failed the merge
+ super.reset(branch.getHead());
+ this.base = branch.getBase();
+ }
+ }
return reset();
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1320_64045631.diff |
bugs-dot-jar_data_OAK-1731_024e5d37 | ---
BugID: OAK-1731
Summary: Repository upgrade does not copy default values of property definitions
Description: The {{RepositoryUpgrade}} class needs to copy also the default values
of property definitions in the node types being upgraded. See the TODO in https://github.com/apache/jackrabbit-oak/blob/jackrabbit-oak-0.20.0/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java#L485.
diff --git a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
index 72899ea..7ba64cb 100644
--- a/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
+++ b/oak-upgrade/src/main/java/org/apache/jackrabbit/oak/upgrade/RepositoryUpgrade.java
@@ -16,18 +16,83 @@
*/
package org.apache.jackrabbit.oak.upgrade;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Lists.newArrayListWithCapacity;
+import static com.google.common.collect.Maps.newHashMap;
+import static java.util.Arrays.asList;
+import static org.apache.jackrabbit.JcrConstants.JCR_AUTOCREATED;
+import static org.apache.jackrabbit.JcrConstants.JCR_CHILDNODEDEFINITION;
+import static org.apache.jackrabbit.JcrConstants.JCR_DEFAULTPRIMARYTYPE;
+import static org.apache.jackrabbit.JcrConstants.JCR_DEFAULTVALUES;
+import static org.apache.jackrabbit.JcrConstants.JCR_HASORDERABLECHILDNODES;
+import static org.apache.jackrabbit.JcrConstants.JCR_ISMIXIN;
+import static org.apache.jackrabbit.JcrConstants.JCR_MANDATORY;
+import static org.apache.jackrabbit.JcrConstants.JCR_MULTIPLE;
+import static org.apache.jackrabbit.JcrConstants.JCR_NAME;
+import static org.apache.jackrabbit.JcrConstants.JCR_NODETYPENAME;
+import static org.apache.jackrabbit.JcrConstants.JCR_ONPARENTVERSION;
+import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYITEMNAME;
+import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE;
+import static org.apache.jackrabbit.JcrConstants.JCR_PROPERTYDEFINITION;
+import static org.apache.jackrabbit.JcrConstants.JCR_PROTECTED;
+import static org.apache.jackrabbit.JcrConstants.JCR_REQUIREDPRIMARYTYPES;
+import static org.apache.jackrabbit.JcrConstants.JCR_REQUIREDTYPE;
+import static org.apache.jackrabbit.JcrConstants.JCR_SAMENAMESIBLINGS;
+import static org.apache.jackrabbit.JcrConstants.JCR_SUPERTYPES;
+import static org.apache.jackrabbit.JcrConstants.JCR_SYSTEM;
+import static org.apache.jackrabbit.JcrConstants.JCR_VALUECONSTRAINTS;
+import static org.apache.jackrabbit.JcrConstants.JCR_VERSIONSTORAGE;
+import static org.apache.jackrabbit.JcrConstants.NT_CHILDNODEDEFINITION;
+import static org.apache.jackrabbit.JcrConstants.NT_NODETYPE;
+import static org.apache.jackrabbit.JcrConstants.NT_PROPERTYDEFINITION;
+import static org.apache.jackrabbit.core.RepositoryImpl.ACTIVITIES_NODE_ID;
+import static org.apache.jackrabbit.core.RepositoryImpl.ROOT_NODE_ID;
+import static org.apache.jackrabbit.core.RepositoryImpl.VERSION_STORAGE_NODE_ID;
+import static org.apache.jackrabbit.oak.api.Type.BOOLEANS;
+import static org.apache.jackrabbit.oak.api.Type.DECIMALS;
+import static org.apache.jackrabbit.oak.api.Type.DOUBLES;
+import static org.apache.jackrabbit.oak.api.Type.LONGS;
+import static org.apache.jackrabbit.oak.api.Type.NAME;
+import static org.apache.jackrabbit.oak.api.Type.NAMES;
+import static org.apache.jackrabbit.oak.api.Type.PATHS;
+import static org.apache.jackrabbit.oak.api.Type.STRINGS;
+import static org.apache.jackrabbit.oak.plugins.memory.PropertyStates.createProperty;
+import static org.apache.jackrabbit.oak.plugins.name.Namespaces.addCustomMapping;
+import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_AVAILABLE_QUERY_OPERATORS;
+import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_ABSTRACT;
+import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_FULLTEXT_SEARCHABLE;
+import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_QUERYABLE;
+import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_QUERY_ORDERABLE;
+import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_NODE_TYPES;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.NT_REP_PRIVILEGE;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.NT_REP_PRIVILEGES;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_AGGREGATES;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_BITS;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_IS_ABSTRACT;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_NEXT;
+import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_PRIVILEGES;
+import static org.apache.jackrabbit.spi.commons.name.NameConstants.ANY_NAME;
+
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
+import java.math.BigDecimal;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.jcr.NamespaceException;
+import javax.jcr.PropertyType;
import javax.jcr.RepositoryException;
+import javax.jcr.UnsupportedRepositoryOperationException;
import javax.jcr.security.Privilege;
import javax.jcr.version.OnParentVersionAction;
+import com.google.common.collect.HashBiMap;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
import org.apache.jackrabbit.core.RepositoryContext;
import org.apache.jackrabbit.core.config.BeanConfig;
import org.apache.jackrabbit.core.config.LoginModuleConfig;
@@ -41,6 +106,8 @@ import org.apache.jackrabbit.core.security.authorization.PrivilegeRegistry;
import org.apache.jackrabbit.core.security.user.UserManagerImpl;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
+import org.apache.jackrabbit.oak.namepath.GlobalNameMapper;
+import org.apache.jackrabbit.oak.namepath.NameMapper;
import org.apache.jackrabbit.oak.plugins.index.CompositeIndexEditorProvider;
import org.apache.jackrabbit.oak.plugins.index.IndexUpdateProvider;
import org.apache.jackrabbit.oak.plugins.index.property.PropertyIndexEditorProvider;
@@ -68,6 +135,8 @@ import org.apache.jackrabbit.oak.spi.state.NodeStore;
import org.apache.jackrabbit.oak.upgrade.security.GroupEditorProvider;
import org.apache.jackrabbit.oak.upgrade.security.RestrictionEditorProvider;
import org.apache.jackrabbit.spi.Name;
+import org.apache.jackrabbit.spi.Path;
+import org.apache.jackrabbit.spi.Path.Element;
import org.apache.jackrabbit.spi.QItemDefinition;
import org.apache.jackrabbit.spi.QNodeDefinition;
import org.apache.jackrabbit.spi.QNodeTypeDefinition;
@@ -77,59 +146,6 @@ import org.apache.jackrabbit.spi.QValueConstraint;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.collect.ImmutableMap;
-
-import static com.google.common.base.Preconditions.checkState;
-import static com.google.common.collect.Lists.newArrayList;
-import static com.google.common.collect.Lists.newArrayListWithCapacity;
-import static com.google.common.collect.Maps.newHashMap;
-import static java.util.Arrays.asList;
-import static org.apache.jackrabbit.JcrConstants.JCR_AUTOCREATED;
-import static org.apache.jackrabbit.JcrConstants.JCR_CHILDNODEDEFINITION;
-import static org.apache.jackrabbit.JcrConstants.JCR_DEFAULTPRIMARYTYPE;
-import static org.apache.jackrabbit.JcrConstants.JCR_HASORDERABLECHILDNODES;
-import static org.apache.jackrabbit.JcrConstants.JCR_ISMIXIN;
-import static org.apache.jackrabbit.JcrConstants.JCR_MANDATORY;
-import static org.apache.jackrabbit.JcrConstants.JCR_MULTIPLE;
-import static org.apache.jackrabbit.JcrConstants.JCR_NAME;
-import static org.apache.jackrabbit.JcrConstants.JCR_NODETYPENAME;
-import static org.apache.jackrabbit.JcrConstants.JCR_ONPARENTVERSION;
-import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYITEMNAME;
-import static org.apache.jackrabbit.JcrConstants.JCR_PRIMARYTYPE;
-import static org.apache.jackrabbit.JcrConstants.JCR_PROPERTYDEFINITION;
-import static org.apache.jackrabbit.JcrConstants.JCR_PROTECTED;
-import static org.apache.jackrabbit.JcrConstants.JCR_REQUIREDPRIMARYTYPES;
-import static org.apache.jackrabbit.JcrConstants.JCR_REQUIREDTYPE;
-import static org.apache.jackrabbit.JcrConstants.JCR_SAMENAMESIBLINGS;
-import static org.apache.jackrabbit.JcrConstants.JCR_SUPERTYPES;
-import static org.apache.jackrabbit.JcrConstants.JCR_SYSTEM;
-import static org.apache.jackrabbit.JcrConstants.JCR_VALUECONSTRAINTS;
-import static org.apache.jackrabbit.JcrConstants.JCR_VERSIONSTORAGE;
-import static org.apache.jackrabbit.JcrConstants.NT_CHILDNODEDEFINITION;
-import static org.apache.jackrabbit.JcrConstants.NT_NODETYPE;
-import static org.apache.jackrabbit.JcrConstants.NT_PROPERTYDEFINITION;
-import static org.apache.jackrabbit.core.RepositoryImpl.ACTIVITIES_NODE_ID;
-import static org.apache.jackrabbit.core.RepositoryImpl.ROOT_NODE_ID;
-import static org.apache.jackrabbit.core.RepositoryImpl.VERSION_STORAGE_NODE_ID;
-import static org.apache.jackrabbit.oak.api.Type.NAME;
-import static org.apache.jackrabbit.oak.api.Type.NAMES;
-import static org.apache.jackrabbit.oak.api.Type.STRINGS;
-import static org.apache.jackrabbit.oak.plugins.name.Namespaces.addCustomMapping;
-import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_AVAILABLE_QUERY_OPERATORS;
-import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_ABSTRACT;
-import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_FULLTEXT_SEARCHABLE;
-import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_QUERYABLE;
-import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_IS_QUERY_ORDERABLE;
-import static org.apache.jackrabbit.oak.plugins.nodetype.NodeTypeConstants.JCR_NODE_TYPES;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.NT_REP_PRIVILEGE;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.NT_REP_PRIVILEGES;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_AGGREGATES;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_BITS;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_IS_ABSTRACT;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_NEXT;
-import static org.apache.jackrabbit.oak.spi.security.privilege.PrivilegeConstants.REP_PRIVILEGES;
-import static org.apache.jackrabbit.spi.commons.name.NameConstants.ANY_NAME;
-
public class RepositoryUpgrade {
/**
@@ -236,10 +252,10 @@ public class RepositoryUpgrade {
sc.getWorkspaceInitializer().initialize(builder, workspace);
}
- Map<String, String> uriToPrefix = newHashMap();
+ HashBiMap<String, String> uriToPrefix = HashBiMap.create();
Map<Integer, String> idxToPrefix = newHashMap();
copyNamespaces(builder, uriToPrefix, idxToPrefix);
- copyNodeTypes(builder);
+ copyNodeTypes(builder, uriToPrefix.inverse());
copyPrivileges(builder);
NodeState root = builder.getNodeState();
@@ -458,7 +474,8 @@ public class RepositoryUpgrade {
return bits;
}
- private void copyNodeTypes(NodeBuilder root) throws RepositoryException {
+ private void copyNodeTypes(NodeBuilder root, Map<String, String> prefixToUri)
+ throws RepositoryException {
NodeTypeRegistry sourceRegistry = source.getNodeTypeRegistry();
NodeBuilder system = root.child(JCR_SYSTEM);
NodeBuilder types = system.child(JCR_NODE_TYPES);
@@ -470,13 +487,14 @@ public class RepositoryUpgrade {
if (!types.hasChildNode(oakName)) {
QNodeTypeDefinition def = sourceRegistry.getNodeTypeDef(name);
NodeBuilder type = types.child(oakName);
- copyNodeType(def, type);
+ copyNodeType(def, type, prefixToUri);
}
}
}
- private void copyNodeType(QNodeTypeDefinition def, NodeBuilder builder)
- throws NamespaceException {
+ private void copyNodeType(
+ QNodeTypeDefinition def, NodeBuilder builder, Map<String, String> prefixToUri)
+ throws RepositoryException {
builder.setProperty(JCR_PRIMARYTYPE, NT_NODETYPE, NAME);
// - jcr:nodeTypeName (NAME) protected mandatory
@@ -510,7 +528,7 @@ public class RepositoryUpgrade {
QPropertyDefinition[] properties = def.getPropertyDefs();
for (int i = 0; i < properties.length; i++) {
String name = JCR_PROPERTYDEFINITION + '[' + (i + 1) + ']';
- copyPropertyDefinition(properties[i], builder.child(name));
+ copyPropertyDefinition(properties[i], builder.child(name), prefixToUri);
}
// + jcr:childNodeDefinition (nt:childNodeDefinition) = nt:childNodeDefinition protected sns
@@ -522,8 +540,8 @@ public class RepositoryUpgrade {
}
private void copyPropertyDefinition(
- QPropertyDefinition def, NodeBuilder builder)
- throws NamespaceException {
+ QPropertyDefinition def, NodeBuilder builder, Map<String, String> prefixToUri)
+ throws RepositoryException {
builder.setProperty(JCR_PRIMARYTYPE, NT_PROPERTYDEFINITION, NAME);
copyItemDefinition(def, builder);
@@ -545,9 +563,9 @@ public class RepositoryUpgrade {
builder.setProperty(JCR_VALUECONSTRAINTS, strings, STRINGS);
}
// - jcr:defaultValues (UNDEFINED) protected multiple
- QValue[] values = def.getDefaultValues();
- if (values != null) {
- // TODO
+ QValue[] qValues = def.getDefaultValues();
+ if (qValues != null) {
+ copyDefaultValues(qValues, builder, new GlobalNameMapper(prefixToUri));
}
// - jcr:multiple (BOOLEAN) protected mandatory
builder.setProperty(JCR_MULTIPLE, def.isMultiple());
@@ -561,6 +579,99 @@ public class RepositoryUpgrade {
builder.setProperty(JCR_IS_QUERY_ORDERABLE, def.isQueryOrderable());
}
+ private static void copyDefaultValues(QValue[] qValues, NodeBuilder builder,
+ NameMapper nameMapper) throws RepositoryException {
+ if (qValues.length == 0) {
+ builder.setProperty(JCR_DEFAULTVALUES, Collections.<String>emptyList(), STRINGS);
+ } else {
+ int type = qValues[0].getType();
+ switch (type) {
+ case PropertyType.STRING:
+ List<String> strings = newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ strings.add(qValue.getString());
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, strings, STRINGS));
+ return;
+ case PropertyType.LONG:
+ List<Long> longs = newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ longs.add(qValue.getLong());
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, longs, LONGS));
+ return;
+ case PropertyType.DOUBLE:
+ List<Double> doubles = newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ doubles.add(qValue.getDouble());
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, doubles, DOUBLES));
+ return;
+ case PropertyType.BOOLEAN:
+ List<Boolean> booleans = Lists.newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ booleans.add(qValue.getBoolean());
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, booleans, BOOLEANS));
+ return;
+ case PropertyType.NAME:
+ List<String> names = Lists.newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ names.add(nameMapper.getOakName(qValue.getName().toString()));
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, names, NAMES));
+ return;
+ case PropertyType.PATH:
+ List<String> paths = Lists.newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ paths.add(getOakPath(qValue.getPath(), nameMapper));
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, paths, PATHS));
+ return;
+ case PropertyType.DECIMAL:
+ List<BigDecimal> decimals = Lists.newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ decimals.add(qValue.getDecimal());
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, decimals, DECIMALS));
+ return;
+ case PropertyType.DATE:
+ case PropertyType.URI:
+ List<String> values = newArrayListWithCapacity(qValues.length);
+ for (QValue qValue : qValues) {
+ values.add(qValue.getString());
+ }
+ builder.setProperty(createProperty(JCR_DEFAULTVALUES, values, Type.fromTag(type, true)));
+ return;
+ default:
+ throw new UnsupportedRepositoryOperationException(
+ "Cannot copy default value of type " + Type.fromTag(type, true));
+ }
+ }
+ }
+
+ private static String getOakPath(Path path, NameMapper nameMapper)
+ throws RepositoryException {
+ StringBuilder oakPath = new StringBuilder();
+ String sep = "";
+ for (Element element: path.getElements()) {
+ if (element.denotesRoot()) {
+ oakPath.append('/');
+ continue;
+ } else if (element.denotesName()) {
+ oakPath.append(sep).append(nameMapper.getOakName(element.getString()));
+ } else if (element.denotesCurrent()) {
+ oakPath.append(sep).append('.');
+ } else if (element.denotesParent()) {
+ oakPath.append(sep).append("..");
+ } else {
+ throw new UnsupportedRepositoryOperationException("Cannot copy default value " + path);
+ }
+ sep = "/";
+ }
+ return oakPath.toString();
+ }
+
private void copyChildNodeDefinition(
QNodeDefinition def, NodeBuilder builder)
throws NamespaceException {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1731_024e5d37.diff |
bugs-dot-jar_data_OAK-1985_f620b79b | ---
BugID: OAK-1985
Summary: TokenLoginModule can't handle case insensitive userids
Description: |+
Login against TokenLoginModule with an userid different in case throws:
javax.security.auth.login.LoginException: Invalid token credentials.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenProviderImpl.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenProviderImpl.java
index 0709960..83a0e22 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenProviderImpl.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/security/authentication/token/TokenProviderImpl.java
@@ -207,9 +207,11 @@ class TokenProviderImpl implements TokenProvider {
@Override
public TokenInfo createToken(String userId, Map<String, ?> attributes) {
String error = "Failed to create login token. ";
- NodeUtil tokenParent = getTokenParent(userId);
+ User user = getUser(userId);
+ NodeUtil tokenParent = getTokenParent(user);
if (tokenParent != null) {
try {
+ String id = user.getID();
long creationTime = new Date().getTime();
NodeUtil tokenNode = createTokenNode(tokenParent, creationTime);
tokenNode.setString(JcrConstants.JCR_UUID, IdentifierManager.generateUUID());
@@ -218,7 +220,7 @@ class TokenProviderImpl implements TokenProvider {
String nodeId = getIdentifier(tokenNode.getTree());
String token = new StringBuilder(nodeId).append(DELIM).append(key).toString();
- String keyHash = PasswordUtil.buildPasswordHash(getKeyValue(key, userId), options);
+ String keyHash = PasswordUtil.buildPasswordHash(getKeyValue(key, id), options);
tokenNode.setString(TOKEN_ATTRIBUTE_KEY, keyHash);
long exp;
@@ -237,7 +239,7 @@ class TokenProviderImpl implements TokenProvider {
}
}
root.commit();
- return new TokenInfoImpl(tokenNode, token, userId);
+ return new TokenInfoImpl(tokenNode, token, id);
} catch (NoSuchAlgorithmException e) {
// error while generating login token
log.error(error, e.getMessage());
@@ -247,7 +249,7 @@ class TokenProviderImpl implements TokenProvider {
} catch (CommitFailedException e) {
// conflict while committing changes
log.warn(error, e.getMessage());
- } catch (AccessDeniedException e) {
+ } catch (RepositoryException e) {
log.warn(error, e.getMessage());
}
} else {
@@ -320,7 +322,7 @@ class TokenProviderImpl implements TokenProvider {
}
@Nonnull
- private static String getKeyValue(String key, String userId) {
+ private static String getKeyValue(@Nonnull String key, @Nonnull String userId) {
return key + userId;
}
@@ -359,26 +361,40 @@ class TokenProviderImpl implements TokenProvider {
}
@CheckForNull
- private NodeUtil getTokenParent(String userId) {
- NodeUtil tokenParent = null;
- String parentPath = null;
+ private User getUser(String userId) {
try {
Authorizable user = userManager.getAuthorizable(userId);
if (user != null && !user.isGroup()) {
- String userPath = user.getPath();
- NodeUtil userNode = new NodeUtil(root.getTree(userPath));
- tokenParent = userNode.getChild(TOKENS_NODE_NAME);
- if (tokenParent == null) {
- tokenParent = userNode.addChild(TOKENS_NODE_NAME, TOKENS_NT_NAME);
- parentPath = userPath + '/' + TOKENS_NODE_NAME;
- root.commit();
- }
+ return (User) user;
} else {
log.debug("Cannot create login token: No corresponding node for User " + userId + '.');
}
} catch (RepositoryException e) {
// error while accessing user.
log.debug("Error while accessing user " + userId + '.', e);
+ }
+ return null;
+ }
+
+ @CheckForNull
+ private NodeUtil getTokenParent(@CheckForNull User user) {
+ if (user == null) {
+ return null;
+ }
+ NodeUtil tokenParent = null;
+ String parentPath = null;
+ try {
+ String userPath = user.getPath();
+ NodeUtil userNode = new NodeUtil(root.getTree(userPath));
+ tokenParent = userNode.getChild(TOKENS_NODE_NAME);
+ if (tokenParent == null) {
+ tokenParent = userNode.addChild(TOKENS_NODE_NAME, TOKENS_NT_NAME);
+ parentPath = userPath + '/' + TOKENS_NODE_NAME;
+ root.commit();
+ }
+ } catch (RepositoryException e) {
+ // error while creating token node.
+ log.debug("Error while creating token node ", e.getMessage());
} catch (CommitFailedException e) {
// conflict while creating token store for this user -> refresh and
// try to get the tree from the updated root.
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1985_f620b79b.diff |
bugs-dot-jar_data_OAK-4067_56accddf | ---
BugID: OAK-4067
Summary: 'AssertionError thrown for Lucene index with empty suggest disctionary '
Description: "Create an index where one field is enabled for suggestion but no content
is indexed for that index i.e. no matching content. Then while performing any query
following exception is thrown\n\n{noformat}\njava.lang.AssertionError\n\tat org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester.<init>(AnalyzingInfixSuggester.java:167)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.util.SuggestHelper$2.<init>(SuggestHelper.java:127)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.util.SuggestHelper.getLookup(SuggestHelper.java:127)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.util.SuggestHelper.getLookup(SuggestHelper.java:123)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.IndexNode.<init>(IndexNode.java:109)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.IndexNode.open(IndexNode.java:69)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker.findIndexNode(IndexTracker.java:162)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.IndexTracker.acquireIndexNode(IndexTracker.java:137)\n\tat
org.apache.jackrabbit.oak.plugins.index.lucene.LucenePropertyIndex.getPlans(LucenePropertyIndex.java:249)\n\tat
org.apache.jackrabbit.oak.query.QueryImpl.getBestSelectorExecutionPlan(QueryImpl.java:1016)\n\tat
org.apache.jackrabbit.oak.query.QueryImpl.getBestSelectorExecutionPlan(QueryImpl.java:949)\n\tat
org.apache.jackrabbit.oak.query.ast.SelectorImpl.prepare(SelectorImpl.java:288)\n{noformat}\n\nThis
happens with {{-ea}} flag i.e. java assertions enabled. It caused [here|https://github.com/apache/lucene-solr/blob/releases/lucene-solr/4.7.1/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java#L167]"
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/SuggestHelper.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/SuggestHelper.java
index 4d0aa5c..e6de6fb 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/SuggestHelper.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/util/SuggestHelper.java
@@ -66,8 +66,10 @@ public class SuggestHelper {
tempDir = Files.createTempDir();
File tempSubChild = new File(tempDir, "non-existing-sub-child");
- Dictionary dictionary = new LuceneDictionary(reader, FieldNames.SUGGEST);
- getLookup(directory, analyzer, tempSubChild).build(dictionary);
+ if (reader.getDocCount(FieldNames.SUGGEST) > 0) {
+ Dictionary dictionary = new LuceneDictionary(reader, FieldNames.SUGGEST);
+ getLookup(directory, analyzer, tempSubChild).build(dictionary);
+ }
} catch (RuntimeException e) {
log.debug("could not update the suggester", e);
} finally {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4067_56accddf.diff |
bugs-dot-jar_data_OAK-2420_24cb1908 | ---
BugID: OAK-2420
Summary: DocumentNodeStore revision GC may lead to NPE
Description: The DocumentNodeStore revision GC may cause a NPE in a reader thread
when the GC deletes documents currently accessed by the reader. The {{docChildrenCache}}
is invalidated in {{VersionGarbageCollector.collectDeletedDocuments()}} after documents
are removed in the DocumentStore. The NPE may occur if removed documents are access
in between.
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 03dd859..fe60e13 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -18,6 +18,7 @@ package org.apache.jackrabbit.oak.plugins.document;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.toArray;
import static com.google.common.collect.Iterables.transform;
import static org.apache.jackrabbit.oak.api.CommitFailedException.MERGE;
@@ -27,6 +28,7 @@ import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.FAST_DIFF;
import static org.apache.jackrabbit.oak.plugins.document.DocumentMK.MANY_CHILDREN_THRESHOLD;
import static org.apache.jackrabbit.oak.plugins.document.UpdateOp.Key;
import static org.apache.jackrabbit.oak.plugins.document.UpdateOp.Operation;
+import static org.apache.jackrabbit.oak.plugins.document.util.Utils.getIdFromPath;
import static org.apache.jackrabbit.oak.plugins.document.util.Utils.unshareString;
import java.io.Closeable;
@@ -60,6 +62,7 @@ import javax.annotation.Nullable;
import javax.management.NotCompliantMBeanException;
import com.google.common.base.Function;
+import com.google.common.base.Predicates;
import com.google.common.cache.Cache;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
@@ -866,11 +869,11 @@ public final class DocumentNodeStore
* @return the child documents.
*/
@Nonnull
- Iterable<NodeDocument> readChildDocs(@Nonnull final String path,
- @Nullable String name,
- int limit) {
- String to = Utils.getKeyUpperLimit(checkNotNull(path));
- String from;
+ private Iterable<NodeDocument> readChildDocs(@Nonnull final String path,
+ @Nullable String name,
+ final int limit) {
+ final String to = Utils.getKeyUpperLimit(checkNotNull(path));
+ final String from;
if (name != null) {
from = Utils.getIdFromPath(concat(path, name));
} else {
@@ -881,7 +884,7 @@ public final class DocumentNodeStore
// or more than 16k child docs are requested
return store.query(Collection.NODES, from, to, limit);
}
- StringValue key = new StringValue(path);
+ final StringValue key = new StringValue(path);
// check cache
NodeDocument.Children c = docChildrenCache.getIfPresent(key);
if (c == null) {
@@ -898,10 +901,10 @@ public final class DocumentNodeStore
// fetch more and update cache
String lastName = c.childNames.get(c.childNames.size() - 1);
String lastPath = concat(path, lastName);
- from = Utils.getIdFromPath(lastPath);
+ String low = Utils.getIdFromPath(lastPath);
int remainingLimit = limit - c.childNames.size();
List<NodeDocument> docs = store.query(Collection.NODES,
- from, to, remainingLimit);
+ low, to, remainingLimit);
NodeDocument.Children clone = c.clone();
for (NodeDocument doc : docs) {
String p = doc.getPath();
@@ -911,22 +914,36 @@ public final class DocumentNodeStore
docChildrenCache.put(key, clone);
c = clone;
}
- Iterable<NodeDocument> it = transform(c.childNames, new Function<String, NodeDocument>() {
+ Iterable<NodeDocument> head = filter(transform(c.childNames,
+ new Function<String, NodeDocument>() {
@Override
public NodeDocument apply(String name) {
String p = concat(path, name);
NodeDocument doc = store.find(Collection.NODES, Utils.getIdFromPath(p));
if (doc == null) {
- docChildrenCache.invalidateAll();
- throw new NullPointerException("Document " + p + " not found");
+ docChildrenCache.invalidate(key);
}
return doc;
}
- });
- if (c.childNames.size() > limit * 2) {
- it = Iterables.limit(it, limit * 2);
+ }), Predicates.notNull());
+ Iterable<NodeDocument> it;
+ if (c.isComplete) {
+ it = head;
+ } else {
+ // OAK-2420: 'head' may have null documents when documents are
+ // concurrently removed from the store. concat 'tail' to fetch
+ // more documents if necessary
+ final String last = getIdFromPath(concat(
+ path, c.childNames.get(c.childNames.size() - 1)));
+ Iterable<NodeDocument> tail = new Iterable<NodeDocument>() {
+ @Override
+ public Iterator<NodeDocument> iterator() {
+ return store.query(NODES, last, to, limit).iterator();
+ }
+ };
+ it = Iterables.concat(head, tail);
}
- return it;
+ return Iterables.limit(it, limit);
}
/**
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2420_24cb1908.diff |
bugs-dot-jar_data_OAK-1104_7ae92779 | ---
BugID: OAK-1104
Summary: SegmentNodeStore rebase operation assumes wrong child node order
Description: "This popped up during the async merge process. The merge first does
a rebase which can fail, making some index files look like they disappeared [0],
wrapping the actual root cause.\n\nThe problem is that the rebase failed and removed
the missing file. This can be seen by analyzing the ':conflict' marker info:\nbq.
addExistingNode {_b_Lucene41_0.doc, _b.fdx, _b.fdt, _b_4.del, }\nso it points to
something trying to add some index related files twice, almost like a concurrent
commit exception.\n\nDigging even deeper I found that the rebase operation during
the state comparison phase assumes a certain order of child nodes [1], and based
on that tries to read the mentioned nodes again, thinking that they are new ones,
when if fact they are already present in the list [2].\nThis causes a conflict which
fails the entire async update process, but also any lucene search, as the index
files are now gone and the index is in a corrupted state.\n\n\n[0] \n{noformat}\n*WARN*
[pool-5-thread-2] org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate Index
update async failed org.apache.jackrabbit.oak.api.CommitFailedException: OakLucene0004:
Failed to close the Lucene index\n\tat org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditor.leave(LuceneIndexEditor.java:122)\n\tat
org.apache.jackrabbit.oak.spi.commit.VisibleEditor.leave(VisibleEditor.java:64)\n\tat
org.apache.jackrabbit.oak.spi.commit.VisibleEditor.leave(VisibleEditor.java:64)\n\tat
org.apache.jackrabbit.oak.plugins.index.IndexUpdate.leave(IndexUpdate.java:129)\n\tat
org.apache.jackrabbit.oak.spi.commit.EditorDiff.process(EditorDiff.java:56)\n\tat
org.apache.jackrabbit.oak.plugins.index.AsyncIndexUpdate.run(AsyncIndexUpdate.java:100)\n\tat
org.apache.sling.commons.scheduler.impl.QuartzJobExecutor.execute(QuartzJobExecutor.java:105)\n\tat
org.quartz.core.JobRunShell.run(JobRunShell.java:207)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n\tat
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n\tat
java.lang.Thread.run(Thread.java:724)\nCaused by: java.io.FileNotFoundException:
_b_Lucene41_0.doc at org.apache.jackrabbit.oak.plugins.index.lucene.OakDirectory.openInput(OakDirectory.java:145)\n{noformat}\n\n[1]
http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java?view=markup#l329\n\n[2]\nbefore
child list\n{noformat}\n[_b_Lucene41_0.doc, _b.fdx, _b.fdt, segments_34, _b_4.del,
_b_Lucene41_0.pos, _b.nvm, _b.nvd, _b.fnm, _3n.si, _b_Lucene41_0.tip, _b_Lucene41_0.tim,
_3n.cfe, segments.gen, _3n.cfs, _b.si]\n{noformat}\n\nafter list\n{noformat}\n_b_Lucene41_0.pos,
_3k.cfs, _3j_1.del, _b.nvm, _b.nvd, _3d.cfe, _3d.cfs, _b.fnm, _3j.si, _3h.si, _3i.cfe,
_3i.cfs, _3e_2.del, _3f.si, _b_Lucene41_0.tip, _b_Lucene41_0.tim, segments.gen,
_3e.cfe, _3e.cfs, _b.si,_3g.si, _3l.si, _3i_1.del, _3d_3.del, _3e.si, _3d.si, _b_Lucene41_0.doc,
_3h_2.del, _3i.si, _3k_1.del, _3j.cfe, _3j.cfs, _b.fdx, _b.fdt, _3g_1.del, _3k.si,
_3l.cfe, _3l.cfs, segments_33, _3f_1.del, _3h.cfe, _3h.cfs, _b_4.del, _3f.cfe, _3f.cfs,
_3g.cfe, _3g.cfs\n{noformat}\n\n\n\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java
index ad93d4b..93f798d 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapEntry.java
@@ -18,6 +18,7 @@ package org.apache.jackrabbit.oak.plugins.segment;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
+import static org.apache.jackrabbit.oak.plugins.segment.MapRecord.HASH_MASK;
import java.util.Map;
@@ -87,7 +88,7 @@ class MapEntry extends AbstractChildNodeEntry
@Override
public int compareTo(MapEntry that) {
return ComparisonChain.start()
- .compare(getHash(), that.getHash())
+ .compare(getHash() & HASH_MASK, that.getHash() & HASH_MASK)
.compare(name, that.name)
.compare(value, that.value)
.result();
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java
index 9e0c76e..e39b928 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java
@@ -37,6 +37,7 @@ class MapRecord extends Record {
private static final long M = 0x5DEECE66DL;
private static final long A = 0xBL;
+ static final long HASH_MASK = 0xFFFFFFFFL;
static int getHash(String name) {
return (int) (((name.hashCode() ^ M) * M + A) >> 16);
@@ -153,7 +154,7 @@ class MapRecord extends Record {
int bitmap = segment.readInt(getOffset(4));
int mask = BUCKETS_PER_LEVEL - 1;
int shift = 32 - (level + 1) * LEVEL_BITS;
- int index = (hash >> shift) & mask;
+ int index = (int) (hash >> shift) & mask;
int bit = 1 << index;
if ((bitmap & bit) != 0) {
int ids = bitCount(bitmap & (bit - 1));
@@ -167,8 +168,8 @@ class MapRecord extends Record {
// this is a leaf record; scan the list to find a matching entry
int d = -1;
for (int i = 0; i < size && d < 0; i++) {
- d = Integer.valueOf(segment.readInt(getOffset(4 + i * 4)))
- .compareTo(Integer.valueOf(hash));
+ d = Long.valueOf(segment.readInt(getOffset(4 + i * 4)) & HASH_MASK)
+ .compareTo(Long.valueOf(hash & HASH_MASK));
if (d == 0) {
RecordId keyId = segment.readRecordId(
getOffset(4 + size * 4, i));
@@ -379,7 +380,7 @@ class MapRecord extends Record {
return -1; // see above
} else {
return ComparisonChain.start()
- .compare(before.getHash(), after.getHash())
+ .compare(before.getHash() & HASH_MASK, after.getHash() & HASH_MASK)
.compare(before.getName(), after.getName())
.result();
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1104_7ae92779.diff |
bugs-dot-jar_data_OAK-2311_ca85ecce | ---
BugID: OAK-2311
Summary: 'Released checkpoint can still be retrieved '
Description: "The following fails on the 2nd assertion on the MongoMK\n\n{code}\nassertTrue(store.release(cp));\nassertNull(store.retrieve(cp));\n{code}\n\nThe
JavaDoc on the {{release}} method is a bit vague, but I assume it is safe to assume
that when it returns {{true}} the checkpoint should be gone. If not, we should update
the JavaDoc. "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index a46af36..c93221c 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -1383,7 +1383,12 @@ public final class DocumentNodeStore
@CheckForNull
@Override
public NodeState retrieve(@Nonnull String checkpoint) {
- return getRoot(Revision.fromString(checkpoint));
+ Revision r = Revision.fromString(checkpoint);
+ if (checkpoints.getCheckpoints().containsKey(r)) {
+ return getRoot(r);
+ } else {
+ return null;
+ }
}
@Override
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-2311_ca85ecce.diff |
bugs-dot-jar_data_OAK-3396_c83755c3 | ---
BugID: OAK-3396
Summary: NPE during syncAllExternalUsers in LdapIdentityProvider.createUser
Description: |-
When executing the JMX method syncAllExternalUsers the following NPE has been encountered. This likely indicates that - for a particular user - there is no attribute '{{uid}}':
{code}
java.lang.NullPointerException
at org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapIdentityProvider.createUser(LdapIdentityProvider.java:667)
at org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapIdentityProvider.access$000(LdapIdentityProvider.java:88)
at org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapIdentityProvider$1.getNext(LdapIdentityProvider.java:281)
at org.apache.jackrabbit.oak.security.authentication.ldap.impl.LdapIdentityProvider$1.getNext(LdapIdentityProvider.java:273)
at org.apache.jackrabbit.commons.iterator.AbstractLazyIterator.hasNext(AbstractLazyIterator.java:39)
at org.apache.jackrabbit.oak.spi.security.authentication.external.impl.jmx.SyncMBeanImpl$Delegatee.syncAllExternalUsers(SyncMBeanImpl.java:245)
at org.apache.jackrabbit.oak.spi.security.authentication.external.impl.jmx.SyncMBeanImpl.syncAllExternalUsers(SyncMBeanImpl.java:426)
{code}
diff --git a/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java b/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java
index e6b2ba4..512a1bf 100644
--- a/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java
+++ b/oak-auth-ldap/src/main/java/org/apache/jackrabbit/oak/security/authentication/ldap/impl/LdapIdentityProvider.java
@@ -702,7 +702,13 @@ public class LdapIdentityProvider implements ExternalIdentityProvider {
throws LdapInvalidAttributeValueException {
ExternalIdentityRef ref = new ExternalIdentityRef(entry.getDn().getName(), this.getName());
if (id == null) {
- id = entry.get(config.getUserConfig().getIdAttribute()).getString();
+ String idAttribute = config.getUserConfig().getIdAttribute();
+ Attribute attr = entry.get(idAttribute);
+ if (attr == null) {
+ throw new LdapInvalidAttributeValueException(ResultCodeEnum.CONSTRAINT_VIOLATION,
+ "no value found for attribute '" + idAttribute + "' for entry " + entry);
+ }
+ id = attr.getString();
}
String path = config.getUserConfig().makeDnPath()
? createDNPath(entry.getDn())
@@ -718,7 +724,13 @@ public class LdapIdentityProvider implements ExternalIdentityProvider {
throws LdapInvalidAttributeValueException {
ExternalIdentityRef ref = new ExternalIdentityRef(entry.getDn().getName(), this.getName());
if (name == null) {
- name = entry.get(config.getGroupConfig().getIdAttribute()).getString();
+ String idAttribute = config.getGroupConfig().getIdAttribute();
+ Attribute attr = entry.get(idAttribute);
+ if (attr == null) {
+ throw new LdapInvalidAttributeValueException(ResultCodeEnum.CONSTRAINT_VIOLATION,
+ "no value found for attribute '" + idAttribute + "' for entry " + entry);
+ }
+ name = attr.getString();
}
String path = config.getGroupConfig().makeDnPath()
? createDNPath(entry.getDn())
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-3396_c83755c3.diff |
bugs-dot-jar_data_OAK-1429_c2f5ca6c | ---
BugID: OAK-1429
Summary: Slow event listeners do not scale as expected
Description: "{{org.apache.jackrabbit.oak.jcr.LargeOperationIT#slowListener}} does
not scale to {{O n log n}} on the document node store. "
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
index 4e14f42..7c70035 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentMK.java
@@ -214,7 +214,7 @@ public class DocumentMK implements MicroKernel {
if (maxChildNodes-- <= 0) {
break;
}
- String name = PathUtils.getName(c.children.get((int) i));
+ String name = c.children.get((int) i);
json.key(name).object().endObject();
}
if (c.hasMore) {
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
index 64a1eb8..7396f4e 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeState.java
@@ -381,28 +381,25 @@ class DocumentNodeState extends AbstractNodeState implements CacheValue {
}
switch (r) {
case '+': {
- String path = t.readString();
+ String name = t.readString();
t.read(':');
t.read('{');
while (t.read() != '}') {
// skip properties
}
- String name = PathUtils.getName(path);
continueComparison = diff.childNodeAdded(name, getChildNode(name));
break;
}
case '-': {
- String path = t.readString();
- String name = PathUtils.getName(path);
+ String name = t.readString();
continueComparison = diff.childNodeDeleted(name, base.getChildNode(name));
break;
}
case '^': {
- String path = t.readString();
+ String name = t.readString();
t.read(':');
if (t.matches('{')) {
t.read('}');
- String name = PathUtils.getName(path);
continueComparison = diff.childNodeChanged(name,
base.getChildNode(name), getChildNode(name));
} else if (t.matches('[')) {
@@ -416,21 +413,6 @@ class DocumentNodeState extends AbstractNodeState implements CacheValue {
}
break;
}
- case '>': {
- String from = t.readString();
- t.read(':');
- String to = t.readString();
- String fromName = PathUtils.getName(from);
- continueComparison = diff.childNodeDeleted(
- fromName, base.getChildNode(fromName));
- if (!continueComparison) {
- break;
- }
- String toName = PathUtils.getName(to);
- continueComparison = diff.childNodeAdded(
- toName, getChildNode(toName));
- break;
- }
default:
throw new IllegalArgumentException("jsonDiff: illegal token '"
+ t.getToken() + "' at pos: " + t.getLastPos() + ' ' + jsonDiff);
@@ -478,6 +460,9 @@ class DocumentNodeState extends AbstractNodeState implements CacheValue {
*/
public static class Children implements CacheValue {
+ /**
+ * Ascending sorted list of names of child nodes.
+ */
final ArrayList<String> children = new ArrayList<String>();
boolean hasMore;
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
index 3a520e0..26bb4b4 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/DocumentNodeStore.java
@@ -56,6 +56,8 @@ import com.google.common.collect.Sets;
import org.apache.jackrabbit.mk.api.MicroKernelException;
import org.apache.jackrabbit.oak.api.PropertyState;
+import org.apache.jackrabbit.oak.commons.json.JsopReader;
+import org.apache.jackrabbit.oak.commons.json.JsopTokenizer;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
import org.apache.jackrabbit.oak.commons.json.JsopStream;
import org.apache.jackrabbit.oak.commons.json.JsopWriter;
@@ -665,7 +667,7 @@ public final class DocumentNodeStore
}
if (c.children.size() < limit) {
// add to children until limit is reached
- c.children.add(p);
+ c.children.add(Utils.unshareString(PathUtils.getName(p)));
} else {
// enough collected and we know there are more
c.hasMore = true;
@@ -785,7 +787,8 @@ public final class DocumentNodeStore
new Function<String, DocumentNodeState>() {
@Override
public DocumentNodeState apply(String input) {
- return getNode(input, readRevision);
+ String p = PathUtils.concat(parent.getPath(), input);
+ return getNode(p, readRevision);
}
});
}
@@ -832,10 +835,9 @@ public final class DocumentNodeStore
if (isNew) {
CacheValue key = childNodeCacheKey(path, rev, null);
DocumentNodeState.Children c = new DocumentNodeState.Children();
- Set<String> set = Sets.newTreeSet(added);
- set.removeAll(removed);
+ Set<String> set = Sets.newTreeSet();
for (String p : added) {
- set.add(Utils.unshareString(p));
+ set.add(Utils.unshareString(PathUtils.getName(p)));
}
c.children.addAll(set);
nodeChildrenCache.put(key, c);
@@ -844,13 +846,13 @@ public final class DocumentNodeStore
PathRev key = diffCacheKey(path, before, rev);
JsopWriter w = new JsopStream();
for (String p : added) {
- w.tag('+').key(p).object().endObject().newline();
+ w.tag('+').key(PathUtils.getName(p)).object().endObject().newline();
}
for (String p : removed) {
- w.tag('-').value(p).newline();
+ w.tag('-').value(PathUtils.getName(p)).newline();
}
for (String p : changed) {
- w.tag('^').key(p).object().endObject().newline();
+ w.tag('^').key(PathUtils.getName(p)).object().endObject().newline();
}
diffCache.put(key, new StringValue(w.toString()));
}
@@ -1148,12 +1150,35 @@ public final class DocumentNodeStore
try {
JsopWriter writer = new JsopStream();
diffProperties(from, to, writer);
- return writer.toString() + diffCache.get(key, new Callable<StringValue>() {
+ String compactDiff = diffCache.get(key, new Callable<StringValue>() {
@Override
public StringValue call() throws Exception {
return new StringValue(diffImpl(from, to));
}
- });
+ }).toString();
+ JsopTokenizer t = new JsopTokenizer(compactDiff);
+ int r;
+ do {
+ r = t.read();
+ switch (r) {
+ case '+':
+ case '^': {
+ String name = t.readString();
+ t.read(':');
+ t.read('{');
+ t.read('}');
+ writer.tag((char) r).key(PathUtils.concat(path, name));
+ writer.object().endObject().newline();
+ break;
+ }
+ case '-': {
+ String name = t.readString();
+ writer.tag('-').value(PathUtils.concat(path, name));
+ writer.newline();
+ }
+ }
+ } while (r != JsopReader.END);
+ return writer.toString();
} catch (ExecutionException e) {
if (e.getCause() instanceof MicroKernelException) {
throw (MicroKernelException) e.getCause();
@@ -1414,7 +1439,6 @@ public final class DocumentNodeStore
private String diffImpl(DocumentNodeState from, DocumentNodeState to)
throws MicroKernelException {
JsopWriter w = new JsopStream();
- diffProperties(from, to, w);
// TODO this does not work well for large child node lists
// use a document store index instead
int max = MANY_CHILDREN_THRESHOLD;
@@ -1422,8 +1446,8 @@ public final class DocumentNodeStore
fromChildren = getChildren(from, null, max);
toChildren = getChildren(to, null, max);
if (!fromChildren.hasMore && !toChildren.hasMore) {
- diffFewChildren(w, fromChildren, from.getLastRevision(),
- toChildren, to.getLastRevision());
+ diffFewChildren(w, from.getPath(), fromChildren,
+ from.getLastRevision(), toChildren, to.getLastRevision());
} else {
if (FAST_DIFF) {
diffManyChildren(w, from.getPath(),
@@ -1432,8 +1456,8 @@ public final class DocumentNodeStore
max = Integer.MAX_VALUE;
fromChildren = getChildren(from, null, max);
toChildren = getChildren(to, null, max);
- diffFewChildren(w, fromChildren, from.getLastRevision(),
- toChildren, to.getLastRevision());
+ diffFewChildren(w, from.getPath(), fromChildren,
+ from.getLastRevision(), toChildren, to.getLastRevision());
}
}
return w.toString();
@@ -1463,23 +1487,24 @@ public final class DocumentNodeStore
for (String p : paths) {
DocumentNodeState fromNode = getNode(p, fromRev);
DocumentNodeState toNode = getNode(p, toRev);
+ String name = PathUtils.getName(p);
if (fromNode != null) {
// exists in fromRev
if (toNode != null) {
// exists in both revisions
// check if different
if (!fromNode.getLastRevision().equals(toNode.getLastRevision())) {
- w.tag('^').key(p).object().endObject().newline();
+ w.tag('^').key(name).object().endObject().newline();
}
} else {
// does not exist in toRev -> was removed
- w.tag('-').value(p).newline();
+ w.tag('-').value(name).newline();
}
} else {
// does not exist in fromRev
if (toNode != null) {
// exists in toRev
- w.tag('+').key(p).object().endObject().newline();
+ w.tag('+').key(name).object().endObject().newline();
} else {
// does not exist in either revisions
// -> do nothing
@@ -1503,21 +1528,22 @@ public final class DocumentNodeStore
}
}
- private void diffFewChildren(JsopWriter w, DocumentNodeState.Children fromChildren, Revision fromRev, DocumentNodeState.Children toChildren, Revision toRev) {
+ private void diffFewChildren(JsopWriter w, String parentPath, DocumentNodeState.Children fromChildren, Revision fromRev, DocumentNodeState.Children toChildren, Revision toRev) {
Set<String> childrenSet = Sets.newHashSet(toChildren.children);
for (String n : fromChildren.children) {
if (!childrenSet.contains(n)) {
w.tag('-').value(n).newline();
} else {
- DocumentNodeState n1 = getNode(n, fromRev);
- DocumentNodeState n2 = getNode(n, toRev);
+ String path = PathUtils.concat(parentPath, n);
+ DocumentNodeState n1 = getNode(path, fromRev);
+ DocumentNodeState n2 = getNode(path, toRev);
// this is not fully correct:
// a change is detected if the node changed recently,
// even if the revisions are well in the past
// if this is a problem it would need to be changed
- checkNotNull(n1, "Node at [%s] not found for fromRev [%s]", n, fromRev);
- checkNotNull(n2, "Node at [%s] not found for toRev [%s]", n, toRev);
- if (!n1.getId().equals(n2.getId())) {
+ checkNotNull(n1, "Node at [%s] not found for fromRev [%s]", path, fromRev);
+ checkNotNull(n2, "Node at [%s] not found for toRev [%s]", path, toRev);
+ if (!n1.getLastRevision().equals(n2.getLastRevision())) {
w.tag('^').key(n).object().endObject().newline();
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1429_c2f5ca6c.diff |
bugs-dot-jar_data_OAK-4423_275eca83 | ---
BugID: OAK-4423
Summary: Possible overflow in checkpoint creation
Description: Creating a checkpoint with {{Long.MAX_VALUE}} lifetime will overflow
the value, allowing the store to immediately release the checkpoint.
diff --git a/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java b/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java
index 2191a61..c5efa1a 100644
--- a/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java
+++ b/oak-segment/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentNodeStore.java
@@ -416,7 +416,11 @@ public class SegmentNodeStore implements NodeStore, Observable {
}
NodeBuilder cp = checkpoints.child(name);
- cp.setProperty("timestamp", now + lifetime);
+ if (Long.MAX_VALUE - now > lifetime) {
+ cp.setProperty("timestamp", now + lifetime);
+ } else {
+ cp.setProperty("timestamp", Long.MAX_VALUE);
+ }
cp.setProperty("created", now);
NodeBuilder props = cp.setChildNode("properties");
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-4423_275eca83.diff |
bugs-dot-jar_data_OAK-1024_2b5d3afb | ---
BugID: OAK-1024
Summary: Full-text search on the traversing index fails if the condition contains
a slash
Description: "A full-text search on the traversing index falls back to a sort of manual
evaluation of results. \nThis is handled by the _FullTextTerm_ class, and it appears
that it passes the constraint text through a cleanup process where it strips most
of the characters that are neither _Character.isLetterOrDigit(c)_ not in the list
_+-:&_\n\nI'm not exactly sure where this list comes from, but I see the '/' character
is missing which causes a certain type of query to fail.\n\nExample:\n{code}\n//*[jcr:contains(.,
'text/plain')]\n{code}\n\n"
diff --git a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java
index 2d33a29..78a672f 100644
--- a/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java
+++ b/oak-core/src/main/java/org/apache/jackrabbit/oak/query/fulltext/FullTextTerm.java
@@ -67,7 +67,7 @@ public class FullTextTerm extends FullTextExpression {
} else if (c == '_') {
buff.append("\\_");
pattern = true;
- } else if (Character.isLetterOrDigit(c) || " +-:&".indexOf(c) >= 0) {
+ } else if (Character.isLetterOrDigit(c) || " +-:&/".indexOf(c) >= 0) {
buff.append(c);
}
}
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-1024_2b5d3afb.diff |
bugs-dot-jar_data_OAK-43_668f08f2 | ---
BugID: OAK-43
Summary: Incomplete journal when move and copy operations are involved
Description: "Given a node at /source:\n\n{code}\nhead = mk.commit(\"/\",\n \">\\\"source\\\"
: \\\"moved\\\"\" +\n \"*\\\"moved\\\" : \\\"copy\\\"\",\n head, \"\");\n{code}\n\nresults
in the following journal:\n\n{code}\n>\"/source\":\"/copy\"\n{code}\n\nwhere the
freshly created node at /moved is missing.\n\nSimilarly \n\n{code}\nhead = mk.commit(\"/\",\n
\ \"*\\\"source\\\" : \\\"copy\\\"\" +\n \">\\\"copy\\\" : \\\"moved\\\"\",\n
\ head, \"\");\n{code}\n\nresults in\n\n{code}\n+\"/moved\":{}\n{code}\n\nwhere
moved away node at /source is missing."
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/core/MicroKernelImpl.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/core/MicroKernelImpl.java
index 2445959..2cbb066 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/core/MicroKernelImpl.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/core/MicroKernelImpl.java
@@ -18,9 +18,7 @@ package org.apache.jackrabbit.mk.core;
import java.io.InputStream;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -31,6 +29,7 @@ import org.apache.jackrabbit.mk.json.JsopTokenizer;
import org.apache.jackrabbit.mk.model.ChildNodeEntry;
import org.apache.jackrabbit.mk.model.Commit;
import org.apache.jackrabbit.mk.model.CommitBuilder;
+import org.apache.jackrabbit.mk.model.CommitBuilder.NodeTree;
import org.apache.jackrabbit.mk.model.Id;
import org.apache.jackrabbit.mk.model.NodeState;
import org.apache.jackrabbit.mk.model.PropertyState;
@@ -40,7 +39,6 @@ import org.apache.jackrabbit.mk.store.NotFoundException;
import org.apache.jackrabbit.mk.store.RevisionProvider;
import org.apache.jackrabbit.mk.util.CommitGate;
import org.apache.jackrabbit.mk.util.PathUtils;
-import org.apache.jackrabbit.mk.util.SimpleLRUCache;
/**
*
@@ -49,11 +47,6 @@ public class MicroKernelImpl implements MicroKernel {
protected Repository rep;
private final CommitGate gate = new CommitGate();
-
- /**
- * Key: revision id, Value: diff string
- */
- private final Map<Id, String> diffCache = Collections.synchronizedMap(SimpleLRUCache.<Id, String>newInstance(100));
public MicroKernelImpl(String homeDir) throws MicroKernelException {
init(homeDir);
@@ -97,7 +90,6 @@ public class MicroKernelImpl implements MicroKernel {
}
rep = null;
}
- diffCache.clear();
}
public String getHeadRevision() throws MicroKernelException {
@@ -211,13 +203,8 @@ public class MicroKernelImpl implements MicroKernel {
commitBuff.object().
key("id").value(commit.getId().toString()).
key("ts").value(commit.getCommitTS()).
- key("msg").value(commit.getMsg());
- String diff = diffCache.get(commit.getId());
- if (diff == null) {
- diff = diff(commit.getParentId(), commit.getId(), filter);
- diffCache.put(commit.getId(), diff);
- }
- commitBuff.key("changes").value(diff).endObject();
+ key("msg").value(commit.getMsg()).
+ key("changes").value(commit.getChanges()).endObject();
}
return commitBuff.endArray().toString();
}
@@ -478,12 +465,7 @@ public class MicroKernelImpl implements MicroKernel {
}
String parentPath = PathUtils.getParentPath(nodePath);
String nodeName = PathUtils.getName(nodePath);
- // build the list of added nodes recursively
- LinkedList<AddNodeOperation> list = new LinkedList<AddNodeOperation>();
- addNode(list, parentPath, nodeName, t);
- for (AddNodeOperation op : list) {
- cb.addNode(op.path, op.name, op.props);
- }
+ cb.addNode(parentPath, nodeName, parseNode(t));
} else {
String value;
if (t.matches(JsopTokenizer.NULL)) {
@@ -637,30 +619,20 @@ public class MicroKernelImpl implements MicroKernel {
}
}
- static void addNode(LinkedList<AddNodeOperation> list, String path, String name, JsopTokenizer t) throws Exception {
- AddNodeOperation op = new AddNodeOperation();
- op.path = path;
- op.name = name;
- list.add(op);
+ NodeTree parseNode(JsopTokenizer t) throws Exception {
+ NodeTree node = new NodeTree();
if (!t.matches('}')) {
do {
String key = t.readString();
t.read(':');
if (t.matches('{')) {
- addNode(list, PathUtils.concat(path, name), key, t);
+ node.nodes.put(key, parseNode(t));
} else {
- op.props.put(key, t.readRawValue().trim());
+ node.props.put(key, t.readRawValue().trim());
}
} while (t.matches(','));
t.read('}');
}
+ return node;
}
-
- //--------------------------------------------------------< inner classes >
- static class AddNodeOperation {
- String path;
- String name;
- Map<String, String> props = new HashMap<String, String>();
- }
-
}
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/AbstractCommit.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/AbstractCommit.java
index 97f2dea..4f766ee 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/AbstractCommit.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/AbstractCommit.java
@@ -32,6 +32,9 @@ public abstract class AbstractCommit implements Commit {
// commit message
protected String msg;
+ // changes
+ protected String changes;
+
// id of parent commit
protected Id parentId;
@@ -42,6 +45,7 @@ public abstract class AbstractCommit implements Commit {
this.parentId = other.getParentId();
this.rootNodeId = other.getRootNodeId();
this.msg = other.getMsg();
+ this.changes = other.getChanges();
this.commitTS = other.getCommitTS();
}
@@ -61,10 +65,15 @@ public abstract class AbstractCommit implements Commit {
return msg;
}
+ public String getChanges() {
+ return changes;
+ }
+
public void serialize(Binding binding) throws Exception {
binding.write("rootNodeId", rootNodeId.getBytes());
binding.write("commitTS", commitTS);
binding.write("msg", msg == null ? "" : msg);
+ binding.write("changes", changes == null ? "" : changes);
binding.write("parentId", parentId == null ? "" : parentId.toString());
}
}
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/Commit.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/Commit.java
index 82501a0..697693c 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/Commit.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/Commit.java
@@ -31,5 +31,7 @@ public interface Commit {
public String getMsg();
+ public String getChanges();
+
void serialize(Binding binding) throws Exception;
}
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/CommitBuilder.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/CommitBuilder.java
index 645851b..b7cfc9c 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/CommitBuilder.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/CommitBuilder.java
@@ -50,130 +50,39 @@ public class CommitBuilder {
this.store = store;
}
- public void addNode(String parentNodePath, String nodeName) throws Exception {
- addNode(parentNodePath, nodeName, Collections.<String, String>emptyMap());
- }
-
- public void addNode(String parentNodePath, String nodeName, Map<String, String> properties) throws Exception {
- MutableNode modParent = getOrCreateStagedNode(parentNodePath);
- if (modParent.getChildNodeEntry(nodeName) != null) {
- throw new Exception("there's already a child node with name '" + nodeName + "'");
- }
- String newPath = PathUtils.concat(parentNodePath, nodeName);
- MutableNode newChild = new MutableNode(store, newPath);
- newChild.getProperties().putAll(properties);
-
- // id will be computed on commit
- modParent.add(new ChildNode(nodeName, null));
- staged.put(newPath, newChild);
+ public void addNode(String parentNodePath, String nodeName, NodeTree node) throws Exception {
+ Change change = new AddNode(parentNodePath, nodeName, node);
+ change.apply();
// update change log
- changeLog.add(new AddNode(parentNodePath, nodeName, properties));
+ changeLog.add(change);
}
public void removeNode(String nodePath) throws NotFoundException, Exception {
- String parentPath = PathUtils.getParentPath(nodePath);
- String nodeName = PathUtils.getName(nodePath);
-
- MutableNode parent = getOrCreateStagedNode(parentPath);
- if (parent.remove(nodeName) == null) {
- throw new NotFoundException(nodePath);
- }
-
- // update staging area
- removeStagedNodes(nodePath);
-
+ Change change = new RemoveNode(nodePath);
+ change.apply();
// update change log
- changeLog.add(new RemoveNode(nodePath));
+ changeLog.add(change);
}
public void moveNode(String srcPath, String destPath) throws NotFoundException, Exception {
- if (PathUtils.isAncestor(srcPath, destPath)) {
- throw new Exception("target path cannot be descendant of source path: " + destPath);
- }
-
- String srcParentPath = PathUtils.getParentPath(srcPath);
- String srcNodeName = PathUtils.getName(srcPath);
-
- String destParentPath = PathUtils.getParentPath(destPath);
- String destNodeName = PathUtils.getName(destPath);
-
- MutableNode srcParent = getOrCreateStagedNode(srcParentPath);
- if (srcParentPath.equals(destParentPath)) {
- if (srcParent.getChildNodeEntry(destNodeName) != null) {
- throw new Exception("node already exists at move destination path: " + destPath);
- }
- if (srcParent.rename(srcNodeName, destNodeName) == null) {
- throw new NotFoundException(srcPath);
- }
- } else {
- ChildNode srcCNE = srcParent.remove(srcNodeName);
- if (srcCNE == null) {
- throw new NotFoundException(srcPath);
- }
-
- MutableNode destParent = getOrCreateStagedNode(destParentPath);
- if (destParent.getChildNodeEntry(destNodeName) != null) {
- throw new Exception("node already exists at move destination path: " + destPath);
- }
- destParent.add(new ChildNode(destNodeName, srcCNE.getId()));
- }
-
- // update staging area
- moveStagedNodes(srcPath, destPath);
-
+ Change change = new MoveNode(srcPath, destPath);
+ change.apply();
// update change log
- changeLog.add(new MoveNode(srcPath, destPath));
+ changeLog.add(change);
}
public void copyNode(String srcPath, String destPath) throws NotFoundException, Exception {
- String srcParentPath = PathUtils.getParentPath(srcPath);
- String srcNodeName = PathUtils.getName(srcPath);
-
- String destParentPath = PathUtils.getParentPath(destPath);
- String destNodeName = PathUtils.getName(destPath);
-
- MutableNode srcParent = getOrCreateStagedNode(srcParentPath);
- ChildNode srcCNE = srcParent.getChildNodeEntry(srcNodeName);
- if (srcCNE == null) {
- throw new NotFoundException(srcPath);
- }
-
- MutableNode destParent = getOrCreateStagedNode(destParentPath);
- destParent.add(new ChildNode(destNodeName, srcCNE.getId()));
-
- if (srcCNE.getId() == null) {
- // a 'new' node is being copied
-
- // update staging area
- copyStagedNodes(srcPath, destPath);
- }
-
+ Change change = new CopyNode(srcPath, destPath);
+ change.apply();
// update change log
- changeLog.add(new CopyNode(srcPath, destPath));
+ changeLog.add(change);
}
public void setProperty(String nodePath, String propName, String propValue) throws Exception {
- MutableNode node = getOrCreateStagedNode(nodePath);
-
- Map<String, String> properties = node.getProperties();
- if (propValue == null) {
- properties.remove(propName);
- } else {
- properties.put(propName, propValue);
- }
-
+ Change change = new SetProperty(nodePath, propName, propValue);
+ change.apply();
// update change log
- changeLog.add(new SetProperty(nodePath, propName, propValue));
- }
-
- public void setProperties(String nodePath, Map<String, String> properties) throws Exception {
- MutableNode node = getOrCreateStagedNode(nodePath);
-
- node.getProperties().clear();
- node.getProperties().putAll(properties);
-
- // update change log
- changeLog.add(new SetProperties(nodePath, properties));
+ changeLog.add(change);
}
public Id /* new revId */ doCommit() throws Exception {
@@ -190,9 +99,7 @@ public class CommitBuilder {
// clear staging area
staged.clear();
// replay change log on new base revision
- // copy log in order to avoid concurrent modifications
- List<Change> log = new ArrayList<Change>(changeLog);
- for (Change change : log) {
+ for (Change change : changeLog) {
change.apply();
}
}
@@ -222,19 +129,29 @@ public class CommitBuilder {
newCommit.setParentId(baseRevId);
newCommit.setCommitTS(System.currentTimeMillis());
newCommit.setMsg(msg);
+ StringBuilder diff = new StringBuilder();
+ for (Change change : changeLog) {
+ if (diff.length() > 0) {
+ diff.append('\n');
+ }
+ diff.append(change.asDiff());
+ }
+ newCommit.setChanges(diff.toString());
newCommit.setRootNodeId(rootNodeId);
newRevId = store.putHeadCommit(newCommit);
} finally {
store.unlockHead();
}
- // reset instance in order to be reusable
+ // reset instance
staged.clear();
changeLog.clear();
return newRevId;
}
+ //--------------------------------------------------------< inner classes >
+
MutableNode getOrCreateStagedNode(String nodePath) throws Exception {
MutableNode node = staged.get(nodePath);
if (node == null) {
@@ -418,23 +335,79 @@ public class CommitBuilder {
}
//--------------------------------------------------------< inner classes >
+
+ public static class NodeTree {
+ public Map<String, String> props = new HashMap<String, String>();
+ public Map<String, NodeTree> nodes = new HashMap<String, NodeTree>();
+
+ void toJson(StringBuffer buf) {
+ toJson(buf, this);
+ }
+
+ private static void toJson(StringBuffer buf, NodeTree node) {
+ buf.append('{');
+ for (String name : node.props.keySet()) {
+ if (buf.charAt(buf.length() - 1) != '{') {
+ buf.append(',');
+ }
+ buf.append('"').append(name).append("\":").append(node.props.get(name));
+ }
+ for (String name : node.nodes.keySet()) {
+ if (buf.charAt(buf.length() - 1) != '{') {
+ buf.append(',');
+ }
+ buf.append('"').append(name).append("\":");
+ toJson(buf, node.nodes.get(name));
+ }
+ buf.append('}');
+ }
+ }
+
abstract class Change {
abstract void apply() throws Exception;
+ abstract String asDiff();
}
class AddNode extends Change {
String parentNodePath;
String nodeName;
- Map<String, String> properties;
+ NodeTree node;
- AddNode(String parentNodePath, String nodeName, Map<String, String> properties) {
+ AddNode(String parentNodePath, String nodeName, NodeTree node) {
this.parentNodePath = parentNodePath;
this.nodeName = nodeName;
- this.properties = properties;
+ this.node = node;
}
+ @Override
void apply() throws Exception {
- addNode(parentNodePath, nodeName, properties);
+ recursiveAddNode(parentNodePath, nodeName, node);
+ }
+
+ @Override
+ String asDiff() {
+ StringBuffer diff = new StringBuffer("+");
+ diff.append('"').append(PathUtils.concat(parentNodePath, nodeName)).append("\":");
+ node.toJson(diff);
+ return diff.toString();
+ }
+
+ private void recursiveAddNode(String parentPath, String name, NodeTree node) throws Exception {
+ MutableNode modParent = getOrCreateStagedNode(parentPath);
+ if (modParent.getChildNodeEntry(name) != null) {
+ throw new Exception("there's already a child node with name '" + name + "'");
+ }
+ String newPath = PathUtils.concat(parentPath, name);
+ MutableNode newChild = new MutableNode(store, newPath);
+ newChild.getProperties().putAll(node.props);
+
+ // id will be computed on commit
+ modParent.add(new ChildNode(name, null));
+ staged.put(newPath, newChild);
+
+ for (String childName : node.nodes.keySet()) {
+ recursiveAddNode(PathUtils.concat(parentPath, name), childName, node.nodes.get(childName));
+ }
}
}
@@ -445,8 +418,25 @@ public class CommitBuilder {
this.nodePath = nodePath;
}
+ @Override
void apply() throws Exception {
- removeNode(nodePath);
+ String parentPath = PathUtils.getParentPath(nodePath);
+ String nodeName = PathUtils.getName(nodePath);
+
+ MutableNode parent = getOrCreateStagedNode(parentPath);
+ if (parent.remove(nodeName) == null) {
+ throw new NotFoundException(nodePath);
+ }
+
+ // update staging area
+ removeStagedNodes(nodePath);
+ }
+
+ @Override
+ String asDiff() {
+ StringBuffer diff = new StringBuffer("-");
+ diff.append('"').append(nodePath).append('"');
+ return diff.toString();
}
}
@@ -459,8 +449,48 @@ public class CommitBuilder {
this.destPath = destPath;
}
+ @Override
void apply() throws Exception {
- moveNode(srcPath, destPath);
+ if (PathUtils.isAncestor(srcPath, destPath)) {
+ throw new Exception("target path cannot be descendant of source path: " + destPath);
+ }
+
+ String srcParentPath = PathUtils.getParentPath(srcPath);
+ String srcNodeName = PathUtils.getName(srcPath);
+
+ String destParentPath = PathUtils.getParentPath(destPath);
+ String destNodeName = PathUtils.getName(destPath);
+
+ MutableNode srcParent = getOrCreateStagedNode(srcParentPath);
+ if (srcParentPath.equals(destParentPath)) {
+ if (srcParent.getChildNodeEntry(destNodeName) != null) {
+ throw new Exception("node already exists at move destination path: " + destPath);
+ }
+ if (srcParent.rename(srcNodeName, destNodeName) == null) {
+ throw new NotFoundException(srcPath);
+ }
+ } else {
+ ChildNode srcCNE = srcParent.remove(srcNodeName);
+ if (srcCNE == null) {
+ throw new NotFoundException(srcPath);
+ }
+
+ MutableNode destParent = getOrCreateStagedNode(destParentPath);
+ if (destParent.getChildNodeEntry(destNodeName) != null) {
+ throw new Exception("node already exists at move destination path: " + destPath);
+ }
+ destParent.add(new ChildNode(destNodeName, srcCNE.getId()));
+ }
+
+ // update staging area
+ moveStagedNodes(srcPath, destPath);
+ }
+
+ @Override
+ String asDiff() {
+ StringBuffer diff = new StringBuffer(">");
+ diff.append('"').append(srcPath).append("\":\"").append(destPath).append('"');
+ return diff.toString();
}
}
@@ -473,8 +503,36 @@ public class CommitBuilder {
this.destPath = destPath;
}
+ @Override
void apply() throws Exception {
- copyNode(srcPath, destPath);
+ String srcParentPath = PathUtils.getParentPath(srcPath);
+ String srcNodeName = PathUtils.getName(srcPath);
+
+ String destParentPath = PathUtils.getParentPath(destPath);
+ String destNodeName = PathUtils.getName(destPath);
+
+ MutableNode srcParent = getOrCreateStagedNode(srcParentPath);
+ ChildNode srcCNE = srcParent.getChildNodeEntry(srcNodeName);
+ if (srcCNE == null) {
+ throw new NotFoundException(srcPath);
+ }
+
+ MutableNode destParent = getOrCreateStagedNode(destParentPath);
+ destParent.add(new ChildNode(destNodeName, srcCNE.getId()));
+
+ if (srcCNE.getId() == null) {
+ // a 'new' node is being copied
+
+ // update staging area
+ copyStagedNodes(srcPath, destPath);
+ }
+ }
+
+ @Override
+ String asDiff() {
+ StringBuffer diff = new StringBuffer("*");
+ diff.append('"').append(srcPath).append("\":\"").append(destPath).append('"');
+ return diff.toString();
}
}
@@ -489,22 +547,23 @@ public class CommitBuilder {
this.propValue = propValue;
}
+ @Override
void apply() throws Exception {
- setProperty(nodePath, propName, propValue);
- }
- }
+ MutableNode node = getOrCreateStagedNode(nodePath);
- class SetProperties extends Change {
- String nodePath;
- Map<String, String> properties;
-
- SetProperties(String nodePath, Map<String, String> properties) {
- this.nodePath = nodePath;
- this.properties = properties;
+ Map<String, String> properties = node.getProperties();
+ if (propValue == null) {
+ properties.remove(propName);
+ } else {
+ properties.put(propName, propValue);
+ }
}
- void apply() throws Exception {
- setProperties(nodePath, properties);
+ @Override
+ String asDiff() {
+ StringBuffer diff = new StringBuffer("^");
+ diff.append('"').append(PathUtils.concat(nodePath, propName)).append("\":").append(propValue);
+ return diff.toString();
}
}
}
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/MutableCommit.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/MutableCommit.java
index 0992938..4a3312c 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/MutableCommit.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/MutableCommit.java
@@ -39,6 +39,7 @@ public class MutableCommit extends AbstractCommit {
setRootNodeId(other.getRootNodeId());
setCommitTS(other.getCommitTS());
setMsg(other.getMsg());
+ setChanges(other.getChanges());
this.id = other.getId();
}
@@ -57,7 +58,11 @@ public class MutableCommit extends AbstractCommit {
public void setMsg(String msg) {
this.msg = msg;
}
-
+
+ public void setChanges(String changes) {
+ this.changes = changes;
+ }
+
/**
* Return the commit id.
*
diff --git a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/StoredCommit.java b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/StoredCommit.java
index 0cb15b7..95206c4 100644
--- a/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/StoredCommit.java
+++ b/oak-mk/src/main/java/org/apache/jackrabbit/mk/model/StoredCommit.java
@@ -29,17 +29,19 @@ public class StoredCommit extends AbstractCommit {
Id rootNodeId = new Id(binding.readBytesValue("rootNodeId"));
long commitTS = binding.readLongValue("commitTS");
String msg = binding.readStringValue("msg");
+ String changes = binding.readStringValue("changes");
String parentId = binding.readStringValue("parentId");
return new StoredCommit(id, "".equals(parentId) ? null : Id.fromString(parentId),
- commitTS, rootNodeId, "".equals(msg) ? null : msg);
+ commitTS, rootNodeId, "".equals(msg) ? null : msg, changes);
}
- public StoredCommit(Id id, Id parentId, long commitTS, Id rootNodeId, String msg) {
+ public StoredCommit(Id id, Id parentId, long commitTS, Id rootNodeId, String msg, String changes) {
this.id = id;
this.parentId = parentId;
this.commitTS = commitTS;
this.rootNodeId = rootNodeId;
this.msg = msg;
+ this.changes = changes;
}
public StoredCommit(Id id, Commit commit) {
| bugs-dot-jar/jackrabbit-oak_extracted_diff/developer-patch_bugs-dot-jar_OAK-43_668f08f2.diff |