diff --git a/dev-docs/overseer/overseer.adoc b/dev-docs/overseer/overseer.adoc
index 6929270a6de..350096b6633 100644
--- a/dev-docs/overseer/overseer.adoc
+++ b/dev-docs/overseer/overseer.adoc
@@ -322,7 +322,7 @@ These messages are received from the Collection API queue (there’s a single qu
 The `createConfigSet()` call implementing `CREATE` copies all the files of an existing config set (by default the `_default` config set) into a new config set, merges the existing config set properties if any with new ones specified in the message (config set properties in the message are properties that start with `"configSetProp."`, for example `configSetProp.immutable` is a property that prevents the config set from ever being deleted) and writes the resulting properties into `/configs/<config set name>/configsetprops.json` (note: creating a config set based on an `immutable` config set makes the new one immutable as well unless it explicitly specifies that `configSetProp.immutable` is false).
 
-Note the `_default` config set is defined in the SolrCloud distribution and copied if absent into Zookeeper when SolrCloud starts (`ZkController.bootstrapDefaultConfigSet`) then used only from Zookeeper. This config set has a directory `lang` with language specific stop words, contractions and other, and it contains files `managed-schema`, `params.json`, `protwords.txt`, `solrconfig.xml`, `stopwords.txt` and `synonyms.txt`.
+Note the `_default` config set is defined in the SolrCloud distribution and copied if absent into Zookeeper when SolrCloud starts (`ZkController.bootstrapDefaultConfigSet`) then used only from Zookeeper. This config set has a directory `lang` with language specific stop words, contractions and other, and it contains files `managed-schema.xml`, `params.json`, `protwords.txt`, `solrconfig.xml`, `stopwords.txt` and `synonyms.txt`.
 
 deleteConfigSet() deletes the whole znode structure at `/configs/<config set name>` assuming the config set is not used by any collection and is not immutable (the only case where an immutable config set can be deleted is when its creation has failed midway).
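The property merge described above can be pictured as a simple map overlay. The sketch below is illustrative only, not the actual `createConfigSet()` code; the names `merge`, `baseProps`, and `message` are hypothetical stand-ins for the copied config set's `configsetprops.json` content and the Collection API message.

[source,java]
----
import java.util.HashMap;
import java.util.Map;

class ConfigSetPropsMergeSketch {
  static final String PREFIX = "configSetProp.";

  // Overlay "configSetProp."-prefixed message properties onto the base config set's properties.
  static Map<String, Object> merge(Map<String, Object> baseProps, Map<String, Object> message) {
    Map<String, Object> merged = new HashMap<>(baseProps); // start from the copied (base) config set
    for (Map.Entry<String, Object> e : message.entrySet()) {
      if (e.getKey().startsWith(PREFIX)) {
        // the message value wins, so "configSetProp.immutable=false" overrides an inherited "immutable=true"
        merged.put(e.getKey().substring(PREFIX.length()), e.getValue());
      }
    }
    return merged; // conceptually what ends up in /configs/<config set name>/configsetprops.json
  }

  public static void main(String[] args) {
    Map<String, Object> base = Map.of("immutable", "true");                 // copied from an immutable base config set
    Map<String, Object> msg = Map.of("configSetProp.immutable", "false");   // explicit override in the message
    System.out.println(merge(base, msg)); // prints {immutable=false}
  }
}
----

Without the explicit override in the message, the new config set would simply inherit `immutable=true` from its base, which is the behavior the paragraph above describes.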
diff --git a/dev-tools/scripts/releaseWizard.py b/dev-tools/scripts/releaseWizard.py index 9d885fb9b0c..b10fada4ff5 100755 --- a/dev-tools/scripts/releaseWizard.py +++ b/dev-tools/scripts/releaseWizard.py @@ -116,7 +116,7 @@ def expand_jinja(text, vars=None): 'set_java_home': set_java_home, 'latest_version': state.get_latest_version(), 'latest_lts_version': state.get_latest_lts_version(), - 'master_version': state.get_master_version(), + 'main_version': state.get_main_version(), 'mirrored_versions': state.get_mirrored_versions(), 'mirrored_versions_to_delete': state.get_mirrored_versions_to_delete(), 'home': os.path.expanduser("~") @@ -365,7 +365,7 @@ def get_mirrored_versions_to_delete(self): raise Exception("Release version %s must have same major version as current minor or lts release") return [ver for ver in versions if ver not in to_keep] - def get_master_version(self): + def get_main_version(self): v = Version.parse(self.get_latest_version()) return "%s.%s.%s" % (v.major + 1, 0, 0) @@ -394,10 +394,10 @@ def validate_release_version(self, branch_type, branch, release_version): if not ver.is_minor_release(): sys.exit("You can only release minor releases from an existing stable branch") elif branch_type == BranchType.unstable: - if not branch == 'master': + if not branch == 'main': sys.exit("Incompatible branch and branch_type") if not ver.is_major_release(): - sys.exit("You can only release a new major version from master branch") + sys.exit("You can only release a new major version from main branch") if not getScriptVersion() == release_version: print("WARNING: Expected release version %s when on branch %s, but got %s" % ( getScriptVersion(), branch, release_version)) @@ -405,7 +405,7 @@ def validate_release_version(self, branch_type, branch, release_version): def get_base_branch_name(self): v = Version.parse(self.release_version) if v.is_major_release(): - return 'master' + return 'main' elif v.is_minor_release(): return self.get_stable_branch_name() elif v.major == Version.parse(self.get_latest_version()).major: @@ -573,7 +573,7 @@ def get_minor_branch_name(self): def get_stable_branch_name(self): if self.release_type == 'major': - v = Version.parse(self.get_master_version()) + v = Version.parse(self.get_main_version()) else: v = Version.parse(self.get_latest_version()) return "branch_%sx" % v.major diff --git a/dev-tools/scripts/releaseWizard.yaml b/dev-tools/scripts/releaseWizard.yaml index 71966fd005b..4b6d31bd9fa 100644 --- a/dev-tools/scripts/releaseWizard.yaml +++ b/dev-tools/scripts/releaseWizard.yaml @@ -410,7 +410,7 @@ groups: cmd: "{{ gradle_cmd }} clean check -x test" - !Todo id: create_stable_branch - title: Create a new stable branch, off from master + title: Create a new stable branch, off from main description: In our case we'll create {{ stable_branch }} types: - major @@ -420,7 +420,7 @@ groups: commands_text: Run these commands to create a stable branch commands: - !Command - cmd: git checkout master + cmd: git checkout main tee: true - !Command cmd: git pull --ff-only @@ -467,7 +467,7 @@ groups: tee: true - !Todo id: add_version_major - title: Add a new major version on master branch + title: Add a new major version on main branch types: - major depends: clean_git_checkout @@ -475,10 +475,10 @@ groups: next_version: "{{ release_version_major + 1 }}.0.0" commands: !Commands root_folder: '{{ git_checkout_folder }}' - commands_text: Run these commands to add the new major version {{ next_version }} to the master branch + commands_text: Run these commands to add the 
new major version {{ next_version }} to the main branch commands: - !Command - cmd: git checkout master + cmd: git checkout main tee: true - !Command cmd: python3 -u dev-tools/scripts/addVersion.py {{ next_version }} @@ -632,7 +632,7 @@ groups: Go to the JIRA "Manage Versions" Administration pages and add the new version: {% if release_type == 'major' -%} - . Change name of version `master ({{ release_version_major }}.0)` into `{{ release_version_major }}.0` + . Change name of version `main ({{ release_version_major }}.0)` into `{{ release_version_major }}.0` {%- endif %} . Create a new (unreleased) version `{{ get_next_version }}` @@ -972,7 +972,7 @@ groups: logfile: svn_rm_containing.log comment: Clean up containing folder on the staging repo tee: true - post_description: 'Note at this point you will see the Jenkins job "Lucene-Solr-SmokeRelease-master" begin to fail, until you run the "Generate Backcompat Indexes" ' + post_description: 'Note at this point you will see the Jenkins job "Lucene-Solr-SmokeRelease-main" begin to fail, until you run the "Generate Backcompat Indexes" ' - !Todo id: stage_maven title: Stage the maven artifacts for publishing @@ -1190,7 +1190,7 @@ groups: depends: - prepare_announce_solr description: | - Push the website changes to 'master' branch, and check the staging site. + Push the website changes to 'main' branch, and check the staging site. You will get a chance to preview the diff of all changes before you push. If you need to do changes, do the changes (e.g. by re-running previous step 'Update rest of webpage') and commit your changes. Then re-run this step and push when everything is OK. @@ -1201,7 +1201,7 @@ groups: You have to exit the editor after review to continue. commands: - !Command - cmd: git checkout master && git status + cmd: git checkout main && git status stdout: true - !Command cmd: git diff @@ -1240,7 +1240,7 @@ groups: cmd: git checkout production && git pull --ff-only stdout: true - !Command - cmd: git merge master + cmd: git merge main stdout: true - !Command cmd: git push origin @@ -1269,9 +1269,9 @@ groups: commands_text: Edit DOAP files commands: - !Command - cmd: git checkout master && git pull --ff-only + cmd: git checkout main && git pull --ff-only stdout: true - comment: Goto master branch + comment: Goto main branch - !Command cmd: "{{ editor }} dev-tools/doap/lucene.rdf" comment: Edit Lucene DOAP, add version {{ release_version }} @@ -1288,16 +1288,16 @@ groups: cmd: git push origin logfile: push.log stdout: true - comment: Push the master branch + comment: Push the main branch - !Command cmd: "git checkout {{ stable_branch }} && git pull --ff-only" stdout: true comment: Checkout the stable branch - !Command - cmd: "git cherry-pick master" + cmd: "git cherry-pick main" logfile: commit.log stdout: true - comment: Cherrypick the DOAP changes from master onto the stable branch. + comment: Cherrypick the DOAP changes from main onto the stable branch. - !Command cmd: git show HEAD stdout: true @@ -1407,24 +1407,24 @@ groups: commands: !Commands root_folder: '{{ git_checkout_folder }}' commands_text: | - Update versions on master and stable branch. + Update versions on main and stable branch. You may have to hand-edit some files before commit, so go slowly :) confirm_each_command: true commands: - !Command - cmd: git checkout master && git pull --ff-only && git clean -df && git checkout -- . 
- comment: Go to master branch - logfile: checkout-master.log + cmd: git checkout main && git pull --ff-only && git clean -df && git checkout -- . + comment: Go to main branch + logfile: checkout-main.log - !Command cmd: python3 -u dev-tools/scripts/addVersion.py {{ release_version }} - logfile: addversion-master.log + logfile: addversion-main.log - !Command cmd: git diff - logfile: diff-master.log + logfile: diff-main.log tee: true - !Command cmd: git add -u . && git commit -m "Add bugfix version {{ release_version }}" && git push - logfile: commit-master.log + logfile: commit-main.log - !Command cmd: git checkout {{ stable_branch }} && git pull --ff-only && git clean -df && git checkout -- . logfile: checkout-stable.log @@ -1465,9 +1465,9 @@ groups: tee: true comment: Find version regexes - !Command - cmd: git checkout master && git pull --ff-only && git clean -df && git checkout -- . - comment: Go to master branch - logfile: checkout-master.log + cmd: git checkout main && git pull --ff-only && git clean -df && git checkout -- . + comment: Go to main branch + logfile: checkout-main.log - !Command cmd: "{{ editor }} solr/CHANGES.txt" comment: Edit Solr CHANGES, do necessary changes @@ -1478,7 +1478,7 @@ groups: stdout: true - !Command cmd: git add -u . && git commit -m "Sync CHANGES for {{ release_version }}" && git push - logfile: commit-master.log + logfile: commit-main.log - !Command cmd: git checkout {{ stable_branch }} && git pull --ff-only && git clean -df && git checkout -- . comment: Go to stable branch diff --git a/dev-tools/scripts/scriptutil.py b/dev-tools/scripts/scriptutil.py index 04983c0ea98..97992e116f7 100644 --- a/dev-tools/scripts/scriptutil.py +++ b/dev-tools/scripts/scriptutil.py @@ -124,7 +124,7 @@ def find_branch_type(): else: raise Exception('git status missing branch name') - if branchName == b'master': + if branchName == b'main': return BranchType.unstable if re.match(r'branch_(\d+)x', branchName.decode('UTF-8')): return BranchType.stable diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 4d53d9c8f06..12bfdde6f6e 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -143,6 +143,11 @@ when told to. The admin UI now tells it to. (Nazerke Seidan, David Smiley) This was already working for XML & "javabin"/SolrJ. Previously, omitting the ID would be confused for a partial/atomic update. (David Smiley) +* SOLR-10887: Migrate "managed-schema" file naming to "managed-schema.xml" file name, with a fallback to the legacy "managed-schema". (Eric Pugh, David Smiley) + +* SOLR-15630: Logging MDC values no longer include a hardcoded prefix, allowing custom logging configurations access to the plain values. + The default log4j2.xml PatternLayout has been updated to ensure the values are formatted with the existing prefixes. 
(hossman) + Build --------------------- diff --git a/solr/benchmark/log4j2-bench.xml b/solr/benchmark/log4j2-bench.xml index ae4cecb5030..c422685c8c9 100644 --- a/solr/benchmark/log4j2-bench.xml +++ b/solr/benchmark/log4j2-bench.xml @@ -23,7 +23,7 @@ - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -31,11 +31,11 @@ - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -48,11 +48,11 @@ - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -69,14 +69,11 @@ fileName="${sys:solr.log.dir:-build/work/solr-logs}/${sys:solr.log.name:-solr}_random_counts.log"> - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n - - - @@ -92,6 +89,9 @@ + + + diff --git a/solr/benchmark/src/java/org/apache/solr/bench/generators/StringsDSL.java b/solr/benchmark/src/java/org/apache/solr/bench/generators/StringsDSL.java index 33d573c173e..fb32b21b2d1 100644 --- a/solr/benchmark/src/java/org/apache/solr/bench/generators/StringsDSL.java +++ b/solr/benchmark/src/java/org/apache/solr/bench/generators/StringsDSL.java @@ -19,13 +19,14 @@ import static org.apache.solr.bench.generators.SourceDSL.checkArguments; import static org.apache.solr.bench.generators.SourceDSL.integers; +import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Collections; import java.util.List; +import java.util.ListIterator; import java.util.Objects; -import java.util.Random; +import java.util.RandomAccess; import java.util.Scanner; import java.util.SplittableRandom; import org.apache.solr.bench.BaseBenchState; @@ -51,14 +52,18 @@ public class StringsDSL { // english word list via https://github.com/dwyl/english-words words = new ArrayList<>(1000); - InputStream inputStream = StringsDSL.class.getClassLoader().getResourceAsStream("words.txt"); - try (Scanner scanner = - new Scanner(Objects.requireNonNull(inputStream), StandardCharsets.UTF_8.name())) { + + try (InputStream inputStream = + StringsDSL.class.getClassLoader().getResourceAsStream("words.txt"); + Scanner scanner = + new Scanner(Objects.requireNonNull(inputStream), StandardCharsets.UTF_8.name())) { while (scanner.hasNextLine()) { words.add(scanner.nextLine()); } + } catch (IOException e) { + throw new RuntimeException(e); } - Collections.shuffle(words, new Random(BaseBenchState.getRandomSeed())); + shuffle(words, new SplittableRandom(BaseBenchState.getRandomSeed())); WORD_SIZE = words.size(); } @@ -439,6 +444,33 @@ public String generate(RandomnessSource in) { } } + private static void shuffle(List list, SplittableRandom random) { + 
@SuppressWarnings("unchecked") // we won't put foreign objects in + final List objectList = (List) list; + + if (list instanceof RandomAccess) { + for (int i = objectList.size() - 1; i > 0; i--) { + int index = random.nextInt(i + 1); + objectList.set(index, objectList.set(i, objectList.get(index))); + } + } else { + Object[] array = objectList.toArray(); + for (int i = array.length - 1; i > 0; i--) { + int index = random.nextInt(i + 1); + Object temp = array[i]; + array[i] = array[index]; + array[index] = temp; + } + + int i = 0; + ListIterator it = objectList.listIterator(); + while (it.hasNext()) { + it.next(); + it.set(array[i++]); + } + } + } + private static final int[] blockStarts = { 0x0000, 0x0080, 0x0100, 0x0180, 0x0250, 0x02B0, 0x0300, 0x0370, 0x0400, 0x0500, 0x0530, 0x0590, 0x0600, 0x0700, 0x0750, 0x0780, 0x07C0, 0x0800, 0x0900, 0x0980, 0x0A00, 0x0A80, 0x0B00, 0x0B80, diff --git a/solr/benchmark/src/java/org/apache/solr/bench/index/CloudIndexing.java b/solr/benchmark/src/java/org/apache/solr/bench/index/CloudIndexing.java index 981ce0dc455..0e7127d6206 100755 --- a/solr/benchmark/src/java/org/apache/solr/bench/index/CloudIndexing.java +++ b/solr/benchmark/src/java/org/apache/solr/bench/index/CloudIndexing.java @@ -37,11 +37,9 @@ import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; import org.openjdk.jmh.annotations.Threads; import org.openjdk.jmh.annotations.Timeout; import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.infra.BenchmarkParams; @BenchmarkMode(Mode.Throughput) @OutputTimeUnit(TimeUnit.SECONDS) @@ -70,12 +68,6 @@ public static class BenchState { @Param({"1", "3"}) int numReplicas; - @Param({"0", "15", "30", "70", "100", "500", "1000"}) - int useStringUtf8Over; - - @Param({"true", "false"}) - boolean directBuffer; - private final org.apache.solr.bench.Docs largeDocs; private Iterator largeDocIterator; @@ -101,7 +93,7 @@ public BenchState() { smallDocs = docs() .field("id", integers().incrementing()) - .field("text", strings().basicLatinAlphabet().multi(2).ofLengthBetween(20, 32)) + .field("text", strings().basicLatinAlphabet().multi(3).ofLengthBetween(20, 32)) .field("int1_i", integers().all()) .field("int2_i", integers().all()) .field("long1_l", longs().all()); @@ -129,21 +121,10 @@ public SolrInputDocument getSmallDoc() { @Setup(Level.Trial) public void doSetup(MiniClusterState.MiniClusterBenchState miniClusterState) throws Exception { - System.setProperty("useStringUtf8Over", Integer.toString(useStringUtf8Over)); - System.setProperty("httpClientDirectBuffer", Boolean.toString(directBuffer)); - System.setProperty("mergePolicyFactory", "org.apache.solr.index.NoMergePolicyFactory"); miniClusterState.startMiniCluster(nodeCount); miniClusterState.createCollection(COLLECTION, numShards, numReplicas); } - - @TearDown(Level.Trial) - public void doTearDown( - MiniClusterState.MiniClusterBenchState miniClusterState, BenchmarkParams benchmarkParams) - throws Exception { - - // miniClusterState.shutdownMiniCluster(benchmarkParams); - } } @Benchmark diff --git a/solr/benchmark/src/java/org/apache/solr/bench/javabin/JavaBinBasicPerf.java b/solr/benchmark/src/java/org/apache/solr/bench/javabin/JavaBinBasicPerf.java new file mode 100755 index 00000000000..feadd11bbca --- /dev/null +++ b/solr/benchmark/src/java/org/apache/solr/bench/javabin/JavaBinBasicPerf.java @@ -0,0 +1,542 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or 
more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.solr.bench.javabin; + +import static org.apache.solr.bench.Docs.docs; +import static org.apache.solr.bench.generators.SourceDSL.dates; +import static org.apache.solr.bench.generators.SourceDSL.doubles; +import static org.apache.solr.bench.generators.SourceDSL.floats; +import static org.apache.solr.bench.generators.SourceDSL.integers; +import static org.apache.solr.bench.generators.SourceDSL.longs; +import static org.apache.solr.bench.generators.SourceDSL.maps; +import static org.apache.solr.bench.generators.SourceDSL.strings; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.solr.bench.BaseBenchState; +import org.apache.solr.bench.Docs; +import org.apache.solr.bench.SolrGenerate; +import org.apache.solr.bench.SplittableRandomGenerator; +import org.apache.solr.bench.generators.LazyGen; +import org.apache.solr.bench.generators.NamedListGen; +import org.apache.solr.bench.generators.SolrGen; +import org.apache.solr.common.SolrDocument; +import org.apache.solr.common.SolrDocumentList; +import org.apache.solr.common.util.BytesInputStream; +import org.apache.solr.common.util.BytesOutputStream; +import org.apache.solr.common.util.JavaBinCodec; +import org.apache.solr.common.util.NamedList; +import org.apache.solr.common.util.SolrNamedThreadFactory; +import org.apache.solr.common.util.SuppressForbidden; +import org.eclipse.jetty.io.RuntimeIOException; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Timeout; +import org.openjdk.jmh.annotations.Warmup; +import org.quicktheories.api.Pair; +import org.quicktheories.core.Gen; +import org.quicktheories.impl.BenchmarkRandomSource; + +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.SECONDS) +@Threads(1) +@Warmup(time = 15, iterations = 10) +@Measurement(time = 30, iterations = 5) 
+@Fork(value = 1) +@Timeout(time = 60) +public class JavaBinBasicPerf { + + public static final int COUNT = 10; + + @State(Scope.Thread) + public static class ThreadState { + private final BytesOutputStream baos = new BytesOutputStream(1024 * 1024 * 24); + } + + @State(Scope.Benchmark) + public static class BenchState { + + @Param({"1.0"}) + public float scale; + + @Param({ + "default" + }) // nested, numeric, large_strings, very_large_text_and_strings, many_token_field, + // small_strings + public String content; + + private final Queue responseByteArrays = new ConcurrentLinkedQueue<>(); + private final Queue responses = new ConcurrentLinkedQueue<>(); + + private volatile Iterator responseiterator; + private volatile Iterator responseByteArrayIterator; + + @SuppressForbidden(reason = "NoMDCAwareNecessary") + @Setup(Level.Trial) + public void doSetup(BaseBenchState baseBenchState) throws Exception { + + BaseBenchState.log("scale=" + scale); + ExecutorService executorService = + Executors.newFixedThreadPool( + Runtime.getRuntime().availableProcessors(), + new SolrNamedThreadFactory("JavaBinPerf DataGen")); + + responseByteArrays.clear(); + responses.clear(); + + AtomicBoolean stop = new AtomicBoolean(false); + AtomicReference failed = new AtomicReference<>(); + for (int i = 0; i < 100 && !stop.get(); i++) { + int finalI = i; + executorService.submit( + () -> { + try { + Object response; + switch (content) { + case "default": + if (scale > 2 && finalI >= 50) { + stop.set(true); + } + + response = defaultContent(COUNT, scale); + break; + case "numeric": + response = numericsContent((int) (COUNT * scale)); + break; + case "large_strings": + if (scale > 2 && finalI >= 10) { + stop.set(true); + } + response = largeStringsContent(COUNT, scale); + break; + case "very_large_text_and_strings": + if (finalI >= 10) { + stop.set(true); + } + response = veryLargeTextAndStrings(COUNT, scale); + break; + case "many_token_field": + response = manyTokenFieldContent(COUNT, scale); + break; + case "small_strings": + response = smallStrings(COUNT, scale); + break; + case "nested": + response = nested(baseBenchState, scale); + break; + default: + BaseBenchState.log( + String.format(Locale.ENGLISH, "Unknown content type: %s", content)); + throw new IllegalArgumentException("Unknown content type: " + content); + } + + try (final JavaBinCodec jbc = new JavaBinCodec()) { + BytesOutputStream baos = new BytesOutputStream(1024 << 8); + jbc.marshal(response, baos, true); + responseByteArrays.add(baos.toBytes()); + responses.add(response); + } catch (IOException e) { + BaseBenchState.log("IOException " + e.getMessage()); + throw new RuntimeIOException(e); + } + } catch (Exception e) { + e.printStackTrace(); + failed.set(e); + executorService.shutdownNow(); + } + }); + } + + if (failed.get() != null) { + throw failed.get(); + } + + executorService.shutdown(); + boolean result = false; + while (!result) { + result = executorService.awaitTermination(600, TimeUnit.MINUTES); + } + + BaseBenchState.log( + "setup responses=" + + responses.size() + + " responseByteArrays=" + + responseByteArrays.size()); + + responseiterator = responses.iterator(); + responseByteArrayIterator = responseByteArrays.iterator(); + } + + public Object getResponse() { + if (!responseiterator.hasNext()) { + responseiterator = responses.iterator(); + } + while (true) { + try { + return responseiterator.next(); + } catch (NoSuchElementException e) { + responseiterator = responses.iterator(); + } + } + } + + public byte[] getResponseByteArray() { + 
Iterator rbai = responseByteArrayIterator; + if (!rbai.hasNext()) { + rbai = responseByteArrays.iterator(); + responseByteArrayIterator = rbai; + } + while (true) { + try { + byte[] array = rbai.next(); + if (array == null) { + throw new NoSuchElementException(); + } + return array; + } catch (NoSuchElementException e) { + rbai = responseByteArrays.iterator(); + responseByteArrayIterator = rbai; + } + } + } + + private Object nested(BaseBenchState baseBenchState, float scale) { + SplittableRandomGenerator random = + new SplittableRandomGenerator(BaseBenchState.getRandomSeed()); + + Gen> mapGen = + maps().of(getKey(), getValue(10)).ofSizeBetween((int) (20 * scale), (int) (30 * scale)); + + // BaseBenchState.log("map:" + map); + + return mapGen.generate(new BenchmarkRandomSource(random)); + } + + private static SolrGen getKey() { + return strings().betweenCodePoints('a', 'z' + 1).ofLengthBetween(1, 257); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static SolrGen getValue(int depth) { + if (depth == 0) { + return integers().from(1).upToAndIncluding(5000); + } + List values = new ArrayList(4); + values.add( + Pair.of( + 4, maps().of(getKey(), new LazyGen(() -> getValue(depth - 1))).ofSizeBetween(1, 25))); + values.add( + Pair.of( + 4, + new NamedListGen( + maps() + .of(getKey(), new LazyGen(() -> getValue(depth - 1))) + .ofSizeBetween(1, 35)))); + values.add(Pair.of(COUNT, integers().all())); + values.add(Pair.of(16, longs().all())); + values.add(Pair.of(8, doubles().all())); + values.add(Pair.of(5, floats().all())); + values.add(Pair.of(9, dates().all())); + return SolrGenerate.frequency(values); + } + + private static Object numericsContent(int count) { + List topLevel = new ArrayList<>(16); + for (int i = 0; i < count; i++) { + List types = new ArrayList<>(16); + + types.add((short) 2); + types.add((double) 3); + + types.add(-4); + types.add(4); + types.add(42); + + types.add((long) -56547532); + types.add((long) 578675675); + types.add((long) 500000); + topLevel.add(types); + } + + return topLevel; + } + + private static Object defaultContent(int count, float scale) { + NamedList response = new NamedList<>(); + + NamedList header = new NamedList<>(); + header.add("status", 0); + header.add("headerStuff", "values"); + response.add("header", header); + + Docs docs = + docs() + .field("id", integers().incrementing()) + .field( + "facet_s", + strings() + .basicLatinAlphabet() + .maxCardinality(5) + .ofLengthBetween(50, (int) (64 * scale))) + .field( + "facet2_s", + strings().basicLatinAlphabet().maxCardinality(100).ofLengthBetween(12, 16)) + .field( + "facet3_s", + strings().basicLatinAlphabet().maxCardinality(1200).ofLengthBetween(110, 128)) + .field( + "text", + strings() + .basicLatinAlphabet() + .multi((int) (50 * scale)) + .ofLengthBetween(10, (int) (100 * scale))) + .field( + "text2_s", + strings() + .basicLatinAlphabet() + .multi((int) (150 * scale)) + .ofLengthBetween(6, (int) (25 * scale))) + .field( + "text3_t", + strings() + .basicLatinAlphabet() + .multi((int) (1000 * scale)) + .ofLengthBetween(4, (int) (COUNT * scale))) + .field("int_i", integers().all()) + .field("long1_l", longs().all()) + .field("long2_l", longs().all()) + .field("long3_l", longs().all()) + .field("int2_i", integers().allWithMaxCardinality(500)); + + SolrDocumentList docList = new SolrDocumentList(); + for (int i = 0; i < count; i++) { + SolrDocument doc = docs.document(); + docList.add(doc); + } + docList.setNumFound((long) scale); + docList.setMaxScore(1.0f); + docList.setStart(0); + + 
response.add("docs", docList); + + response.add("int", 42); + response.add("long", 5000_023L); + response.add("date", new Date()); + + return response; + } + } + + @Benchmark + @Timeout(time = 300) + public Object encode(BenchState state, ThreadState threadState) throws Exception { + try (final JavaBinCodec jbc = new JavaBinCodec()) { + jbc.marshal(state.getResponse(), threadState.baos, false); + return threadState.baos; + } finally { + threadState.baos.reset(); + } + } + + @Benchmark + @Timeout(time = 300) + public Object decode(BenchState state) throws Exception { + try (JavaBinCodec jbc = new JavaBinCodec()) { + return jbc.unmarshal(new BytesInputStream(state.getResponseByteArray())); + } + } + + private static Object largeStringsContent(int count, float scale) { + Docs docs = + docs() + .field( + "string_s", + strings().basicLatinAlphabet().ofLengthBetween(2000, (int) (2800 * scale))); + + SolrDocumentList docList = new SolrDocumentList(); + for (int i = 0; i < count * scale; i++) { + SolrDocument doc = docs.document(); + docList.add(doc); + } + docList.setNumFound((long) (count * scale)); + docList.setMaxScore(1.0f); + docList.setStart(0); + + return docList; + } + + private static Object manyTokenFieldContent(int count, float scale) { + Docs docs = + docs() + .field( + "string_s", + strings() + .basicLatinAlphabet() + .multi(Math.round(1500 * scale)) + .ofLengthBetween(50, 100)); + SolrDocumentList docList = new SolrDocumentList(); + for (int i = 0; i < count; i++) { + SolrDocument doc = docs.document(); + docList.add(doc); + } + docList.setNumFound(count); + docList.setMaxScore(1.0f); + docList.setStart(0); + + return docList; + } + + private static Object smallStrings(int count, float scale) { + NamedList response = new NamedList<>(); + + NamedList header = new NamedList<>(); + header.add("status", 0); + header.add("headerStuff", "values"); + response.add("header", header); + + Docs docs = + docs() + .field("id", integers().incrementing()) + .field( + "facet_s", + strings() + .basicLatinAlphabet() + .maxCardinality(5) + .ofLengthBetween(10, (int) (25 * scale))) + .field( + "facet2_s", + strings().basicLatinAlphabet().maxCardinality(100).ofLengthBetween(6, 12)) + .field( + "facet3_s", + strings().basicLatinAlphabet().maxCardinality(1200).ofLengthBetween(15, 35)) + .field( + "text", + strings() + .basicLatinAlphabet() + .multi((int) (80 * scale)) + .ofLengthBetween(100, (int) (200 * scale))) + .field( + "text2_s", + strings() + .basicLatinAlphabet() + .multi((int) (800 * scale)) + .ofLengthBetween(50, (int) (150 * scale))); + + SolrDocumentList docList = new SolrDocumentList(); + for (int i = 0; i < count; i++) { + SolrDocument doc = docs.document(); + docList.add(doc); + } + docList.setNumFound((long) scale); + docList.setMaxScore(1.0f); + docList.setStart(0); + + response.add("docs", docList); + + return response; + } + + private static Object veryLargeTextAndStrings(int count, float scale) { + // BaseBenchState.log("count=" + count + ' ' + "scale=" + scale + ' ' + "count * scale=" + count + // * scale); + NamedList response = new NamedList<>(); + + NamedList header = new NamedList<>(); + header.add("status", 0); + header.add("headerStuff", "values"); + response.add("header", header); + + Docs docs = + docs() + .field("id", integers().incrementing()) + .field( + "facet_s", + strings() + .basicLatinAlphabet() + .maxCardinality(5) + .ofLengthBetween(50, (int) (64 * scale))) + .field( + "facet2_s", + strings().basicLatinAlphabet().maxCardinality(100).ofLengthBetween(12, 16)) + .field( 
+ "facet3_s", + strings().basicLatinAlphabet().maxCardinality(1200).ofLengthBetween(110, 128)) + .field( + "text", + strings() + .basicLatinAlphabet() + .multi((int) (800 * scale)) + .ofLengthBetween(400, (int) (500 * scale))) + .field( + "text2_s", + strings() + .basicLatinAlphabet() + .multi((int) (800 * scale)) + .ofLengthBetween(1000, (int) (1500 * scale))) + .field( + "text3_t", + strings() + .basicLatinAlphabet() + .multi((int) (800 * scale)) + .ofLengthBetween(1500, (int) (2000 * scale))) + .field("int_i", integers().all()) + .field("long1_l", longs().all()) + .field("long2_l", longs().all()) + .field("long3_l", longs().all()) + .field("int2_i", integers().allWithMaxCardinality(500)); + + SolrDocumentList docList = new SolrDocumentList(); + for (int i = 0; i < count; i++) { + SolrDocument doc = docs.document(); + docList.add(doc); + } + docList.setNumFound(count); + docList.setMaxScore(1.0f); + docList.setStart(0); + + response.add("docs", docList); + + response.add("int", 42); + response.add("long", 5000_023L); + response.add("date", new Date()); + + return response; + } +} diff --git a/solr/benchmark/src/java/org/apache/solr/bench/javabin/package-info.java b/solr/benchmark/src/java/org/apache/solr/bench/javabin/package-info.java new file mode 100644 index 00000000000..acedda15fd5 --- /dev/null +++ b/solr/benchmark/src/java/org/apache/solr/bench/javabin/package-info.java @@ -0,0 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** Solr JMH benchmarks focused on JavaBinCodec */ +package org.apache.solr.bench.javabin; diff --git a/solr/benchmark/src/test-files/log4j2.xml b/solr/benchmark/src/test-files/log4j2.xml index b77dd4a08ff..e54e11b474b 100644 --- a/solr/benchmark/src/test-files/log4j2.xml +++ b/solr/benchmark/src/test-files/log4j2.xml @@ -23,7 +23,7 @@ - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -35,7 +35,7 @@ filePattern="${sys:solr.log.dir:-build/work/solr-logs}/${sys:solr.log.name:-solr}.log.%i"> - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -52,7 +52,7 @@ filePattern="${sys:solr.log.dir:-build/work/solr-logs}/${sys:solr.log.name:-solr}_slow_requests.log.%i"> - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -68,7 +68,7 @@ fileName="${sys:solr.log.dir:-build/work/solr-logs}/${sys:solr.log.name:-solr}_random_counts.log"> - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n diff --git a/solr/contrib/analytics/src/test/org/apache/solr/analytics/legacy/facet/LegacyAbstractAnalyticsFacetTest.java b/solr/contrib/analytics/src/test/org/apache/solr/analytics/legacy/facet/LegacyAbstractAnalyticsFacetTest.java index 94f679d1ba1..b82872ee570 100644 --- a/solr/contrib/analytics/src/test/org/apache/solr/analytics/legacy/facet/LegacyAbstractAnalyticsFacetTest.java +++ b/solr/contrib/analytics/src/test/org/apache/solr/analytics/legacy/facet/LegacyAbstractAnalyticsFacetTest.java @@ -16,6 +16,7 @@ */ package org.apache.solr.analytics.legacy.facet; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.ByteArrayInputStream; import java.io.FileNotFoundException; import java.io.IOException; @@ -53,6 +54,7 @@ import javax.xml.xpath.XPathExpressionException; import javax.xml.xpath.XPathFactory; +@ThreadLeakLingering(linger = 5000) public class LegacyAbstractAnalyticsFacetTest extends SolrTestCaseJ4 { protected static final HashMap defaults = new HashMap<>(); diff --git a/solr/contrib/clustering/src/test-files/log4j2.xml b/solr/contrib/clustering/src/test-files/log4j2.xml index ed01a6b2c76..5e696935965 100644 --- a/solr/contrib/clustering/src/test-files/log4j2.xml +++ b/solr/contrib/clustering/src/test-files/log4j2.xml @@ -21,7 +21,7 @@ - %maxLen{%-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core} %X{trace_id}] %c{1.} %m%notEmpty{ + %maxLen{%-4r %-5p (%t) [%notEmpty{n:%X{node_name}}%notEmpty{ c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n diff --git 
a/solr/contrib/gcs-repository/src/test-files/log4j2.xml b/solr/contrib/gcs-repository/src/test-files/log4j2.xml index 46ad20c18ee..6b02a3b06b4 100644 --- a/solr/contrib/gcs-repository/src/test-files/log4j2.xml +++ b/solr/contrib/gcs-repository/src/test-files/log4j2.xml @@ -21,7 +21,7 @@ - %maxLen{%-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core} %X{trace_id}] %c{1.} %m%notEmpty{ + %maxLen{%-4r %-5p (%t) [%notEmpty{n:%X{node_name}}%notEmpty{ c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -50,7 +50,7 @@ - %-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n + %-4r %-5p (%t) [%notEmpty{n:%X{node_name}}%notEmpty{ c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%n diff --git a/solr/contrib/s3-repository/src/test-files/log4j2.xml b/solr/contrib/s3-repository/src/test-files/log4j2.xml index 229a9316418..950caea970d 100644 --- a/solr/contrib/s3-repository/src/test-files/log4j2.xml +++ b/solr/contrib/s3-repository/src/test-files/log4j2.xml @@ -21,7 +21,7 @@ - %maxLen{%-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core} %X{trace_id}] %c{1.} %m%notEmpty{ + %maxLen{%-4r %-5p (%t) [%notEmpty{n:%X{node_name}}%notEmpty{ c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -50,7 +50,7 @@ - %-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n + %-4r %-5p (%t) [%notEmpty{n:%X{node_name}}%notEmpty{ c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%n diff --git a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/AbstractS3ClientTest.java b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/AbstractS3ClientTest.java index 70a63444798..4c033daa8c9 100644 --- a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/AbstractS3ClientTest.java +++ b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/AbstractS3ClientTest.java @@ -23,15 +23,16 @@ import org.apache.commons.io.IOUtils; import org.apache.solr.SolrTestCaseJ4; import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; -import org.junit.ClassRule; +import org.junit.Rule; /** Abstract class for test with S3Mock. */ public class AbstractS3ClientTest extends SolrTestCaseJ4 { private static final String BUCKET_NAME = "test-bucket"; - @ClassRule + @Rule public static final S3MockRule S3_MOCK_RULE = S3MockRule.builder().silent().withInitialBuckets(BUCKET_NAME).build(); @@ -52,6 +53,11 @@ public void tearDownClient() { client.close(); } + @AfterClass + public static void afterS3OutputStreamTest() { + interruptThreadsOnTearDown(); // not closed properly + } + /** * Helper method to push a string to S3. 
* diff --git a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3BackupRepositoryTest.java b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3BackupRepositoryTest.java index 2a5828c1c6a..73af8d50ae0 100644 --- a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3BackupRepositoryTest.java +++ b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3BackupRepositoryTest.java @@ -39,7 +39,7 @@ import org.apache.solr.cloud.api.collections.AbstractBackupRepositoryTest; import org.apache.solr.common.util.NamedList; import org.apache.solr.core.backup.repository.BackupRepository; -import org.junit.ClassRule; +import org.junit.AfterClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -53,10 +53,15 @@ public class S3BackupRepositoryTest extends AbstractBackupRepositoryTest { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); - @ClassRule + @Rule public static final S3MockRule S3_MOCK_RULE = S3MockRule.builder().silent().withInitialBuckets(BUCKET_NAME).build(); + @AfterClass + public static void afterS3OutputStreamTest() { + interruptThreadsOnTearDown(); // not closed properly + } + /** * Sent by {@link org.apache.solr.handler.ReplicationHandler}, ensure we don't choke on the bare * URI. diff --git a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3IncrementalBackupTest.java b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3IncrementalBackupTest.java index 60ee8a84325..1b5ce3aedda 100644 --- a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3IncrementalBackupTest.java +++ b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3IncrementalBackupTest.java @@ -17,12 +17,11 @@ package org.apache.solr.s3; -import com.adobe.testing.s3mock.junit4.S3MockRule; import java.lang.invoke.MethodHandles; import org.apache.lucene.util.LuceneTestCase; import org.apache.solr.cloud.api.collections.AbstractIncrementalBackupTest; +import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.ClassRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.awssdk.regions.Region; @@ -35,9 +34,8 @@ public class S3IncrementalBackupTest extends AbstractIncrementalBackupTest { private static final String BUCKET_NAME = S3IncrementalBackupTest.class.getSimpleName(); - @ClassRule - public static final S3MockRule S3_MOCK_RULE = - S3MockRule.builder().silent().withInitialBuckets(BUCKET_NAME).build(); + public static final S3Mock S3_MOCK = + S3Mock.builder().silent().withInitialBuckets(BUCKET_NAME).build(); public static final String SOLR_XML = "\n" @@ -84,20 +82,28 @@ public static void ensureCompatibleLocale() { } @BeforeClass - public static void setupClass() throws Exception { + public static void beforeS3IncrementalBackupTest() throws Exception { System.setProperty("aws.accessKeyId", "foo"); System.setProperty("aws.secretAccessKey", "bar"); + S3_MOCK.start(); + configureCluster(NUM_SHARDS) // nodes .addConfig("conf1", getFile("conf/solrconfig.xml").getParentFile().toPath()) .withSolrXml( SOLR_XML .replace("BUCKET", BUCKET_NAME) .replace("REGION", Region.US_EAST_1.id()) - .replace("ENDPOINT", "http://localhost:" + S3_MOCK_RULE.getHttpPort())) + .replace("ENDPOINT", "http://localhost:" + S3_MOCK.getHttpPort())) .configure(); } + @AfterClass + public static void afterS3IncrementalBackupTest() throws Exception { + cluster.shutdown(); + S3_MOCK.stop(); + } + @Override public String getCollectionNamePrefix() { return "backuprestore"; diff --git 
a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3Mock.java b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3Mock.java new file mode 100644 index 00000000000..bc6437089e0 --- /dev/null +++ b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3Mock.java @@ -0,0 +1,36 @@ +package org.apache.solr.s3; + +import com.adobe.testing.s3mock.testsupport.common.S3MockStarter; +import java.util.Map; + +public class S3Mock extends S3MockStarter { + + /** Creates an instance with the default configuration. */ + public S3Mock() { + super(null); + } + + public static Builder builder() { + return new Builder(); + } + + private S3Mock(final Map properties) { + super(properties); + } + + public void start() { + super.start(); + } + + public void stop() { + super.stop(); + } + + public static class Builder extends S3MockStarter.BaseBuilder { + + @Override + public S3Mock build() { + return new S3Mock(arguments); + } + } +} diff --git a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3OutputStreamTest.java b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3OutputStreamTest.java index 3fcd5a3d781..8bcfa78ec78 100644 --- a/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3OutputStreamTest.java +++ b/solr/contrib/s3-repository/src/test/org/apache/solr/s3/S3OutputStreamTest.java @@ -24,8 +24,9 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.solr.SolrTestCaseJ4; import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; -import org.junit.ClassRule; +import org.junit.Rule; import org.junit.Test; import software.amazon.awssdk.services.s3.S3Client; @@ -33,7 +34,7 @@ public class S3OutputStreamTest extends SolrTestCaseJ4 { private static final String BUCKET = S3OutputStreamTest.class.getSimpleName(); - @ClassRule + @Rule public static final S3MockRule S3_MOCK_RULE = S3MockRule.builder().silent().withInitialBuckets(BUCKET).build(); @@ -49,6 +50,11 @@ public void tearDownClient() { s3.close(); } + @AfterClass + public static void afterS3OutputStreamTest() { + interruptThreadsOnTearDown(); // not closed properly + } + /** * Basic check writing content byte-by-byte. They should be kept in the internal buffer and * flushed to S3 only once. 
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java index 1066c3ef7e7..7a8374a4d52 100644 --- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java +++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java @@ -28,8 +28,6 @@ import java.util.Properties; import java.util.Set; import java.util.function.Supplier; - -import org.apache.commons.io.output.ByteArrayOutputStream; import org.apache.lucene.search.TotalHits.Relation; import org.apache.solr.client.solrj.SolrClient; import org.apache.solr.client.solrj.SolrRequest; @@ -45,6 +43,7 @@ import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; +import org.apache.solr.common.util.BytesOutputStream; import org.apache.solr.common.util.ContentStream; import org.apache.solr.common.util.ContentStreamBase; import org.apache.solr.common.util.JavaBinCodec; @@ -74,7 +73,8 @@ public class EmbeddedSolrServer extends SolrClient { private boolean containerIsLocal = false; public enum RequestWriterSupplier { - JavaBin(() -> new BinaryRequestWriter()), XML(() -> new RequestWriter()); + JavaBin(() -> new BinaryRequestWriter()), + XML(() -> new RequestWriter()); private Supplier supplier; @@ -90,7 +90,7 @@ public RequestWriter newRequestWriter() { /** * Create an EmbeddedSolrServer using a given solr home directory * - * @param solrHome the solr home directory + * @param solrHome the solr home directory * @param defaultCoreName the core to route requests to by default (optional) */ public EmbeddedSolrServer(Path solrHome, String defaultCoreName) { @@ -101,7 +101,7 @@ public EmbeddedSolrServer(Path solrHome, String defaultCoreName) { /** * Create an EmbeddedSolrServer using a NodeConfig * - * @param nodeConfig the configuration + * @param nodeConfig the configuration * @param defaultCoreName the core to route requests to by default (optional) */ public EmbeddedSolrServer(NodeConfig nodeConfig, String defaultCoreName) { @@ -114,9 +114,7 @@ private static CoreContainer load(CoreContainer cc) { return cc; } - /** - * Create an EmbeddedSolrServer wrapping a particular SolrCore - */ + /** Create an EmbeddedSolrServer wrapping a particular SolrCore */ public EmbeddedSolrServer(SolrCore core) { this(core.getCoreContainer(), core.getName()); } @@ -125,7 +123,7 @@ public EmbeddedSolrServer(SolrCore core) { * Create an EmbeddedSolrServer wrapping a CoreContainer. * * @param coreContainer the core container - * @param coreName the core to route requests to by default (optional) + * @param coreName the core to route requests to by default (optional) */ public EmbeddedSolrServer(CoreContainer coreContainer, String coreName) { this(coreContainer, coreName, RequestWriterSupplier.JavaBin); @@ -134,15 +132,12 @@ public EmbeddedSolrServer(CoreContainer coreContainer, String coreName) { /** * Create an EmbeddedSolrServer wrapping a CoreContainer. 
* - * @param coreContainer - * the core container - * @param coreName - * the core to route requests to by default - * @param supplier - * the supplier used to create a {@link RequestWriter} + * @param coreContainer the core container + * @param coreName the core to route requests to by default + * @param supplier the supplier used to create a {@link RequestWriter} */ - public EmbeddedSolrServer(CoreContainer coreContainer, String coreName, - RequestWriterSupplier supplier) { + public EmbeddedSolrServer( + CoreContainer coreContainer, String coreName, RequestWriterSupplier supplier) { if (coreContainer == null) { throw new NullPointerException("CoreContainer instance required"); } @@ -156,7 +151,8 @@ public EmbeddedSolrServer(CoreContainer coreContainer, String coreName, // It *should* be able to convert the response directly into a named list. @Override - public NamedList request(SolrRequest request, String coreName) throws SolrServerException, IOException { + public NamedList request(SolrRequest request, String coreName) + throws SolrServerException, IOException { String path = request.getPath(); if (path == null || !path.startsWith("/")) { @@ -166,7 +162,8 @@ public NamedList request(SolrRequest request, String coreName) throws SolrRequestHandler handler = coreContainer.getRequestHandler(path); if (handler != null) { try { - SolrQueryRequest req = _parser.buildRequestFrom(null, request.getParams(), getContentStreams(request)); + SolrQueryRequest req = + _parser.buildRequestFrom(null, request.getParams(), getContentStreams(request)); req.getContext().put("httpMethod", request.getMethod().name()); req.getContext().put(PATH, path); SolrQueryResponse resp = new SolrQueryResponse(); @@ -183,7 +180,8 @@ public NamedList request(SolrRequest request, String coreName) throws if (coreName == null) { coreName = this.coreName; if (coreName == null) { - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, + throw new SolrException( + SolrException.ErrorCode.BAD_REQUEST, "No core specified on request and no default core has been set."); } } @@ -247,13 +245,15 @@ public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOExcepti } }; + try (BytesOutputStream out = new BytesOutputStream(128)) { + createJavaBinCodec(callback, resolver) + .setWritableDocFields(resolver) + .marshal(rsp.getValues(), out, true); - try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { - createJavaBinCodec(callback, resolver).setWritableDocFields(resolver).marshal(rsp.getValues(), out); - - try (InputStream in = out.toInputStream()) { + try (InputStream in = out.inputStream()) { @SuppressWarnings({"unchecked"}) - NamedList resolved = (NamedList) new JavaBinCodec(resolver).unmarshal(in); + NamedList resolved = + (NamedList) new JavaBinCodec(resolver).unmarshal(in); return resolved; } } @@ -300,46 +300,27 @@ private Set getContentStreams(SolrRequest request) throws IOEx final byte[] buf = baos.toByteArray(); if (buf.length > 0) { - return Collections.singleton(new ContentStreamBase() { + return Collections.singleton( + new ContentStreamBase() { - @Override - public InputStream getStream() throws IOException { - return new ByteArrayInputStream(buf); - } + @Override + public InputStream getStream() throws IOException { + return new ByteArrayInputStream(buf); + } - @Override - public String getContentType() { - return cType; - } - }); + @Override + public String getContentType() { + return cType; + } + }); } return null; } - private JavaBinCodec createJavaBinCodec(final StreamingResponseCallback 
callback, final BinaryResponseWriter.Resolver resolver) { - return new JavaBinCodec(resolver) { - - @Override - public void writeSolrDocument(SolrDocument doc) { - callback.streamSolrDocument(doc); - //super.writeSolrDocument( doc, fields ); - } - - @Override - public void writeSolrDocumentList(SolrDocumentList docs) throws IOException { - if (docs.size() > 0) { - SolrDocumentList tmp = new SolrDocumentList(); - tmp.setMaxScore(docs.getMaxScore()); - tmp.setNumFound(docs.getNumFound()); - tmp.setStart(docs.getStart()); - docs = tmp; - } - callback.streamDocListInfo(docs.getNumFound(), docs.getStart(), docs.getMaxScore()); - super.writeSolrDocumentList(docs); - } - - }; + private static JavaBinCodec createJavaBinCodec( + final StreamingResponseCallback callback, final BinaryResponseWriter.Resolver resolver) { + return new EmbeddedJavaBinCodec(resolver, callback); } private static void checkForExceptions(SolrQueryResponse rsp) throws Exception { @@ -349,12 +330,9 @@ private static void checkForExceptions(SolrQueryResponse rsp) throws Exception { } throw new SolrServerException(rsp.getException()); } - } - /** - * Closes any resources created by this instance - */ + /** Closes any resources created by this instance */ @Override public void close() throws IOException { if (containerIsLocal) { @@ -370,4 +348,34 @@ public void close() throws IOException { public CoreContainer getCoreContainer() { return coreContainer; } + + private static class EmbeddedJavaBinCodec extends JavaBinCodec { + + private final StreamingResponseCallback callback; + + public EmbeddedJavaBinCodec( + BinaryResponseWriter.Resolver resolver, StreamingResponseCallback callback) { + super(resolver); + this.callback = callback; + } + + @Override + public void writeSolrDocument(SolrDocument doc) { + callback.streamSolrDocument(doc); + // super.writeSolrDocument( doc, fields ); + } + + @Override + public void writeSolrDocumentList(SolrDocumentList docs) throws IOException { + if (docs.size() > 0) { + SolrDocumentList tmp = new SolrDocumentList(); + tmp.setMaxScore(docs.getMaxScore()); + tmp.setNumFound(docs.getNumFound()); + tmp.setStart(docs.getStart()); + docs = tmp; + } + callback.streamDocListInfo(docs.getNumFound(), docs.getStart(), docs.getMaxScore()); + super.writeSolrDocumentList(docs); + } + } } diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java index 5f481c48e92..0a752419e79 100644 --- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java +++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java @@ -270,7 +270,7 @@ public JettySolrRunner(String solrHome, Properties nodeProperties, JettyConfig c } catch (Exception e) { throw new RuntimeException(e); } - setProxyPort(proxy.getListenPort()); + proxyPort = proxy.getListenPort(); } this.init(this.config.port); @@ -299,6 +299,7 @@ private void init(int port) { final SslContextFactory.Server sslcontext = SSLConfig.createContextFactory(config.sslConfig); HttpConfiguration configuration = new HttpConfiguration(); + configuration.setOutputBufferSize(32 * 1024); // jetty 10/11 default ServerConnector connector; if (sslcontext != null) { configuration.setSecureScheme("https"); @@ -318,6 +319,8 @@ private void init(int port) { connector.setDefaultProtocol(sslConnectionFactory.getProtocol()); HTTP2ServerConnectionFactory http2ConnectionFactory = new HTTP2ServerConnectionFactory(configuration); + 
http2ConnectionFactory.setInputBufferSize(16384 + 9); // Jetty 10/11 default - max frame len + head len + http2ConnectionFactory.setMaxConcurrentStreams(512); ALPNServerConnectionFactory alpn = new ALPNServerConnectionFactory( http2ConnectionFactory.getProtocol(), @@ -340,11 +343,17 @@ private void init(int port) { connector.setHost("127.0.0.1"); connector.setIdleTimeout(THREAD_POOL_MAX_IDLE_TIME_MS); + server.setConnectors(new Connector[] {connector}); server.setSessionIdManager(new DefaultSessionIdManager(server, new Random())); } else { HttpConfiguration configuration = new HttpConfiguration(); - ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory(configuration), new HTTP2CServerConnectionFactory(configuration)); + configuration.setOutputBufferSize(32 * 1024); // jetty 10/11 default + HTTP2CServerConnectionFactory http2ConnectionFactory = new HTTP2CServerConnectionFactory(configuration); + ServerConnector connector = new ServerConnector(server, new HttpConnectionFactory(configuration), http2ConnectionFactory); + http2ConnectionFactory.setInputBufferSize(16384 + 9); // Jetty 10/11 default - max frame len + head len + http2ConnectionFactory.setMaxConcurrentStreams(512); + connector.setReuseAddress(true); connector.setPort(port); connector.setHost("127.0.0.1"); diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java index 87d1dc1350d..25c612d3ea3 100644 --- a/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java +++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/Assign.java @@ -180,7 +180,7 @@ private static int defaultCounterValue(DocCollection collection, boolean newColl if (newCollection) return 0; int defaultValue; - if (collection.getSlice(shard) != null && collection.getSlice(shard).getReplicas().isEmpty()) { + if (collection == null || collection.getSlice(shard) != null && collection.getSlice(shard).getReplicas().isEmpty()) { return 0; } else { defaultValue = collection.getReplicas().size() * 2; @@ -201,20 +201,20 @@ private static int defaultCounterValue(DocCollection collection, boolean newColl return defaultValue; } - public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, String shard, Replica.Type type, boolean newCollection) { - Slice slice = collection.getSlice(shard); + public static String buildSolrCoreName(DistribStateManager stateManager, String collectionName, DocCollection collection, String shard, Replica.Type type, boolean newCollection) { + int defaultValue = defaultCounterValue(collection, newCollection, shard); - int replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue); - String coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum); - while (existCoreName(coreName, slice)) { - replicaNum = incAndGetId(stateManager, collection.getName(), defaultValue); - coreName = buildSolrCoreName(collection.getName(), shard, type, replicaNum); + int replicaNum = incAndGetId(stateManager, collectionName, defaultValue); + String coreName = buildSolrCoreName(collectionName, shard, type, replicaNum); + while (collection != null && existCoreName(coreName, collection.getSlice(shard))) { + replicaNum = incAndGetId(stateManager, collectionName, defaultValue); + coreName = buildSolrCoreName(collectionName, shard, type, replicaNum); } return coreName; } public static String buildSolrCoreName(DistribStateManager stateManager, DocCollection collection, 
String shard, Replica.Type type) { - return buildSolrCoreName(stateManager, collection, shard, type, false); + return buildSolrCoreName(stateManager, collection.getName(), collection, shard, type, false); } private static boolean existCoreName(String coreName, Slice slice) { diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java index 04cb7b846ed..45211d4513a 100644 --- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java +++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java @@ -231,8 +231,8 @@ public void call(ClusterState clusterState, ZkNodeProps message, NamedList { - Iterator> iterator = rb.req.getCore().getCancellableQueryTracker().getActiveQueriesGenerated(); + rb.rsp.add( + "taskList", + (MapWriter) + ew -> { + Iterator> iterator = + rb.req.getCore().getCancellableQueryTracker().getActiveQueriesGenerated(); - while (iterator.hasNext()) { + while (iterator.hasNext()) { Map.Entry entry = iterator.next(); ew.put(entry.getKey(), entry.getValue()); - } - }); + } + }); + } + + @Override + @SuppressWarnings("unchecked") + public void handleResponses(ResponseBuilder rb, ShardRequest sreq) { + if (!shouldProcess) { + return; } - @Override - @SuppressWarnings("unchecked") - public void handleResponses(ResponseBuilder rb, ShardRequest sreq) { - if (!shouldProcess) { - return; - } + NamedList resultList = new NamedList<>(); - NamedList resultList = new NamedList<>(); + for (ShardResponse r : sreq.responses) { - for (ShardResponse r : sreq.responses) { + if (rb.getTaskStatusCheckUUID() != null) { + boolean isTaskActiveOnShard = r.getSolrResponse().getResponse().getBooleanArg("taskStatus"); - if (rb.getTaskStatusCheckUUID() != null) { - boolean isTaskActiveOnShard = r.getSolrResponse().getResponse().getBooleanArg("taskStatus"); - - if (isTaskActiveOnShard) { - rb.rsp.getValues().add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: active"); - return; - } else { - continue; - } - } - - LinkedHashMap result = (LinkedHashMap) r.getSolrResponse() - .getResponse().get("taskList"); - - Iterator> iterator = result.entrySet().iterator(); + if (isTaskActiveOnShard) { + rb.rsp + .getValues() + .add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: active"); + return; + } else { + continue; + } + } - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); + Map result = + (Map) r.getSolrResponse().getResponse().get("taskList"); - resultList.add(entry.getKey(), entry.getValue()); - } - } + Iterator> iterator = result.entrySet().iterator(); - if (rb.getTaskStatusCheckUUID() != null) { - // We got here with the specific taskID check being specified -- this means that the taskID was not - // found in active tasks on any shard - rb.rsp.getValues().add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: inactive"); - return; - } + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); - rb.rsp.getValues().add("taskList", resultList); + resultList.add(entry.getKey(), entry.getValue()); + } } - @Override - public String getDescription() { - return "Responsible for listing all active cancellable tasks and also supports checking the status of " + - "a particular task"; + if (rb.getTaskStatusCheckUUID() != null) { + // We got here with the specific taskID check being specified -- this means that the taskID + // was not + // found in active tasks on any shard + rb.rsp + 
.getValues() + .add("taskStatus", "id:" + rb.getTaskStatusCheckUUID() + ", status: inactive"); + return; } - @Override - public Category getCategory() { - return Category.OTHER; - } + rb.rsp.getValues().add("taskList", resultList); + } + + @Override + public String getDescription() { + return "Responsible for listing all active cancellable tasks and also supports checking the status of " + + "a particular task"; + } + + @Override + public Category getCategory() { + return Category.OTHER; + } } diff --git a/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerAPI.java b/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerAPI.java index 1108065a2fb..4eb26b2125c 100644 --- a/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerAPI.java +++ b/solr/core/src/java/org/apache/solr/handler/designer/SchemaDesignerAPI.java @@ -1042,7 +1042,7 @@ protected Map buildResponse(String configSet, final String prefix = configPathInZk + "/"; final int prefixLen = prefix.length(); Set stripPrefix = files.stream().map(f -> f.startsWith(prefix) ? f.substring(prefixLen) : f).collect(Collectors.toSet()); - stripPrefix.remove(DEFAULT_MANAGED_SCHEMA_RESOURCE_NAME); + stripPrefix.remove(schema.getResourceName()); stripPrefix.remove("lang"); stripPrefix.remove(CONFIGOVERLAY_JSON); // treat this file as private diff --git a/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java b/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java index 72380fb4d74..c1f3e3adbd7 100644 --- a/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java +++ b/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java @@ -17,6 +17,9 @@ package org.apache.solr.handler.export; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; + import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; @@ -27,7 +30,6 @@ import java.util.List; import java.util.Map; import java.util.TreeSet; - import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; @@ -78,9 +80,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; - /** * Prepares and writes the documents requested by /export requests * @@ -184,8 +183,8 @@ public void write(OutputStream os) throws IOException { private void _write(OutputStream os) throws IOException { QueryResponseWriter rw = req.getCore().getResponseWriters().get(wt); if (rw instanceof BinaryResponseWriter) { - //todo add support for other writers after testing - writer = new JavaBinCodec(os, null); + // todo add support for other writers after testing + writer = new JavaBinCodec(os, null, false); } else { respWriter = new OutputStreamWriter(os, StandardCharsets.UTF_8); writer = JSONResponseWriter.getPushWriter(respWriter, req, res); @@ -214,7 +213,7 @@ private void _write(OutputStream os) throws IOException { return; } - if (sort != null && sort.needsScores()) { + if (sort.needsScores()) { writeException((new IOException(new SyntaxError("Scoring is not currently supported with xsort."))), writer, true); return; } diff --git a/solr/core/src/java/org/apache/solr/handler/export/ExportWriterStream.java b/solr/core/src/java/org/apache/solr/handler/export/ExportWriterStream.java index 3d0b3b13ada..3bb3c29ba78 100644 --- a/solr/core/src/java/org/apache/solr/handler/export/ExportWriterStream.java 
+++ b/solr/core/src/java/org/apache/solr/handler/export/ExportWriterStream.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.TimeoutException; - import org.apache.solr.client.solrj.io.Tuple; import org.apache.solr.client.solrj.io.comp.ComparatorOrder; import org.apache.solr.client.solrj.io.comp.FieldComparator; @@ -45,8 +44,11 @@ /** * Stream implementation that helps supporting 'expr' streaming in export writer. - *
<p>Note: this class is made public only to allow access from {@link org.apache.solr.handler.ExportHandler}, - * it should be treated as an internal detail of implementation.</p> + * + * <p>
Note: this class is made public only to allow access from {@link + * org.apache.solr.handler.ExportHandler}, it should be treated as an internal detail of + * implementation. + * * @lucene.experimental */ public class ExportWriterStream extends TupleStream implements Expressible { @@ -59,18 +61,18 @@ public class ExportWriterStream extends TupleStream implements Expressible { ExportBuffers exportBuffers; ExportBuffers.Buffer buffer; - private static final class TupleEntryWriter implements EntryWriter { + private static final class TupleEntryWriter extends EntryWriter { Tuple tuple; @Override public EntryWriter put(CharSequence k, Object v) throws IOException { if (v instanceof IteratorWriter) { List lst = new ArrayList<>(); - ((IteratorWriter)v).toList(lst); + ((IteratorWriter) v).toList(lst); v = lst; } else if (v instanceof MapWriter) { Map map = new HashMap<>(); - ((MapWriter)v).toMap(map); + ((MapWriter) v).toMap(map); v = map; } tuple.put(k.toString(), v); @@ -83,8 +85,8 @@ public ExportWriterStream(StreamExpression expression, StreamFactory factory) th } /** - * NOTE: this context must contain an instance of {@link ExportBuffers} under the - * {@link ExportBuffers#EXPORT_BUFFERS_KEY} key. + * NOTE: this context must contain an instance of {@link ExportBuffers} under the {@link + * ExportBuffers#EXPORT_BUFFERS_KEY} key. */ @Override public void setStreamContext(StreamContext context) { @@ -103,7 +105,8 @@ private StreamComparator parseComp(String sort) throws IOException { for (int i = 0; i < sorts.length; i++) { String s = sorts[i]; - String[] spec = s.trim().split("\\s+"); //This should take into account spaces in the sort spec. + String[] spec = + s.trim().split("\\s+"); // This should take into account spaces in the sort spec. if (spec.length != 2) { throw new IOException("Invalid sort spec:" + s); @@ -112,7 +115,12 @@ private StreamComparator parseComp(String sort) throws IOException { String fieldName = spec[0].trim(); String order = spec[1].trim(); - comps[i] = new FieldComparator(fieldName, order.equalsIgnoreCase("asc") ? ComparatorOrder.ASCENDING : ComparatorOrder.DESCENDING); + comps[i] = + new FieldComparator( + fieldName, + order.equalsIgnoreCase("asc") + ? ComparatorOrder.ASCENDING + : ComparatorOrder.DESCENDING); } if (comps.length > 1) { @@ -141,15 +149,17 @@ public Tuple read() throws IOException { try { buffer.outDocsIndex = ExportBuffers.Buffer.EMPTY; - //log.debug("--- ews exchange empty buffer {}", buffer); + // log.debug("--- ews exchange empty buffer {}", buffer); boolean exchanged = false; while (!exchanged) { try { long startExchangeBuffers = System.nanoTime(); exportBuffers.exchangeBuffers(); long endExchangeBuffers = System.nanoTime(); - if(log.isDebugEnabled()) { - log.debug("Waited for reader thread:{}", Long.toString(((endExchangeBuffers - startExchangeBuffers) / 1000000))); + if (log.isDebugEnabled()) { + log.debug( + "Waited for reader thread:{}", + Long.toString(((endExchangeBuffers - startExchangeBuffers) / 1000000))); } exchanged = true; } catch (TimeoutException e) { @@ -193,7 +203,7 @@ public Tuple read() throws IOException { res = Tuple.EOF(); } else { pos = buffer.outDocsIndex; - index = -1; //restart index. + index = -1; // restart index. 
log.debug("--- ews new pos={}", pos); } } @@ -209,7 +219,8 @@ public Tuple read() throws IOException { SortDoc sortDoc = buffer.outDocs[++index]; tupleEntryWriter.tuple = new Tuple(); - exportBuffers.exportWriter.writeDoc(sortDoc, exportBuffers.leaves, tupleEntryWriter, exportBuffers.exportWriter.fieldWriters); + exportBuffers.exportWriter.writeDoc( + sortDoc, exportBuffers.leaves, tupleEntryWriter, exportBuffers.exportWriter.fieldWriters); pos--; return tupleEntryWriter.tuple; } diff --git a/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java index 5671359f707..cdec0bffe30 100644 --- a/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java +++ b/solr/core/src/java/org/apache/solr/handler/loader/JavabinLoader.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; - import org.apache.solr.client.solrj.request.JavaBinUpdateRequestCodec; import org.apache.solr.client.solrj.request.UpdateRequest; import org.apache.solr.common.SolrException; @@ -34,8 +33,6 @@ import org.apache.solr.common.params.UpdateParams; import org.apache.solr.common.util.ContentStream; import org.apache.solr.common.util.ContentStreamBase; -import org.apache.solr.common.util.DataInputInputStream; -import org.apache.solr.common.util.FastInputStream; import org.apache.solr.common.util.JavaBinCodec; import org.apache.solr.common.util.NamedList; import org.apache.solr.request.SolrQueryRequest; @@ -63,63 +60,76 @@ public JavabinLoader(ContentStreamLoader contentStreamLoader) { } @Override - public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream, UpdateRequestProcessor processor) throws Exception { + public void load( + SolrQueryRequest req, + SolrQueryResponse rsp, + ContentStream stream, + UpdateRequestProcessor processor) + throws Exception { InputStream is = null; try { is = stream.getStream(); parseAndLoadDocs(req, rsp, is, processor); } finally { - if(is != null) { + if (is != null) { is.close(); } } } - - private void parseAndLoadDocs(final SolrQueryRequest req, SolrQueryResponse rsp, InputStream stream, - final UpdateRequestProcessor processor) throws IOException { + + private void parseAndLoadDocs( + final SolrQueryRequest req, + SolrQueryResponse rsp, + InputStream stream, + final UpdateRequestProcessor processor) + throws IOException { if (req.getParams().getBool("multistream", false)) { handleMultiStream(req, rsp, stream, processor); return; } UpdateRequest update = null; - JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = new JavaBinUpdateRequestCodec.StreamingUpdateHandler() { - private AddUpdateCommand addCmd = null; + JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = + new JavaBinUpdateRequestCodec.StreamingUpdateHandler() { + private AddUpdateCommand addCmd = null; - @Override - public void update(SolrInputDocument document, UpdateRequest updateRequest, Integer commitWithin, Boolean overwrite) { - if (document == null) { - return; - } - if (addCmd == null) { - addCmd = getAddCommand(req, updateRequest.getParams()); - } - addCmd.solrDoc = document; - if (commitWithin != null) { - addCmd.commitWithin = commitWithin; - } - if (overwrite != null) { - addCmd.overwrite = overwrite; - } + @Override + public void update( + SolrInputDocument document, + UpdateRequest updateRequest, + Integer commitWithin, + Boolean overwrite) { + if (document == null) { + return; + } + if (addCmd == null) { + addCmd = getAddCommand(req, 
updateRequest.getParams()); + } + addCmd.solrDoc = document; + if (commitWithin != null) { + addCmd.commitWithin = commitWithin; + } + if (overwrite != null) { + addCmd.overwrite = overwrite; + } - if (updateRequest.isLastDocInBatch()) { - // this is a hint to downstream code that indicates we've sent the last doc in a batch - addCmd.isLastDocInBatch = true; - } + if (updateRequest.isLastDocInBatch()) { + // this is a hint to downstream code that indicates we've sent the last doc in a batch + addCmd.isLastDocInBatch = true; + } + + try { + processor.processAdd(addCmd); + addCmd.clear(); + } catch (IOException e) { + throw new SolrException( + SolrException.ErrorCode.SERVER_ERROR, "ERROR adding document " + document, e); + } + } + }; - try { - processor.processAdd(addCmd); - addCmd.clear(); - } catch (IOException e) { - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ERROR adding document " + document, e); - } - } - }; - FastInputStream in = FastInputStream.wrap(stream); for (; ; ) { - if (in.peek() == -1) return; try { - update = new JavaBinUpdateRequestCodec() - .unmarshal(in, handler); + update = new JavaBinUpdateRequestCodec().unmarshal(stream, handler); } catch (EOFException e) { break; // this is expected } @@ -129,43 +139,48 @@ public void update(SolrInputDocument document, UpdateRequest updateRequest, Inte } } - private void handleMultiStream(SolrQueryRequest req, SolrQueryResponse rsp, InputStream stream, UpdateRequestProcessor processor) + private void handleMultiStream( + SolrQueryRequest req, + SolrQueryResponse rsp, + InputStream stream, + UpdateRequestProcessor processor) throws IOException { - FastInputStream in = FastInputStream.wrap(stream); + SolrParams old = req.getParams(); - try (JavaBinCodec jbc = new JavaBinCodec() { - SolrParams params; - AddUpdateCommand addCmd = null; - - @Override - public List readIterator(DataInputInputStream fis) throws IOException { - while (true) { - Object o = readVal(fis); - if (o == END_OBJ) break; - if (o instanceof NamedList) { - params = ((NamedList) o).toSolrParams(); - } else { - try { - if (o instanceof byte[]) { - if (params != null) req.setParams(params); - byte[] buf = (byte[]) o; - contentStreamLoader.load(req, rsp, new ContentStreamBase.ByteArrayStream(buf, null), processor); + try (JavaBinCodec jbc = + new JavaBinCodec() { + SolrParams params; + AddUpdateCommand addCmd = null; + + @Override + public List readIterator(JavaBinCodec javaBinCodec) throws IOException { + while (true) { + Object o = readVal(this); + if (o == END_OBJ) break; + if (o instanceof NamedList) { + params = ((NamedList) o).toSolrParams(); } else { - throw new RuntimeException("unsupported type "); + try { + if (o instanceof byte[]) { + if (params != null) req.setParams(params); + byte[] buf = (byte[]) o; + contentStreamLoader.load( + req, rsp, new ContentStreamBase.ByteArrayStream(buf, null), processor); + } else { + throw new RuntimeException("unsupported type "); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + params = null; + req.setParams(old); + } } - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - params = null; - req.setParams(old); } + return Collections.emptyList(); } - } - return Collections.emptyList(); - } - - }) { - jbc.unmarshal(in); + }) { + jbc.unmarshal(stream); } } @@ -176,18 +191,19 @@ private AddUpdateCommand getAddCommand(SolrQueryRequest req, SolrParams params) return addCmd; } - private void delete(SolrQueryRequest req, UpdateRequest update, UpdateRequestProcessor 
processor) throws IOException { + private void delete(SolrQueryRequest req, UpdateRequest update, UpdateRequestProcessor processor) + throws IOException { SolrParams params = update.getParams(); DeleteUpdateCommand delcmd = new DeleteUpdateCommand(req); - if(params != null) { + if (params != null) { delcmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1); } - - if(update.getDeleteByIdMap() != null) { - Set>> entries = update.getDeleteByIdMap().entrySet(); - for (Entry> e : entries) { + + if (update.getDeleteByIdMap() != null) { + Set>> entries = update.getDeleteByIdMap().entrySet(); + for (Entry> e : entries) { delcmd.id = e.getKey(); - Map map = e.getValue(); + Map map = e.getValue(); if (map != null) { Long version = (Long) map.get("ver"); if (version != null) { @@ -204,8 +220,8 @@ private void delete(SolrQueryRequest req, UpdateRequest update, UpdateRequestPro delcmd.clear(); } } - - if(update.getDeleteQuery() != null) { + + if (update.getDeleteQuery() != null) { for (String s : update.getDeleteQuery()) { delcmd.query = s; processor.processDelete(delcmd); diff --git a/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java index 0295ad0fc87..c253725c7fd 100644 --- a/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java +++ b/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java @@ -170,7 +170,7 @@ void processUpdate(Reader reader) throws IOException { handleSplitMode(split, f, reader); return; } - parser = new JSONParser(reader); + parser = new JSONParser(reader, new char[16834]); int ev = parser.nextEvent(); while (ev != JSONParser.EOF) { @@ -247,7 +247,7 @@ private void handleSplitMode(String split, String[] fields, final Reader reader) throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Raw data can be stored only if split=/"); parser = new RecordingJSONParser(reader); } else { - parser = new JSONParser(reader); + parser = new JSONParser(reader, new char[16834]); } diff --git a/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java index 551959c9bab..f2a20126846 100644 --- a/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java +++ b/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java @@ -19,6 +19,7 @@ import static org.apache.solr.common.params.CommonParams.ID; import static org.apache.solr.common.params.CommonParams.NAME; +import com.google.common.collect.Lists; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; @@ -35,8 +36,6 @@ import javax.xml.stream.XMLStreamConstants; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; -import com.google.common.collect.Lists; -import org.apache.commons.io.IOUtils; import org.apache.solr.common.EmptyEntityResolver; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; @@ -46,6 +45,7 @@ import org.apache.solr.common.params.UpdateParams; import org.apache.solr.common.util.ContentStream; import org.apache.solr.common.util.ContentStreamBase; +import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.StrUtils; import org.apache.solr.common.util.XMLErrorLogger; import org.apache.solr.handler.RequestHandlerUtils; @@ -85,9 +85,10 @@ public XMLLoader init(SolrParams args) { } catch (IllegalArgumentException ex) { // Other implementations will likely throw this exception since "reuse-instance" // 
isimplementation specific. - log.debug("Unable to set the 'reuse-instance' property for the input chain: {}", inputFactory); + log.debug( + "Unable to set the 'reuse-instance' property for the input chain: {}", inputFactory); } - + // Init SAX parser (for XSL): saxFactory = SAXParserFactory.newInstance(); saxFactory.setNamespaceAware(true); // XSL needs this! @@ -102,9 +103,14 @@ public String getDefaultWT() { } @Override - public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream, UpdateRequestProcessor processor) throws Exception { + public void load( + SolrQueryRequest req, + SolrQueryResponse rsp, + ContentStream stream, + UpdateRequestProcessor processor) + throws Exception { final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType()); - + InputStream is = null; XMLStreamReader parser = null; @@ -112,18 +118,21 @@ public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stre try { is = stream.getStream(); if (log.isTraceEnabled()) { - final byte[] body = IOUtils.toByteArray(is); + final byte[] body = org.apache.commons.io.IOUtils.toByteArray(is); // TODO: The charset may be wrong, as the real charset is later // determined by the XML parser, the content-type is only used as a hint! if (log.isTraceEnabled()) { - log.trace("body: {}", new String(body, (charset == null) ? - ContentStreamBase.DEFAULT_CHARSET : charset)); + log.trace( + "body: {}", + new String(body, (charset == null) ? ContentStreamBase.DEFAULT_CHARSET : charset)); } IOUtils.closeQuietly(is); is = new ByteArrayInputStream(body); } - parser = (charset == null) ? - inputFactory.createXMLStreamReader(is) : inputFactory.createXMLStreamReader(is, charset); + parser = + (charset == null) + ? inputFactory.createXMLStreamReader(is) + : inputFactory.createXMLStreamReader(is, charset); this.processUpdate(req, processor, parser); } catch (XMLStreamException e) { throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e.getMessage(), e); @@ -133,11 +142,10 @@ public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stre } } - /** - * @since solr 1.2 - */ - protected void processUpdate(SolrQueryRequest req, UpdateRequestProcessor processor, XMLStreamReader parser) - throws XMLStreamException, IOException, FactoryConfigurationError { + /** @since solr 1.2 */ + protected void processUpdate( + SolrQueryRequest req, UpdateRequestProcessor processor, XMLStreamReader parser) + throws XMLStreamException, IOException, FactoryConfigurationError { AddUpdateCommand addCmd = null; SolrParams params = req.getParams(); while (true) { @@ -154,10 +162,11 @@ protected void processUpdate(SolrQueryRequest req, UpdateRequestProcessor proces addCmd = new AddUpdateCommand(req); - // First look for commitWithin parameter on the request, will be overwritten for individual 's + // First look for commitWithin parameter on the request, will be overwritten for + // individual 's addCmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1); addCmd.overwrite = params.getBool(UpdateParams.OVERWRITE, true); - + for (int i = 0; i < parser.getAttributeCount(); i++) { String attrName = parser.getAttributeLocalName(i); String attrVal = parser.getAttributeValue(i); @@ -171,20 +180,24 @@ protected void processUpdate(SolrQueryRequest req, UpdateRequestProcessor proces } } else if ("doc".equals(currTag)) { - if(addCmd != null) { + if (addCmd != null) { log.trace("adding doc..."); addCmd.clear(); addCmd.solrDoc = readDoc(parser); processor.processAdd(addCmd); } else { - 
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unexpected tag without an tag surrounding it."); + throw new SolrException( + SolrException.ErrorCode.BAD_REQUEST, + "Unexpected tag without an tag surrounding it."); } - } else if (UpdateRequestHandler.COMMIT.equals(currTag) || UpdateRequestHandler.OPTIMIZE.equals(currTag)) { + } else if (UpdateRequestHandler.COMMIT.equals(currTag) + || UpdateRequestHandler.OPTIMIZE.equals(currTag)) { log.trace("parsing {}", currTag); - CommitUpdateCommand cmd = new CommitUpdateCommand(req, UpdateRequestHandler.OPTIMIZE.equals(currTag)); + CommitUpdateCommand cmd = + new CommitUpdateCommand(req, UpdateRequestHandler.OPTIMIZE.equals(currTag)); ModifiableSolrParams mp = new ModifiableSolrParams(); - + for (int i = 0; i < parser.getAttributeCount(); i++) { String attrName = parser.getAttributeLocalName(i); String attrVal = parser.getAttributeValue(i); @@ -192,7 +205,9 @@ protected void processUpdate(SolrQueryRequest req, UpdateRequestProcessor proces } RequestHandlerUtils.validateCommitParams(mp); - SolrParams p = SolrParams.wrapDefaults(mp, req.getParams()); // default to the normal request params for commit options + SolrParams p = + SolrParams.wrapDefaults( + mp, req.getParams()); // default to the normal request params for commit options RequestHandlerUtils.updateCommit(cmd, p); processor.processCommit(cmd); @@ -213,14 +228,14 @@ else if (UpdateRequestHandler.DELETE.equals(currTag)) { } } - /** - * @since solr 1.3 - */ - void processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLStreamReader parser) throws XMLStreamException, IOException { + /** @since solr 1.3 */ + void processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLStreamReader parser) + throws XMLStreamException, IOException { // Parse the command DeleteUpdateCommand deleteCmd = new DeleteUpdateCommand(req); - // First look for commitWithin parameter on the request, will be overwritten for individual 's + // First look for commitWithin parameter on the request, will be overwritten for individual + // 's SolrParams params = req.getParams(); deleteCmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1); @@ -247,11 +262,10 @@ void processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLSt if (!(ID.equals(mode) || "query".equals(mode))) { String msg = "XML element has invalid XML child element: " + mode; log.warn(msg); - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - msg); + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg); } text.setLength(0); - + if (ID.equals(mode)) { for (int i = 0; i < parser.getAttributeCount(); i++) { String attrName = parser.getAttributeLocalName(i); @@ -269,7 +283,7 @@ void processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLSt case XMLStreamConstants.END_ELEMENT: String currTag = parser.getLocalName(); if (ID.equals(currTag)) { - deleteCmd.setId(text.toString()); + deleteCmd.setId(text.toString()); } else if ("query".equals(currTag)) { deleteCmd.setQuery(text.toString()); } else if ("delete".equals(currTag)) { @@ -277,8 +291,7 @@ void processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLSt } else { String msg = "XML element has invalid XML (closing) child element: " + currTag; log.warn(msg); - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - msg); + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg); } processor.processDelete(deleteCmd); deleteCmd.clear(); @@ -294,7 +307,6 @@ void 
processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLSt } } - /** * Given the input stream, read a document * @@ -308,7 +320,10 @@ public SolrInputDocument readDoc(XMLStreamReader parser) throws XMLStreamExcepti for (int i = 0; i < parser.getAttributeCount(); i++) { attrName = parser.getAttributeLocalName(i); if ("boost".equals(attrName)) { - String message = "Ignoring document boost: " + parser.getAttributeValue(i) + " as index-time boosts are not supported anymore"; + String message = + "Ignoring document boost: " + + parser.getAttributeValue(i) + + " as index-time boosts are not supported anymore"; if (WARNED_ABOUT_INDEX_TIME_BOOSTS.compareAndSet(false, true)) { log.warn(message); } else { @@ -330,7 +345,7 @@ public SolrInputDocument readDoc(XMLStreamReader parser) throws XMLStreamExcepti while (!complete) { int event = parser.next(); switch (event) { - // Add everything to the text + // Add everything to the text case XMLStreamConstants.SPACE: case XMLStreamConstants.CDATA: case XMLStreamConstants.CHARACTERS: @@ -373,7 +388,7 @@ public SolrInputDocument readDoc(XMLStreamReader parser) throws XMLStreamExcepti } break; } - if(!isLabeledChildDoc){ + if (!isLabeledChildDoc) { // only add data if this is not a childDoc, since it was added already doc.addField(name, v); } else { @@ -389,25 +404,22 @@ public SolrInputDocument readDoc(XMLStreamReader parser) throws XMLStreamExcepti text.setLength(0); String localName = parser.getLocalName(); if ("doc".equals(localName)) { - if(name != null) { + if (name != null) { // flag to prevent spaces after doc from being added isLabeledChildDoc = true; - if(!doc.containsKey(name)) { + if (!doc.containsKey(name)) { doc.setField(name, Lists.newArrayList()); } doc.addField(name, readDoc(parser)); break; } - if (subDocs == null) - subDocs = Lists.newArrayList(); + if (subDocs == null) subDocs = Lists.newArrayList(); subDocs.add(readDoc(parser)); - } - else { + } else { if (!"field".equals(localName)) { String msg = "XML element has invalid XML child element: " + localName; log.warn(msg); - throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, - msg); + throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg); } update = null; isNull = false; @@ -418,7 +430,10 @@ public SolrInputDocument readDoc(XMLStreamReader parser) throws XMLStreamExcepti if (NAME.equals(attrName)) { name = attrVal; } else if ("boost".equals(attrName)) { - String message = "Ignoring field boost: " + attrVal + " as index-time boosts are not supported anymore"; + String message = + "Ignoring field boost: " + + attrVal + + " as index-time boosts are not supported anymore"; if (WARNED_ABOUT_INDEX_TIME_BOOSTS.compareAndSet(false, true)) { log.warn(message); } else { @@ -437,7 +452,7 @@ public SolrInputDocument readDoc(XMLStreamReader parser) throws XMLStreamExcepti } } - if (updateMap != null) { + if (updateMap != null) { for (Map.Entry> entry : updateMap.entrySet()) { name = entry.getKey(); Map value = entry.getValue(); diff --git a/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java b/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java index 492735be6e7..613f351d631 100644 --- a/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java +++ b/solr/core/src/java/org/apache/solr/logging/MDCLoggingContext.java @@ -42,7 +42,7 @@ public class MDCLoggingContext { public static void setCollection(String collection) { if (collection != null) { - MDC.put(COLLECTION_PROP, "c:" + collection); + MDC.put(COLLECTION_PROP, collection); 
} else { MDC.remove(COLLECTION_PROP); } @@ -50,7 +50,7 @@ public static void setCollection(String collection) { public static void setTracerId(String traceId) { if (!StringUtils.isEmpty(traceId)) { - MDC.put(TRACE_ID, "t:" + traceId); + MDC.put(TRACE_ID, traceId); } else { MDC.remove(TRACE_ID); } @@ -58,7 +58,7 @@ public static void setTracerId(String traceId) { public static void setShard(String shard) { if (shard != null) { - MDC.put(SHARD_ID_PROP, "s:" + shard); + MDC.put(SHARD_ID_PROP, shard); } else { MDC.remove(SHARD_ID_PROP); } @@ -66,7 +66,7 @@ public static void setShard(String shard) { public static void setReplica(String replica) { if (replica != null) { - MDC.put(REPLICA_PROP, "r:" + replica); + MDC.put(REPLICA_PROP, replica); } else { MDC.remove(REPLICA_PROP); } @@ -74,7 +74,7 @@ public static void setReplica(String replica) { public static void setCoreName(String core) { if (core != null) { - MDC.put(CORE_NAME_PROP, "x:" + core); + MDC.put(CORE_NAME_PROP, core); } else { MDC.remove(CORE_NAME_PROP); } @@ -100,7 +100,7 @@ public static void setNode(String node) { private static void setNodeName(String node) { if (node != null) { - MDC.put(NODE_NAME_PROP, "n:" + node); + MDC.put(NODE_NAME_PROP, node); } else { MDC.remove(NODE_NAME_PROP); } diff --git a/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java b/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java index c400d6615b0..d1e5f2360ac 100644 --- a/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java +++ b/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java @@ -168,7 +168,7 @@ public DocSet createDocSet(SolrIndexSearcher searcher) throws IOException { } private DocSet createDocSet(SolrIndexSearcher searcher, long cost) throws IOException { - assert TestInjection.injectDocSetDelay(); + assert TestInjection.injectDocSetDelay(this); int maxDoc = searcher.maxDoc(); BitDocSet liveDocs = searcher.getLiveDocSet(); FixedBitSet liveBits = liveDocs.size() == maxDoc ? 
null : liveDocs.getBits(); diff --git a/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java b/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java index 34299116157..feb1011b9d4 100644 --- a/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java +++ b/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java @@ -16,6 +16,8 @@ */ package org.apache.solr.response; +import static org.apache.solr.common.util.ByteArrayUtf8CharSequence.convertCharSeq; + import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; @@ -27,7 +29,6 @@ import java.util.Iterator; import java.util.List; import java.util.function.Consumer; - import org.apache.commons.io.output.ByteArrayOutputStream; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexableField; @@ -46,15 +47,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.solr.common.util.ByteArrayUtf8CharSequence.convertCharSeq; - - public class BinaryResponseWriter implements BinaryQueryResponseWriter { -// public static boolean useUtf8CharSeq = true; + // public static boolean useUtf8CharSeq = true; private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @Override - public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) throws IOException { + public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) + throws IOException { Resolver resolver = new Resolver(req, response.getReturnFields()); if (req.getParams().getBool(CommonParams.OMIT_HEADER, false)) response.removeResponseHeader(); try (JavaBinCodec jbc = new JavaBinCodec(resolver)) { @@ -62,16 +61,18 @@ public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse resp } } - private static void serialize(SolrQueryResponse response,Resolver resolver, String f) throws IOException { - try (JavaBinCodec jbc = new JavaBinCodec(resolver); FileOutputStream fos = new FileOutputStream(f)) { + private static void serialize(SolrQueryResponse response, Resolver resolver, String f) + throws IOException, java.io.FileNotFoundException { + try (JavaBinCodec jbc = new JavaBinCodec(resolver); + FileOutputStream fos = new FileOutputStream(f)) { jbc.setWritableDocFields(resolver).marshal(response.getValues(), fos); fos.flush(); } - } @Override - public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException { + public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) + throws IOException { throw new RuntimeException("This is a binary writer , Cannot write to a characterstream"); } @@ -80,7 +81,8 @@ public String getContentType(SolrQueryRequest request, SolrQueryResponse respons return BinaryResponseParser.BINARY_CONTENT_TYPE; } - public static class Resolver implements JavaBinCodec.ObjectResolver , JavaBinCodec.WritableDocFields { + public static class Resolver + implements JavaBinCodec.ObjectResolver, JavaBinCodec.WritableDocFields { protected final SolrQueryRequest solrQueryRequest; protected IndexSchema schema; protected ReturnFields returnFields; @@ -95,21 +97,22 @@ public Object resolve(Object o, JavaBinCodec codec) throws IOException { if (o instanceof StoredField) { CharSequence val = ((StoredField) o).getCharSequenceValue(); if (val instanceof Utf8CharSequence) { - codec.writeUTF8Str((Utf8CharSequence) val); + JavaBinCodec.writeUTF8Str(codec, (Utf8CharSequence) val); return null; } } if 
(o instanceof ResultContext) { ReturnFields orig = returnFields; - ResultContext res = (ResultContext)o; - if(res.getReturnFields()!=null) { + ResultContext res = (ResultContext) o; + if (res.getReturnFields() != null) { returnFields = res.getReturnFields(); } -// if (useUtf8CharSeq) { - ResultContext.READASBYTES.set(fieldName -> { - SchemaField fld = res.getRequest().getSchema().getFieldOrNull(fieldName); - return fld != null && fld.getType().isUtf8Field(); - }); + + ResultContext.READASBYTES.set( + fieldName -> { + SchemaField fld = res.getRequest().getSchema().getFieldOrNull(fieldName); + return fld != null && fld.getType().isUtf8Field(); + }); try { writeResults(res, codec); @@ -121,14 +124,15 @@ public Object resolve(Object o, JavaBinCodec codec) throws IOException { return null; // null means we completely handled it } if (o instanceof DocList) { - ResultContext ctx = new BasicResultContext((DocList)o, returnFields, null, null, solrQueryRequest); + ResultContext ctx = + new BasicResultContext((DocList) o, returnFields, null, null, solrQueryRequest); writeResults(ctx, codec); return null; // null means we completely handled it } - if( o instanceof IndexableField ) { - if(schema == null) schema = solrQueryRequest.getSchema(); + if (o instanceof IndexableField) { + if (schema == null) schema = solrQueryRequest.getSchema(); - IndexableField f = (IndexableField)o; + IndexableField f = (IndexableField) o; SchemaField sf = schema.getFieldOrNull(f.name()); try { o = DocsStreamer.getValue(sf, f); @@ -149,8 +153,8 @@ public boolean wantsAllFields() { return returnFields.wantsAllFields(); } - protected void writeResultsBody( ResultContext res, JavaBinCodec codec ) throws IOException { - codec.writeTag(JavaBinCodec.ARR, res.getDocList().size()); + protected void writeResultsBody(ResultContext res, JavaBinCodec codec) throws IOException { + JavaBinCodec.writeTag(codec, JavaBinCodec.ARR, res.getDocList().size()); Iterator docStreamer = res.getProcessedDocuments(); while (docStreamer.hasNext()) { SolrDocument doc = docStreamer.next(); @@ -159,32 +163,29 @@ protected void writeResultsBody( ResultContext res, JavaBinCodec codec ) throws } public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOException { - codec.writeTag(JavaBinCodec.SOLRDOCLST); + JavaBinCodec.writeTag(codec, JavaBinCodec.SOLRDOCLST); List l = new ArrayList<>(4); - l.add( ctx.getDocList().matches()); + l.add(ctx.getDocList().matches()); l.add((long) ctx.getDocList().offset()); - + Float maxScore = null; if (ctx.wantsScores()) { maxScore = ctx.getDocList().maxScore(); } l.add(maxScore); l.add(ctx.getDocList().hitCountRelation() == TotalHits.Relation.EQUAL_TO); - codec.writeArray(l); - + JavaBinCodec.writeArray(codec, l); + // this is a seprate function so that streaming responses can use just that part - writeResultsBody( ctx, codec ); + writeResultsBody(ctx, codec); } - } - /** * TODO -- there may be a way to do this without marshal at all... * - * @return a response object equivalent to what you get from the XML/JSON/javabin parser. Documents become - * SolrDocuments, DocList becomes SolrDocumentList etc. - * + * @return a response object equivalent to what you get from the XML/JSON/javabin parser. + * Documents become SolrDocuments, DocList becomes SolrDocumentList etc. 
* @since solr 1.4 */ @SuppressWarnings("unchecked") @@ -197,31 +198,26 @@ public static NamedList getParsedResponse(SolrQueryRequest req, SolrQuer ByteArrayOutputStream out = new ByteArrayOutputStream(); try (JavaBinCodec jbc = new JavaBinCodec(resolver)) { - jbc.setWritableDocFields(resolver).marshal(rsp.getValues(), out); + jbc.setWritableDocFields(resolver).marshal(rsp.getValues(), out, true); } InputStream in = out.toInputStream(); try (JavaBinCodec jbc = new JavaBinCodec(resolver)) { return (NamedList) jbc.unmarshal(in); } - } - catch (Exception ex) { + } catch (Exception ex) { throw new RuntimeException(ex); } } static class MaskCharSeqSolrDocument extends SolrDocument { - /** - * Get the value or collection of values for a given field. - */ + /** Get the value or collection of values for a given field. */ @Override public Object getFieldValue(String name) { return convertCharSeq(_fields.get(name)); } - /** - * Get a collection of values for a given field name - */ + /** Get a collection of values for a given field name */ @SuppressWarnings("unchecked") @Override public Collection getFieldValues(String name) { @@ -251,14 +247,11 @@ public Collection getRawFieldValues(String name) { return null; } - - /** - * Iterate of String->Object keys - */ + /** Iterate of String->Object keys */ @Override public Iterator> iterator() { Iterator> it = _fields.entrySet().iterator(); - return new Iterator>() { + return new Iterator<>() { @Override public boolean hasNext() { return it.hasNext(); @@ -272,18 +265,15 @@ public Entry next() { }; } - /////////////////////////////////////////////////////////////////// // Get the field values /////////////////////////////////////////////////////////////////// - /** - * returns the first value for a field - */ + /** returns the first value for a field */ @Override public Object getFirstValue(String name) { Object v = _fields.get(name); - if (v == null || !(v instanceof Collection)) return convertCharSeq(v); + if (!(v instanceof Collection)) return convertCharSeq(v); Collection c = (Collection) v; if (c.size() > 0) { return convertCharSeq(c.iterator().next()); @@ -305,5 +295,4 @@ public void forEach(Consumer> action) { super.forEach(action); } } - } diff --git a/solr/core/src/java/org/apache/solr/response/transform/RawValueTransformerFactory.java b/solr/core/src/java/org/apache/solr/response/transform/RawValueTransformerFactory.java index 5838e10520e..91e36638b36 100644 --- a/solr/core/src/java/org/apache/solr/response/transform/RawValueTransformerFactory.java +++ b/solr/core/src/java/org/apache/solr/response/transform/RawValueTransformerFactory.java @@ -16,11 +16,10 @@ */ package org.apache.solr.response.transform; +import com.google.common.base.Strings; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; - -import com.google.common.base.Strings; import org.apache.lucene.index.IndexableField; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.params.CommonParams; @@ -33,94 +32,85 @@ import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.response.QueryResponseWriter; -/** - * @since solr 5.2 - */ -public class RawValueTransformerFactory extends TransformerFactory -{ +/** @since solr 5.2 */ +public class RawValueTransformerFactory extends TransformerFactory { String applyToWT = null; - - public RawValueTransformerFactory() { - - } + + public RawValueTransformerFactory() {} public RawValueTransformerFactory(String wt) { this.applyToWT = wt; } - + @Override public void init(NamedList args) { 
super.init(args); - if(defaultUserArgs!=null&&defaultUserArgs.startsWith("wt=")) { + if (defaultUserArgs != null && defaultUserArgs.startsWith("wt=")) { applyToWT = defaultUserArgs.substring(3); } } - + @Override public DocTransformer create(String display, SolrParams params, SolrQueryRequest req) { String field = params.get("f"); - if(Strings.isNullOrEmpty(field)) { + if (Strings.isNullOrEmpty(field)) { field = display; } // When a 'wt' is specified in the transformer, only apply it to the same wt boolean apply = true; - if(applyToWT!=null) { + if (applyToWT != null) { String qwt = req.getParams().get(CommonParams.WT); - if(qwt==null) { + if (qwt == null) { QueryResponseWriter qw = req.getCore().getQueryResponseWriter(req); QueryResponseWriter dw = req.getCore().getQueryResponseWriter(applyToWT); - if(qw!=dw) { + if (qw != dw) { apply = false; } - } - else { + } else { apply = applyToWT.equals(qwt); } } - if(apply) { - return new RawTransformer( field, display ); + if (apply) { + return new RawTransformer(field, display); } - + if (field.equals(display)) { // we have to ensure the field is returned return new DocTransformer.NoopFieldTransformer(field); } - return new RenameFieldTransformer( field, display, false ); + return new RenameFieldTransformer(field, display, false); } - - static class RawTransformer extends DocTransformer - { + + static class RawTransformer extends DocTransformer { final String field; final String display; - public RawTransformer( String field, String display ) - { + public RawTransformer(String field, String display) { this.field = field; this.display = display; } @Override - public String getName() - { + public String getName() { return display; } @Override public void transform(SolrDocument doc, int docid) { Object val = doc.remove(field); - if(val==null) { + if (val == null) { return; } - if(val instanceof Collection) { - Collection current = (Collection)val; - ArrayList vals = new ArrayList(); - for(Object v : current) { + if (val instanceof Collection) { + Collection current = (Collection) val; + ArrayList vals = + new ArrayList(); + for (Object v : current) { vals.add(new WriteableStringValue(v)); } doc.setField(display, vals); - } - else { + } else { doc.setField(display, new WriteableStringValue(val)); } } @@ -130,21 +120,20 @@ public String[] getExtraRequestFields() { return new String[] {this.field}; } } - + public static class WriteableStringValue extends WriteableValue { public final Object val; - + public WriteableStringValue(Object val) { this.val = val; } - + @Override public void write(String name, TextWriter writer) throws IOException { String str = null; - if(val instanceof IndexableField) { // delays holding it in memory - str = ((IndexableField)val).stringValue(); - } - else { + if (val instanceof IndexableField) { // delays holding it in memory + str = ((IndexableField) val).stringValue(); + } else { str = val.toString(); } writer.getWriter().write(str); @@ -153,13 +142,11 @@ public void write(String name, TextWriter writer) throws IOException { @Override public Object resolve(Object o, JavaBinCodec codec) throws IOException { ObjectResolver orig = codec.getResolver(); - if(orig != null) { - codec.writeVal(orig.resolve(val, codec)); + if (orig != null) { + JavaBinCodec.writeVal(codec, orig.resolve(val, codec)); return null; } return val.toString(); } } } - - diff --git a/solr/core/src/java/org/apache/solr/response/transform/WriteableGeoJSON.java b/solr/core/src/java/org/apache/solr/response/transform/WriteableGeoJSON.java index 
f89f5c847f5..107e01279ac 100644 --- a/solr/core/src/java/org/apache/solr/response/transform/WriteableGeoJSON.java +++ b/solr/core/src/java/org/apache/solr/response/transform/WriteableGeoJSON.java @@ -39,7 +39,7 @@ public WriteableGeoJSON(Shape shape, ShapeWriter jsonWriter) { @Override public Object resolve(Object o, JavaBinCodec codec) throws IOException { - codec.writeStr(jsonWriter.toString(shape)); + JavaBinCodec.writeStr(codec, jsonWriter.toString(shape)); return null; // this means we wrote it } diff --git a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchemaFactory.java b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchemaFactory.java index 75bdd3a7fa5..7c3e670f13b 100644 --- a/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchemaFactory.java +++ b/solr/core/src/java/org/apache/solr/schema/ManagedIndexSchemaFactory.java @@ -20,6 +20,9 @@ import java.io.IOException; import java.io.InputStream; import java.lang.invoke.MethodHandles; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import org.apache.commons.io.IOUtils; import org.apache.solr.cloud.ZkController; @@ -34,6 +37,7 @@ import org.apache.solr.core.SolrConfig; import org.apache.solr.core.SolrCore; import org.apache.solr.core.SolrResourceLoader; +import org.apache.solr.core.SolrResourceNotFoundException; import org.apache.solr.util.SystemIdResolver; import org.apache.solr.util.plugin.SolrCoreAware; import org.apache.zookeeper.CreateMode; @@ -49,7 +53,8 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol public static final String UPGRADED_SCHEMA_EXTENSION = ".bak"; private static final String SCHEMA_DOT_XML = "schema.xml"; - public static final String DEFAULT_MANAGED_SCHEMA_RESOURCE_NAME = "managed-schema"; + public static final String DEFAULT_MANAGED_SCHEMA_RESOURCE_NAME = "managed-schema.xml"; + public static final String LEGACY_MANAGED_SCHEMA_RESOURCE_NAME = "managed-schema"; public static final String MANAGED_SCHEMA_RESOURCE_NAME = "managedSchemaResourceName"; private boolean isMutable = true; @@ -79,7 +84,6 @@ public void init(NamedList args) { log.error(msg); throw new SolrException(ErrorCode.SERVER_ERROR, msg); } - if (args.size() > 0) { String msg = "Unexpected arg(s): " + args; log.error(msg); @@ -92,6 +96,65 @@ public String getSchemaResourceName(String cdResourceName) { return managedSchemaResourceName; // actually a guess; reality depends on the actual files in the config set :-( } + /** + * Lookup the path to the managed schema, dealing with falling back to the + * legacy managed-schema file, instead of the expected managed-schema.xml file if the legacy file exists. + * + * This method is duplicated in ManagedIndexSchema. + */ + public String lookupZKManagedSchemaPath() { + final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader)loader; + final ZkController zkController = zkLoader.getZkController(); + final SolrZkClient zkClient = zkController.getZkClient(); + String managedSchemaPath = zkLoader.getConfigSetZkPath() + "/" + managedSchemaResourceName; + final String legacyManagedSchemaPath = zkLoader.getConfigSetZkPath() + "/" + ManagedIndexSchemaFactory.LEGACY_MANAGED_SCHEMA_RESOURCE_NAME; + try { + // check if we are using the legacy managed-schema file name. + if (zkClient.exists(legacyManagedSchemaPath, true)){ + log.debug("Legacy managed schema resource {} found - loading legacy managed schema instead of {} file." 
+ , ManagedIndexSchemaFactory.LEGACY_MANAGED_SCHEMA_RESOURCE_NAME, managedSchemaResourceName); + managedSchemaPath = legacyManagedSchemaPath; + } + } catch (KeeperException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + // Restore the interrupted status + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + + } + return managedSchemaPath; + } + + /** + * Lookup the path to the managed schema, dealing with falling back to the + * legacy managed-schema file, instead of the expected managed-schema.xml file if the legacy file exists. + */ + public Path lookupLocalManagedSchemaPath() { + final Path legacyManagedSchemaPath = Paths.get(loader.getConfigPath().toString(), ManagedIndexSchemaFactory.LEGACY_MANAGED_SCHEMA_RESOURCE_NAME); + + Path managedSchemaPath = Paths.get(loader.getConfigPath().toString(), managedSchemaResourceName); + + // check if we are using the legacy managed-schema file name. + if (Files.exists(legacyManagedSchemaPath)){ + log.debug("Legacy managed schema resource {} found - loading legacy managed schema instead of {} file.", ManagedIndexSchemaFactory.LEGACY_MANAGED_SCHEMA_RESOURCE_NAME, managedSchemaResourceName); + managedSchemaPath = legacyManagedSchemaPath; + } + + Path parentPath = managedSchemaPath.getParent(); + if (!Files.isDirectory(parentPath)) { + try { + Files.createDirectories(parentPath); + } + catch (IOException ioe) { + final String msg = "Can't create managed schema directory " + parentPath; + log.error(msg); + throw new SolrException(ErrorCode.SERVER_ERROR, msg); + } + } + + return managedSchemaPath; + } /** * First, try to locate the managed schema file named in the managedSchemaResourceName * param. If the managed schema file exists and is accessible, it is used to instantiate @@ -102,7 +165,7 @@ public String getSchemaResourceName(String cdResourceName) { * * Once the IndexSchema is instantiated, if the managed schema file does not exist, * the instantiated IndexSchema is persisted to the managed schema file named in the - * managedSchemaResourceName param, in the directory given by + * managedSchemaResourceName param, in the directory given by * {@link org.apache.solr.core.SolrResourceLoader#getConfigDir()}, or if configs are * in ZooKeeper, under {@link org.apache.solr.cloud.ZkSolrResourceLoader#getConfigSetZkPath()}. 
* @@ -126,7 +189,8 @@ public ManagedIndexSchema create(String resourceName, SolrConfig config, ConfigS } else { // ZooKeeper final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader)loader; final SolrZkClient zkClient = zkLoader.getZkController().getZkClient(); - final String managedSchemaPath = zkLoader.getConfigSetZkPath() + "/" + managedSchemaResourceName; + final String managedSchemaPath = lookupZKManagedSchemaPath(); + managedSchemaResourceName = managedSchemaPath.substring(managedSchemaPath.lastIndexOf("/")+1); // not loving this Stat stat = new Stat(); try { // Attempt to load the managed schema @@ -153,7 +217,7 @@ public ManagedIndexSchema create(String resourceName, SolrConfig config, ConfigS schemaInputStream = loader.openResource(resourceName); loadedResource = resourceName; shouldUpgrade = true; - } catch (Exception e) { + } catch (IOException e) { try { // Retry to load the managed schema, in case it was created since the first attempt byte[] data = zkClient.getData(managedSchemaPath, null, stat, true); @@ -195,6 +259,8 @@ private InputStream readSchemaLocally() { InputStream schemaInputStream = null; try { // Attempt to load the managed schema + final Path managedSchemaPath = lookupLocalManagedSchemaPath(); + managedSchemaResourceName = managedSchemaPath.getName(managedSchemaPath.getNameCount()-1).toString(); schemaInputStream = loader.openResource(managedSchemaResourceName); loadedResource = managedSchemaResourceName; warnIfNonManagedSchemaExists(); @@ -219,7 +285,7 @@ private InputStream readSchemaLocally() { } /** - * Return whether a non-managed schema exists, either in local storage or on ZooKeeper. + * Return whether a non-managed schema exists, either in local storage or on ZooKeeper. */ private void warnIfNonManagedSchemaExists() { if ( ! resourceName.equals(managedSchemaResourceName)) { @@ -232,7 +298,7 @@ private void warnIfNonManagedSchemaExists() { exists = zkLoader.getZkController().pathExists(nonManagedSchemaPath); } catch (InterruptedException e) { Thread.currentThread().interrupt(); // Restore the interrupted status - log.warn("", e); // Log as warning and suppress the exception + log.warn("", e); // Log as warning and suppress the exception } catch (KeeperException e) { // log as warning and suppress the exception log.warn("Error checking for the existence of the non-managed schema {}", resourceName, e); @@ -244,7 +310,10 @@ private void warnIfNonManagedSchemaExists() { if (null != nonManagedSchemaInputStream) { exists = true; } - } catch (IOException e) { + } catch (SolrResourceNotFoundException e) { + // This is expected when the non-managed schema does not exist + } catch (IOException e) { + throw new RuntimeException(e); // This is expected when the non-managed schema does not exist } finally { IOUtils.closeQuietly(nonManagedSchemaInputStream); @@ -258,7 +327,7 @@ private void warnIfNonManagedSchemaExists() { } /** - * Persist the managed schema and rename the non-managed schema + * Persist the managed schema and rename the non-managed schema * by appending {@link #UPGRADED_SCHEMA_EXTENSION}. 
* * Failure to rename the non-managed schema will be logged as a warning, @@ -290,7 +359,7 @@ private void upgradeToManagedSchema() { } else { File upgradedSchemaFile = new File(nonManagedSchemaFile + UPGRADED_SCHEMA_EXTENSION); if (nonManagedSchemaFile.renameTo(upgradedSchemaFile)) { - // Set the resource name to the managed schema so that the CoreAdminHandler returns a findable filename + // Set the resource name to the managed schema so that the CoreAdminHandler returns a findable filename schema.setResourceName(managedSchemaResourceName); log.info("After upgrading to managed schema, renamed the non-managed schema {} to {}" @@ -320,7 +389,7 @@ private File locateConfigFile(String resource) { } /** - * Persist the managed schema to ZooKeeper and rename the non-managed schema + * Persist the managed schema to ZooKeeper and rename the non-managed schema * by appending {@link #UPGRADED_SCHEMA_EXTENSION}. * * Failure to rename the non-managed schema will be logged as a warning, @@ -434,7 +503,7 @@ public void setSchema(ManagedIndexSchema schema) { this.schema = schema; core.setLatestSchema(schema); } - + public boolean isMutable() { return isMutable; } diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaManager.java b/solr/core/src/java/org/apache/solr/schema/SchemaManager.java index cc9e1503a85..03bdde24fe0 100644 --- a/solr/core/src/java/org/apache/solr/schema/SchemaManager.java +++ b/solr/core/src/java/org/apache/solr/schema/SchemaManager.java @@ -427,31 +427,33 @@ private ManagedIndexSchema getFreshManagedSchema(SolrCore core) throws IOExcepti KeeperException, InterruptedException { SolrResourceLoader resourceLoader = core.getResourceLoader(); - String name = core.getLatestSchema().getResourceName(); + String schemaResourceName = core.getLatestSchema().getResourceName(); if (resourceLoader instanceof ZkSolrResourceLoader) { final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader)resourceLoader; SolrZkClient zkClient = zkLoader.getZkController().getZkClient(); + String managedSchemaPath = zkLoader.getConfigSetZkPath() + "/" + schemaResourceName; try { - if (!zkClient.exists(zkLoader.getConfigSetZkPath() + "/" + name, true)) { - String backupName = name + ManagedIndexSchemaFactory.UPGRADED_SCHEMA_EXTENSION; + if (!zkClient.exists(managedSchemaPath, true)) { + String backupName = schemaResourceName + ManagedIndexSchemaFactory.UPGRADED_SCHEMA_EXTENSION; if (!zkClient.exists(zkLoader.getConfigSetZkPath() + "/" + backupName, true)) { - log.warn("Unable to retrieve fresh managed schema, neither {} nor {} exist.", name, backupName); + log.warn("Unable to retrieve fresh managed schema, neither {} nor {} exist.", schemaResourceName, backupName); // use current schema return (ManagedIndexSchema) core.getLatestSchema(); } else { - name = backupName; + schemaResourceName = backupName; } } } catch (Exception e) { - log.warn("Unable to retrieve fresh managed schema {}", name, e); + log.warn("Unable to retrieve fresh managed schema {}", schemaResourceName, e); // use current schema return (ManagedIndexSchema) core.getLatestSchema(); } - InputStream in = resourceLoader.openResource(name); + schemaResourceName = managedSchemaPath.substring(managedSchemaPath.lastIndexOf("/")+1); + InputStream in = resourceLoader.openResource(schemaResourceName); if (in instanceof ZkSolrResourceLoader.ZkByteArrayInputStream) { int version = ((ZkSolrResourceLoader.ZkByteArrayInputStream) in).getStat().getVersion(); log.info("managed schema loaded . 
version : {} ", version); - return new ManagedIndexSchema(core.getSolrConfig(), name, () -> IndexSchemaFactory.getParsedSchema(in, zkLoader, core.getLatestSchema().getResourceName()), true, name, version, + return new ManagedIndexSchema(core.getSolrConfig(), schemaResourceName, () -> IndexSchemaFactory.getParsedSchema(in, zkLoader, core.getLatestSchema().getResourceName()), true, schemaResourceName, version, core.getLatestSchema().getSchemaUpdateLock()); } else { return (ManagedIndexSchema) core.getLatestSchema(); diff --git a/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java b/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java index e20a62c3362..3a4ba52d929 100644 --- a/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java +++ b/solr/core/src/java/org/apache/solr/schema/ZkIndexSchemaReader.java @@ -48,8 +48,8 @@ public ZkIndexSchemaReader(ManagedIndexSchemaFactory managedIndexSchemaFactory, this.managedIndexSchemaFactory = managedIndexSchemaFactory; zkLoader = (ZkSolrResourceLoader)managedIndexSchemaFactory.getResourceLoader(); this.zkClient = zkLoader.getZkController().getZkClient(); - this.managedSchemaPath = zkLoader.getConfigSetZkPath() + "/" + managedIndexSchemaFactory.getManagedSchemaResourceName(); - this.uniqueCoreId = solrCore.getName()+":"+solrCore.getStartNanoTime(); + this.managedSchemaPath = managedIndexSchemaFactory.lookupZKManagedSchemaPath(); + this.uniqueCoreId = solrCore.getName() + ":" + solrCore.getStartNanoTime(); // register a CloseHook for the core this reader is linked to, so that we can de-register the listener solrCore.addCloseHook(new CloseHook() { @@ -172,10 +172,10 @@ void updateSchema(Watcher watcher, int expectedZkVersion) throws KeeperException } long start = System.nanoTime(); String resourceName = managedIndexSchemaFactory.getManagedSchemaResourceName(); - ManagedIndexSchema newSchema = new ManagedIndexSchema - (managedIndexSchemaFactory.getConfig(), resourceName, - () -> IndexSchemaFactory.getParsedSchema(new ByteArrayInputStream(data),zkLoader , resourceName), managedIndexSchemaFactory.isMutable(), - resourceName, stat.getVersion(), oldSchema.getSchemaUpdateLock()); + ManagedIndexSchema newSchema = new ManagedIndexSchema(managedIndexSchemaFactory.getConfig(), resourceName, + () -> IndexSchemaFactory.getParsedSchema(new ByteArrayInputStream(data), zkLoader, resourceName), + managedIndexSchemaFactory.isMutable(), + resourceName, stat.getVersion(), oldSchema.getSchemaUpdateLock()); managedIndexSchemaFactory.setSchema(newSchema); long stop = System.nanoTime(); log.info("Finished refreshing schema in {} ms", TimeUnit.MILLISECONDS.convert(stop - start, TimeUnit.NANOSECONDS)); @@ -198,7 +198,7 @@ public void command() { // force update now as the schema may have changed while our zk session was expired updateSchema(null, -1); } catch (Exception exc) { - log.error("Failed to update managed-schema watcher after session expiration due to: {}", exc); + log.error("Failed to update managed schema watcher after session expiration due to: {}", exc); } } diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java index 8982b6fe323..0965d0c82f5 100644 --- a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java +++ b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java @@ -24,14 +24,12 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; - import org.apache.hadoop.fs.FSDataInputStream; import 
org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; -import org.apache.solr.common.util.DataInputInputStream; import org.apache.solr.common.util.FastInputStream; import org.apache.solr.common.util.FastOutputStream; import org.apache.solr.common.util.JavaBinCodec; @@ -42,20 +40,18 @@ import org.slf4j.LoggerFactory; /** - * Log Format: List{Operation, Version, ...} - * ADD, VERSION, DOC - * DELETE, VERSION, ID_BYTES - * DELETE_BY_QUERY, VERSION, String + * Log Format: List{Operation, Version, ...} ADD, VERSION, DOC DELETE, VERSION, ID_BYTES + * DELETE_BY_QUERY, VERSION, String * - * TODO: keep two files, one for [operation, version, id] and the other for the actual - * document data. That way we could throw away document log files more readily - * while retaining the smaller operation log files longer (and we can retrieve - * the stored fields from the latest documents from the index). + *
<p>
TODO: keep two files, one for [operation, version, id] and the other for the actual document + * data. That way we could throw away document log files more readily while retaining the smaller + * operation log files longer (and we can retrieve the stored fields from the latest documents from + * the index). * - * This would require keeping all source fields stored of course. + *
<p>
This would require keeping all source fields stored of course. * - * This would also allow to not log document data for requests with commit=true - * in them (since we know that if the request succeeds, all docs will be committed) + *
<p>
This would also allow to not log document data for requests with commit=true in them (since we + * know that if the request succeeds, all docs will be committed) * * @deprecated since 8.6 */ @@ -65,7 +61,6 @@ public class HdfsTransactionLog extends TransactionLog { private static boolean debug = log.isDebugEnabled(); private static boolean trace = log.isTraceEnabled(); - Path tlogFile; private long finalLogSize; @@ -74,53 +69,65 @@ public class HdfsTransactionLog extends TransactionLog { private volatile boolean isClosed = false; - HdfsTransactionLog(FileSystem fs, Path tlogFile, Collection globalStrings, Integer tlogDfsReplication) { + HdfsTransactionLog( + FileSystem fs, Path tlogFile, Collection globalStrings, Integer tlogDfsReplication) { this(fs, tlogFile, globalStrings, false, tlogDfsReplication); } - HdfsTransactionLog(FileSystem fs, Path tlogFile, Collection globalStrings, boolean openExisting, Integer tlogDfsReplication) { + HdfsTransactionLog( + FileSystem fs, + Path tlogFile, + Collection globalStrings, + boolean openExisting, + Integer tlogDfsReplication) { super(); boolean success = false; this.fs = fs; try { this.tlogFile = tlogFile; - + if (fs.exists(tlogFile) && openExisting) { - FSHDFSUtils.recoverFileLease(fs, tlogFile, fs.getConf(), new CallerInfo(){ + FSHDFSUtils.recoverFileLease( + fs, + tlogFile, + fs.getConf(), + new CallerInfo() { + + @Override + public boolean isCallerClosed() { + return isClosed; + } + }); - @Override - public boolean isCallerClosed() { - return isClosed; - }}); - tlogOutStream = fs.append(tlogFile); } else { fs.delete(tlogFile, false); - - tlogOutStream = fs.create(tlogFile, (short)tlogDfsReplication.intValue()); + + tlogOutStream = fs.create(tlogFile, (short) tlogDfsReplication.intValue()); tlogOutStream.hsync(); } fos = new FastOutputStream(tlogOutStream, new byte[65536], 0); - long start = tlogOutStream.getPos(); + long start = tlogOutStream.getPos(); if (openExisting) { if (start > 0) { readHeader(null); - - // we should already be at the end - // raf.seek(start); - // assert channel.position() == start; - fos.setWritten(start); // reflect that we aren't starting at the beginning - //assert fos.size() == channel.size(); + // we should already be at the end + // raf.seek(start); + + // assert channel.position() == start; + fos.setWritten(start); // reflect that we aren't starting at the beginning + // assert fos.size() == channel.size(); } else { addGlobalStrings(globalStrings); } } else { if (start > 0) { - log.error("New transaction log already exists:{} size={}", tlogFile, tlogOutStream.size()); + log.error( + "New transaction log already exists:{} size={}", tlogFile, tlogOutStream.size()); } addGlobalStrings(globalStrings); @@ -130,7 +137,7 @@ public boolean isCallerClosed() { assert ObjectReleaseTracker.track(this); log.debug("Opening new tlog {}", this); - + } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e); } finally { @@ -148,16 +155,16 @@ public boolean isCallerClosed() { public boolean endsWithCommit() throws IOException { ensureFlushed(); long size = getLogSize(); - + // the end of the file should have the end message (added during a commit) plus a 4 byte size - byte[] buf = new byte[ END_MESSAGE.length() ]; + byte[] buf = new byte[END_MESSAGE.length()]; long pos = size - END_MESSAGE.length() - 4; if (pos < 0) return false; - + FSDataFastInputStream dis = new FSDataFastInputStream(fs.open(tlogFile), pos); try { dis.read(buf); - for (int i=0; i header = null; + Map header = null; try { try 
(LogCodec codec = new LogCodec(resolver)) { - header = (Map) codec.unmarshal(fis); + header = (Map) codec.unmarshal(fis); } - + fis.readInt(); // skip size } finally { if (fis != null && closeFis) { @@ -187,10 +194,10 @@ private void readHeader(FastInputStream fis) throws IOException { // needed to read other records synchronized (this) { - globalStringList = (List)header.get("strings"); + globalStringList = (List) header.get("strings"); globalStringMap = new HashMap<>(globalStringList.size()); - for (int i=0; i= sz) { log.info("Read available inputstream data, opening new inputstream pos={} sz={}", pos, sz); - + fis.close(); initStream(pos); } - + if (pos == 0) { readHeader(fis); - // shouldn't currently happen - header and first record are currently written at the same time + // shouldn't currently happen - header and first record are currently written at the same + // time synchronized (HdfsTransactionLog.this) { if (fis.position() >= getLogSize()) { return null; @@ -434,7 +444,7 @@ public Object next() throws IOException, InterruptedException { } } - Object o = codec.readVal(fis); + Object o = JavaBinCodec.readVal(codec); // skip over record size int size = fis.readInt(); @@ -455,23 +465,29 @@ public void close() { @Override public String toString() { synchronized (HdfsTransactionLog.this) { - return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + getLogSize() + "}"; + return "LogReader{" + + "file=" + + tlogFile + + ", position=" + + fis.position() + + ", end=" + + getLogSize() + + "}"; } } - + @Override public long currentPos() { return fis.position(); } - + @Override public long currentSize() { return getLogSize(); } - } - public class HDFSSortedLogReader extends HDFSLogReader{ + public class HDFSSortedLogReader extends HDFSLogReader { private long startingPos; private boolean inOrder = true; private TreeMap versionToPos; @@ -490,7 +506,7 @@ public Object next() throws IOException, InterruptedException { long pos = startingPos; long lastVersion = Long.MIN_VALUE; - while ( (o = super.next()) != null) { + while ((o = super.next()) != null) { List entry = (List) o; long version = (Long) entry.get(UpdateLog.VERSION_IDX); version = Math.abs(version); @@ -517,17 +533,19 @@ public Object next() throws IOException, InterruptedException { public class HDFSReverseReader extends ReverseReader { FSDataFastInputStream fis; - private LogCodec codec = new LogCodec(resolver) { - @Override - public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) { - // Given that the SolrInputDocument is last in an add record, it's OK to just skip - // reading it completely. - return null; - } - }; + private LogCodec codec = + new LogCodec(resolver) { + @Override + public SolrInputDocument readSolrInputDocument(JavaBinCodec javaBinCodec) { + // Given that the SolrInputDocument is last in an add record, it's OK to just skip + // reading it completely. 
+ return null; + } + }; - int nextLength; // length of the next record (the next one closer to the start of the log file) - long prevPos; // where we started reading from last time (so prevPos - nextLength == start of next record) + int nextLength; // length of the next record (the next one closer to the start of the log file) + long prevPos; // where we started reading from last time (so prevPos - nextLength == start of + // next record) public HDFSReverseReader() throws IOException { incref(); @@ -539,8 +557,8 @@ public HDFSReverseReader() throws IOException { } fis = new FSDataFastInputStream(fs.open(tlogFile), 0); - - if (sz >=4) { + codec.initRead(fis); + if (sz >= 4) { // readHeader(fis); // should not be needed prevPos = sz - 4; fis.seek(prevPos); @@ -548,8 +566,8 @@ public HDFSReverseReader() throws IOException { } } - - /** Returns the next object from the log, or null if none available. + /** + * Returns the next object from the log, or null if none available. * * @return The log record, or null if EOF * @throws IOException If there is a low-level I/O error. @@ -561,10 +579,10 @@ public Object next() throws IOException { int thisLength = nextLength; - long recordStart = prevPos - thisLength; // back up to the beginning of the next record - prevPos = recordStart - 4; // back up 4 more to read the length of the next record + long recordStart = prevPos - thisLength; // back up to the beginning of the next record + prevPos = recordStart - 4; // back up 4 more to read the length of the next record - if (prevPos <= 0) return null; // this record is the header + if (prevPos <= 0) return null; // this record is the header long bufferPos = fis.getBufferPos(); if (prevPos >= bufferPos) { @@ -572,26 +590,31 @@ public Object next() throws IOException { } else { // Position buffer so that this record is at the end. // For small records, this will cause subsequent calls to next() to be within the buffer. - long seekPos = endOfThisRecord - fis.getBufferSize(); - seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger then the block size. + long seekPos = endOfThisRecord - fis.getBufferSize(); + seekPos = + Math.min( + seekPos, + prevPos); // seek to the start of the record if it's larger then the block size. seekPos = Math.max(seekPos, 0); fis.seek(seekPos); - fis.peek(); // cause buffer to be filled + fis.peek(); // cause buffer to be filled } fis.seek(prevPos); - nextLength = fis.readInt(); // this is the length of the *next* record (i.e. closer to the beginning) + nextLength = + fis.readInt(); // this is the length of the *next* record (i.e. 
closer to the beginning) // TODO: optionally skip document data - Object o = codec.readVal(fis); + Object o = JavaBinCodec.readVal(codec); - // assert fis.position() == prevPos + 4 + thisLength; // this is only true if we read all the data (and we currently skip reading SolrInputDocument + // assert fis.position() == prevPos + 4 + thisLength; // this is only true if we read all the + // data (and we currently skip reading SolrInputDocument return o; } /* returns the position in the log file of the last record returned by next() */ public long position() { - return prevPos + 4; // skip the length + return prevPos + 4; // skip the length } public void close() { @@ -606,17 +629,19 @@ public void close() { @Override public String toString() { synchronized (HdfsTransactionLog.this) { - return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + getLogSize() + "}"; + return "LogReader{" + + "file=" + + tlogFile + + ", position=" + + fis.position() + + ", end=" + + getLogSize() + + "}"; } } - - } - } - - class FSDataFastInputStream extends FastInputStream { private FSDataInputStream fis; @@ -635,10 +660,12 @@ public int readWrappedStream(byte[] target, int offset, int len) throws IOExcept public void seek(long position) throws IOException { if (position <= readFromStream && position >= getBufferPos()) { // seek within buffer - pos = (int)(position - getBufferPos()); + pos = (int) (position - getBufferPos()); } else { - // long currSize = ch.size(); // not needed - underlying read should handle (unless read never done) - // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + " on file of size " + currSize + " file=" + ch); + // long currSize = ch.size(); // not needed - underlying read should handle (unless read + // never done) + // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + " + // on file of size " + currSize + " file=" + ch); readFromStream = position; end = pos = 0; } @@ -658,9 +685,18 @@ public int getBufferSize() { public void close() throws IOException { fis.close(); } - + @Override public String toString() { - return "readFromStream="+readFromStream +" pos="+pos +" end="+end + " bufferPos="+getBufferPos() + " position="+position() ; + return "readFromStream=" + + readFromStream + + " pos=" + + pos + + " end=" + + end + + " bufferPos=" + + getBufferPos() + + " position=" + + position(); } } diff --git a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java index 651a0fb0482..b0b49186e43 100644 --- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java +++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java @@ -35,71 +35,74 @@ import java.util.Map; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.util.BytesRef; import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrInputDocument; -import org.apache.solr.common.util.DataInputInputStream; import org.apache.solr.common.util.FastInputStream; import org.apache.solr.common.util.FastOutputStream; import org.apache.solr.common.util.JavaBinCodec; import org.apache.solr.common.util.ObjectReleaseTracker; +import org.eclipse.jetty.io.RuntimeIOException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Log Format: List{Operation, Version, ...} - * ADD, VERSION, DOC - * DELETE, VERSION, ID_BYTES - * DELETE_BY_QUERY, VERSION, String - * - * TODO: keep two files, 
one for [operation, version, id] and the other for the actual - * document data. That way we could throw away document log files more readily - * while retaining the smaller operation log files longer (and we can retrieve - * the stored fields from the latest documents from the index). + * Log Format: List{Operation, Version, ...} ADD, VERSION, DOC DELETE, VERSION, ID_BYTES + * DELETE_BY_QUERY, VERSION, String * - * This would require keeping all source fields stored of course. + *
<p>
TODO: keep two files, one for [operation, version, id] and the other for the actual document + * data. That way we could throw away document log files more readily while retaining the smaller + * operation log files longer (and we can retrieve the stored fields from the latest documents from + * the index). * - * This would also allow to not log document data for requests with commit=true - * in them (since we know that if the request succeeds, all docs will be committed) + *
<p>
This would require keeping all source fields stored of course. * + *
<p>
This would also allow to not log document data for requests with commit=true in them (since we + * know that if the request succeeds, all docs will be committed) */ public class TransactionLog implements Closeable { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private boolean debug = log.isDebugEnabled(); private boolean trace = log.isTraceEnabled(); - public final static String END_MESSAGE = "SOLR_TLOG_END"; + public static final String END_MESSAGE = "SOLR_TLOG_END"; long id; File tlogFile; RandomAccessFile raf; FileChannel channel; OutputStream os; - FastOutputStream fos; // all accesses to this stream should be synchronized on "this" (The TransactionLog) + FastOutputStream + fos; // all accesses to this stream should be synchronized on "this" (The TransactionLog) int numRecords; boolean isBuffer; - protected volatile boolean deleteOnClose = true; // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery) + protected volatile boolean deleteOnClose = + true; // we can delete old tlogs since they are currently only used for real-time-get (and in + // the future, recovery) AtomicInteger refcount = new AtomicInteger(1); Map globalStringMap = new HashMap<>(); List globalStringList = new ArrayList<>(); // write a BytesRef as a byte array - static final JavaBinCodec.ObjectResolver resolver = new JavaBinCodec.ObjectResolver() { - @Override - public Object resolve(Object o, JavaBinCodec codec) throws IOException { - if (o instanceof BytesRef) { - BytesRef br = (BytesRef) o; - codec.writeByteArray(br.bytes, br.offset, br.length); - return null; - } - // Fallback: we have no idea how to serialize this. Be noisy to prevent insidious bugs - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, - "TransactionLog doesn't know how to serialize " + o.getClass() + "; try implementing ObjectResolver?"); - } - }; + static final JavaBinCodec.ObjectResolver resolver = + new JavaBinCodec.ObjectResolver() { + @Override + public Object resolve(Object o, JavaBinCodec codec) throws IOException { + if (o instanceof BytesRef) { + BytesRef br = (BytesRef) o; + JavaBinCodec.writeByteArray(codec, br.bytes, br.offset, br.length); + return null; + } + // Fallback: we have no idea how to serialize this. 
Be noisy to prevent insidious bugs + throw new SolrException( + SolrException.ErrorCode.SERVER_ERROR, + "TransactionLog doesn't know how to serialize " + + o.getClass() + + "; try implementing ObjectResolver?"); + } + }; public class LogCodec extends JavaBinCodec { @@ -108,50 +111,52 @@ public LogCodec(JavaBinCodec.ObjectResolver resolver) { } @Override - public void writeExternString(CharSequence s) throws IOException { - if (s == null) { - writeTag(NULL); + public void writeExternString(CharSequence str) throws IOException { + if (str == null) { + writeTag(this, NULL); return; } - // no need to synchronize globalStringMap - it's only updated before the first record is written to the log - Integer idx = globalStringMap.get(s.toString()); + // no need to synchronize globalStringMap - it's only updated before the first record is + // written to the log + Integer idx = globalStringMap.get(str.toString()); if (idx == null) { // write a normal string - writeStr(s); + writeStr(this, str); } else { // write the extern string - writeTag(EXTERN_STRING, idx); + writeTag(this, EXTERN_STRING, idx); } } @Override - public CharSequence readExternString(DataInputInputStream fis) throws IOException { - int idx = readSize(fis); - if (idx != 0) {// idx != 0 is the index of the extern string - // no need to synchronize globalStringList - it's only updated before the first record is written to the log + public CharSequence readExternString(JavaBinCodec javaBinCodec) throws IOException { + int idx = readSize(javaBinCodec); + if (idx != 0) { // idx != 0 is the index of the extern string + // no need to synchronize globalStringList - it's only updated before the first record is + // written to the log return globalStringList.get(idx - 1); - } else {// idx == 0 means it has a string value + } else { // idx == 0 means it has a string value // this shouldn't happen with this codec subclass. 
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Corrupt transaction log"); } } @Override - protected Object readObject(DataInputInputStream dis) throws IOException { + protected Object readObject() throws IOException { if (UUID == tagByte) { - return new java.util.UUID(dis.readLong(), dis.readLong()); + return new java.util.UUID(readLong(this), readLong(this)); } - return super.readObject(dis); + return super.readObject(); } @Override public boolean writePrimitive(Object val) throws IOException { if (val instanceof java.util.UUID) { java.util.UUID uuid = (java.util.UUID) val; - daos.writeByte(UUID); - daos.writeLong(uuid.getMostSignificantBits()); - daos.writeLong(uuid.getLeastSignificantBits()); + writeByteToOS(this, UUID); + writeLongToOS(this, uuid.getMostSignificantBits()); + writeLongToOS(this, uuid.getLeastSignificantBits()); return true; } return super.writePrimitive(val); @@ -166,8 +171,12 @@ public boolean writePrimitive(Object val) throws IOException { boolean success = false; try { if (debug) { - log.debug("New TransactionLog file= {}, exists={}, size={} openExisting={}" - , tlogFile, tlogFile.exists(), tlogFile.length(), openExisting); + log.debug( + "New TransactionLog file= {}, exists={}, size={} openExisting={}", + tlogFile, + tlogFile.exists(), + tlogFile.length(), + openExisting); } // Parse tlog id from the filename @@ -187,7 +196,7 @@ public boolean writePrimitive(Object val) throws IOException { readHeader(null); raf.seek(start); assert channel.position() == start; - fos.setWritten(start); // reflect that we aren't starting at the beginning + fos.setWritten(start); // reflect that we aren't starting at the beginning assert fos.size() == channel.size(); } else { addGlobalStrings(globalStrings); @@ -222,11 +231,11 @@ public boolean writePrimitive(Object val) throws IOException { } // for subclasses - protected TransactionLog() { - } + protected TransactionLog() {} - /** Returns the number of records in the log (currently includes the header and an optional commit). - * Note: currently returns 0 for reopened existing log files. + /** + * Returns the number of records in the log (currently includes the header and an optional + * commit). Note: currently returns 0 for reopened existing log files. 
*/ public int numRecords() { synchronized (this) { @@ -245,7 +254,8 @@ public boolean endsWithCommit() throws IOException { byte[] buf = new byte[END_MESSAGE.length()]; long pos = size - END_MESSAGE.length() - 4; if (pos < 0) return false; - @SuppressWarnings("resource") final ChannelFastInputStream is = new ChannelFastInputStream(channel, pos); + @SuppressWarnings("resource") + final ChannelFastInputStream is = new ChannelFastInputStream(channel, pos); is.read(buf); for (int i = 0; i < buf.length; i++) { if (buf[i] != END_MESSAGE.charAt(i)) return false; @@ -254,24 +264,26 @@ public boolean endsWithCommit() throws IOException { } public long writeData(Object o) { - @SuppressWarnings("resource") final LogCodec codec = new LogCodec(resolver); + @SuppressWarnings("resource") + final LogCodec codec = new LogCodec(resolver); try { - long pos = fos.size(); // if we had flushed, this should be equal to channel.position() + long pos = fos.size(); // if we had flushed, this should be equal to channel.position() codec.init(fos); - codec.writeVal(o); + JavaBinCodec.writeVal(codec, o); return pos; } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e); } } - @SuppressWarnings({"unchecked"}) private void readHeader(FastInputStream fis) throws IOException { // read existing header fis = fis != null ? fis : new ChannelFastInputStream(channel, 0); - @SuppressWarnings("resource") final LogCodec codec = new LogCodec(resolver); - Map header = (Map) codec.unmarshal(fis); + @SuppressWarnings("resource") + final LogCodec codec = new LogCodec(resolver); + codec.initRead(fis); + Map header = (Map) JavaBinCodec.readVal(codec); fis.readInt(); // skip size @@ -294,7 +306,7 @@ protected void addGlobalStrings(Collection strings) { if (origSize > 0) { idx = globalStringMap.get(s); } - if (idx != null) continue; // already in list + if (idx != null) continue; // already in list globalStringList.add(s); globalStringMap.put(s, globalStringList.size()); } @@ -314,7 +326,7 @@ protected void writeLogHeader(LogCodec codec) throws IOException { Map header = new LinkedHashMap<>(); header.put("SOLR_TLOG", 1); // a magic string + version number header.put("strings", globalStringList); - codec.marshal(header, fos); + codec.marshal(header, fos, true); endRecord(pos); } @@ -332,7 +344,7 @@ protected void checkWriteHeader(LogCodec codec, SolrInputDocument optional) thro if (fos.size() != 0) return; synchronized (this) { - if (fos.size() != 0) return; // check again while synchronized + if (fos.size() != 0) return; // check again while synchronized if (optional != null) { addGlobalStrings(optional.getFieldNames()); } @@ -343,13 +355,12 @@ protected void checkWriteHeader(LogCodec codec, SolrInputDocument optional) thro int lastAddSize; /** - * Writes an add update command to the transaction log. This is not applicable for - * in-place updates; use {@link #write(AddUpdateCommand, long)}. - * (The previous pointer (applicable for in-place updates) is set to -1 while writing - * the command to the transaction log.) + * Writes an add update command to the transaction log. This is not applicable for in-place + * updates; use {@link #write(AddUpdateCommand, long)}. (The previous pointer (applicable for + * in-place updates) is set to -1 while writing the command to the transaction log.) 
+ * * @param cmd The add update command to be written * @return Returns the position pointer of the written update command - * * @see #write(AddUpdateCommand, long) */ public long write(AddUpdateCommand cmd) { @@ -357,11 +368,12 @@ public long write(AddUpdateCommand cmd) { } /** - * Writes an add update command to the transaction log. This should be called only for - * writing in-place updates, or else pass -1 as the prevPointer. - * @param cmd The add update command to be written - * @param prevPointer The pointer in the transaction log which this update depends - * on (applicable for in-place updates) + * Writes an add update command to the transaction log. This should be called only for writing + * in-place updates, or else pass -1 as the prevPointer. + * + * @param cmd The add update command to be written + * @param prevPointer The pointer in the transaction log which this update depends on (applicable + * for in-place updates) * @return Returns the position pointer of the written update command */ public long write(AddUpdateCommand cmd, long prevPointer) { @@ -374,36 +386,36 @@ public long write(AddUpdateCommand cmd, long prevPointer) { checkWriteHeader(codec, sdoc); // adaptive buffer sizing - int bufSize = lastAddSize; // unsynchronized access of lastAddSize should be fine + int bufSize = lastAddSize; // unsynchronized access of lastAddSize should be fine // at least 256 bytes and at most 1 MB bufSize = Math.min(1024 * 1024, Math.max(256, bufSize + (bufSize >> 3) + 256)); MemOutputStream out = new MemOutputStream(new byte[bufSize]); codec.init(out); if (cmd.isInPlaceUpdate()) { - codec.writeTag(JavaBinCodec.ARR, 5); - codec.writeInt(UpdateLog.UPDATE_INPLACE); // should just take one byte - codec.writeLong(cmd.getVersion()); - codec.writeLong(prevPointer); - codec.writeLong(cmd.prevVersion); + JavaBinCodec.writeTag(codec, JavaBinCodec.ARR, 5); + JavaBinCodec.writeInt(codec, UpdateLog.UPDATE_INPLACE); // should just take one byte + JavaBinCodec.writeLong(codec, cmd.getVersion()); + JavaBinCodec.writeLong(codec, prevPointer); + JavaBinCodec.writeLong(codec, cmd.prevVersion); codec.writeSolrInputDocument(cmd.getSolrInputDocument()); } else { - codec.writeTag(JavaBinCodec.ARR, 3); - codec.writeInt(UpdateLog.ADD); // should just take one byte - codec.writeLong(cmd.getVersion()); + JavaBinCodec.writeTag(codec, JavaBinCodec.ARR, 3); + JavaBinCodec.writeInt(codec, UpdateLog.ADD); // should just take one byte + JavaBinCodec.writeLong(codec, cmd.getVersion()); codec.writeSolrInputDocument(cmd.getSolrInputDocument()); } lastAddSize = (int) out.size(); synchronized (this) { - long pos = fos.size(); // if we had flushed, this should be equal to channel.position() + long pos = fos.size(); // if we had flushed, this should be equal to channel.position() assert pos != 0; /*** - System.out.println("###writing at " + pos + " fos.size()=" + fos.size() + " raf.length()=" + raf.length()); - if (pos != fos.size()) { - throw new RuntimeException("ERROR" + "###writing at " + pos + " fos.size()=" + fos.size() + " raf.length()=" + raf.length()); - } + * System.out.println("###writing at " + pos + " fos.size()=" + fos.size() + " raf.length()=" + raf.length()); + * if (pos != fos.size()) { + * throw new RuntimeException("ERROR" + "###writing at " + pos + " fos.size()=" + fos.size() + " raf.length()=" + raf.length()); + * } ***/ out.writeAll(fos); @@ -428,13 +440,13 @@ public long writeDelete(DeleteUpdateCommand cmd) { MemOutputStream out = new MemOutputStream(new byte[20 + br.length]); codec.init(out); - 
codec.writeTag(JavaBinCodec.ARR, 3); - codec.writeInt(UpdateLog.DELETE); // should just take one byte - codec.writeLong(cmd.getVersion()); - codec.writeByteArray(br.bytes, br.offset, br.length); + JavaBinCodec.writeTag(codec, JavaBinCodec.ARR, 3); + JavaBinCodec.writeInt(codec, UpdateLog.DELETE); // should just take one byte + JavaBinCodec.writeLong(codec, cmd.getVersion()); + JavaBinCodec.writeByteArray(codec, br.bytes, br.offset, br.length); synchronized (this) { - long pos = fos.size(); // if we had flushed, this should be equal to channel.position() + long pos = fos.size(); // if we had flushed, this should be equal to channel.position() assert pos != 0; out.writeAll(fos); endRecord(pos); @@ -445,7 +457,6 @@ public long writeDelete(DeleteUpdateCommand cmd) { } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e); } - } public long writeDeleteByQuery(DeleteUpdateCommand cmd) { @@ -455,13 +466,13 @@ public long writeDeleteByQuery(DeleteUpdateCommand cmd) { MemOutputStream out = new MemOutputStream(new byte[20 + (cmd.query.length())]); codec.init(out); - codec.writeTag(JavaBinCodec.ARR, 3); - codec.writeInt(UpdateLog.DELETE_BY_QUERY); // should just take one byte - codec.writeLong(cmd.getVersion()); - codec.writeStr(cmd.query); + JavaBinCodec.writeTag(codec, JavaBinCodec.ARR, 3); + JavaBinCodec.writeInt(codec, UpdateLog.DELETE_BY_QUERY); // should just take one byte + JavaBinCodec.writeLong(codec, cmd.getVersion()); + JavaBinCodec.writeStr(codec, cmd.query); synchronized (this) { - long pos = fos.size(); // if we had flushed, this should be equal to channel.position() + long pos = fos.size(); // if we had flushed, this should be equal to channel.position() out.writeAll(fos); endRecord(pos); // fos.flushBuffer(); // flush later @@ -470,29 +481,28 @@ public long writeDeleteByQuery(DeleteUpdateCommand cmd) { } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e); } - } - public long writeCommit(CommitUpdateCommand cmd) { LogCodec codec = new LogCodec(resolver); synchronized (this) { try { - long pos = fos.size(); // if we had flushed, this should be equal to channel.position() + long pos = fos.size(); // if we had flushed, this should be equal to channel.position() if (pos == 0) { writeLogHeader(codec); pos = fos.size(); } codec.init(fos); - codec.writeTag(JavaBinCodec.ARR, 3); - codec.writeInt(UpdateLog.COMMIT); // should just take one byte - codec.writeLong(cmd.getVersion()); - codec.writeStr(END_MESSAGE); // ensure these bytes are (almost) last in the file + JavaBinCodec.writeTag(codec, JavaBinCodec.ARR, 3); + JavaBinCodec.writeInt(codec, UpdateLog.COMMIT); // should just take one byte + JavaBinCodec.writeLong(codec, cmd.getVersion()); + JavaBinCodec.writeStr( + codec, END_MESSAGE); // ensure these bytes are (almost) last in the file endRecord(pos); - fos.flush(); // flush since this will be the last record in a log fill + fos.flush(); // flush since this will be the last record in a log fill assert fos.size() == channel.size(); return pos; @@ -502,7 +512,6 @@ public long writeCommit(CommitUpdateCommand cmd) { } } - /* This method is thread safe */ public Object lookup(long pos) { @@ -516,16 +525,17 @@ public Object lookup(long pos) { // TODO: optimize this by keeping track of what we have flushed up to fos.flushBuffer(); /*** - System.out.println("###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos); - if (fos.size() != raf.length() || pos >= fos.size() ) { - throw new 
RuntimeException("ERROR" + "###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos); - } + * System.out.println("###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos); + * if (fos.size() != raf.length() || pos >= fos.size() ) { + * throw new RuntimeException("ERROR" + "###flushBuffer to " + fos.size() + " raf.length()=" + raf.length() + " pos="+pos); + * } ***/ } ChannelFastInputStream fis = new ChannelFastInputStream(channel, pos); try (LogCodec codec = new LogCodec(resolver)) { - return codec.readVal(fis); + codec.init(fis); + return JavaBinCodec.readVal(codec); } } catch (IOException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e); @@ -535,7 +545,8 @@ public Object lookup(long pos) { public void incref() { int result = refcount.incrementAndGet(); if (result <= 1) { - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "incref on a closed log: " + this); + throw new SolrException( + SolrException.ErrorCode.SERVER_ERROR, "incref on a closed log: " + this); } } @@ -556,10 +567,11 @@ public long position() { } } - /** Move to a read-only state, closing and releasing resources while keeping the log available for reads */ - public void closeOutput() { - - } + /** + * Move to a read-only state, closing and releasing resources while keeping the log available for + * reads + */ + public void closeOutput() {} public void finish(UpdateLog.SyncLevel syncLevel) { if (syncLevel == UpdateLog.SyncLevel.NONE) return; @@ -626,16 +638,14 @@ public long getLogSize() { return 0; } - /** - * @return the FastOutputStream size - */ + /** @return the FastOutputStream size */ public synchronized long getLogSizeFromStream() { return fos.size(); } - /** Returns a reader that can be used while a log is still in use. - * Currently only *one* LogReader may be outstanding, and that log may only - * be used from a single thread. + /** + * Returns a reader that can be used while a log is still in use. Currently only *one* LogReader + * may be outstanding, and that log may only be used from a single thread. */ public LogReader getReader(long startingPos) { return new LogReader(startingPos); @@ -657,12 +667,18 @@ public class LogReader { public LogReader(long startingPos) { incref(); fis = new ChannelFastInputStream(channel, startingPos); + try { + codec.init(fis); + } catch (IOException e) { + throw new RuntimeIOException(e); + } } // for classes that extend protected LogReader() {} - /** Returns the next object from the log, or null if none available. + /** + * Returns the next object from the log, or null if none available. * * @return The log record, or null if EOF * @throws IOException If there is a low-level I/O error. 
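For orientation, a rough sketch of the LogReader read path as restructured here: the codec is bound to its input stream once, and records are then read through the static JavaBinCodec.readVal(codec) entry point this patch switches to. The helper method below is illustrative only (written as if inside TransactionLog, where LogCodec, resolver and ChannelFastInputStream are defined), not code from the patch.

// Sketch only: assumes the static JavaBinCodec helpers introduced by this patch.
Object readOneRecord(FileChannel channel, long startingPos) throws IOException {
  FastInputStream fis = new ChannelFastInputStream(channel, startingPos);
  LogCodec codec = new LogCodec(resolver);
  codec.init(fis);                              // bind the codec to the stream up front, as the constructor now does
  Object record = JavaBinCodec.readVal(codec);  // formerly codec.readVal(fis)
  fis.readInt();                                // each record is followed by its length; skip it as next() does
  return record;
}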
@@ -685,7 +701,8 @@ public Object next() throws IOException, InterruptedException { if (pos == 0) { readHeader(fis); - // shouldn't currently happen - header and first record are currently written at the same time + // shouldn't currently happen - header and first record are currently written at the same + // time synchronized (TransactionLog.this) { if (fis.position() >= fos.size()) { return null; @@ -694,7 +711,7 @@ public Object next() throws IOException, InterruptedException { } } - Object o = codec.readVal(fis); + Object o = JavaBinCodec.readVal(codec); // skip over record size int size = fis.readInt(); @@ -710,7 +727,14 @@ public void close() { @Override public String toString() { synchronized (TransactionLog.this) { - return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}"; + return "LogReader{" + + "file=" + + tlogFile + + ", position=" + + fis.position() + + ", end=" + + fos.size() + + "}"; } } @@ -725,7 +749,6 @@ public long currentPos() { public long currentSize() throws IOException { return channel.size(); } - } public class SortedLogReader extends LogReader { @@ -774,7 +797,8 @@ public Object next() throws IOException, InterruptedException { public abstract class ReverseReader { - /** Returns the next object from the log, or null if none available. + /** + * Returns the next object from the log, or null if none available. * * @return The log record, or null if EOF * @throws IOException If there is a low-level I/O error. @@ -788,22 +812,23 @@ public abstract class ReverseReader { @Override public abstract String toString(); - } public class FSReverseReader extends ReverseReader { ChannelFastInputStream fis; - private LogCodec codec = new LogCodec(resolver) { - @Override - public SolrInputDocument readSolrInputDocument(DataInputInputStream dis) { - // Given that the SolrInputDocument is last in an add record, it's OK to just skip - // reading it completely. - return null; - } - }; + private LogCodec codec = + new LogCodec(resolver) { + @Override + public SolrInputDocument readSolrInputDocument(JavaBinCodec javaBinCodec) { + // Given that the SolrInputDocument is last in an add record, it's OK to just skip + // reading it completely. + return null; + } + }; - int nextLength; // length of the next record (the next one closer to the start of the log file) - long prevPos; // where we started reading from last time (so prevPos - nextLength == start of next record) + int nextLength; // length of the next record (the next one closer to the start of the log file) + long prevPos; // where we started reading from last time (so prevPos - nextLength == start of + // next record) public FSReverseReader() throws IOException { incref(); @@ -816,6 +841,7 @@ public FSReverseReader() throws IOException { } fis = new ChannelFastInputStream(channel, 0); + codec.init(fis); if (sz >= 4) { // readHeader(fis); // should not be needed prevPos = sz - 4; @@ -824,7 +850,8 @@ public FSReverseReader() throws IOException { } } - /** Returns the next object from the log, or null if none available. + /** + * Returns the next object from the log, or null if none available. * * @return The log record, or null if EOF * @throws IOException If there is a low-level I/O error. 
@@ -836,10 +863,10 @@ public Object next() throws IOException { int thisLength = nextLength; - long recordStart = prevPos - thisLength; // back up to the beginning of the next record - prevPos = recordStart - 4; // back up 4 more to read the length of the next record + long recordStart = prevPos - thisLength; // back up to the beginning of the next record + prevPos = recordStart - 4; // back up 4 more to read the length of the next record - if (prevPos <= 0) return null; // this record is the header + if (prevPos <= 0) return null; // this record is the header long bufferPos = fis.getBufferPos(); if (prevPos >= bufferPos) { @@ -848,26 +875,31 @@ public Object next() throws IOException { // Position buffer so that this record is at the end. // For small records, this will cause subsequent calls to next() to be within the buffer. long seekPos = endOfThisRecord - fis.getBufferSize(); - seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger then the block size. + seekPos = + Math.min( + seekPos, + prevPos); // seek to the start of the record if it's larger then the block size. seekPos = Math.max(seekPos, 0); fis.seek(seekPos); - fis.peek(); // cause buffer to be filled + fis.peek(); // cause buffer to be filled } fis.seek(prevPos); - nextLength = fis.readInt(); // this is the length of the *next* record (i.e. closer to the beginning) + nextLength = + fis.readInt(); // this is the length of the *next* record (i.e. closer to the beginning) // TODO: optionally skip document data - Object o = codec.readVal(fis); + Object o = JavaBinCodec.readVal(codec); - // assert fis.position() == prevPos + 4 + thisLength; // this is only true if we read all the data (and we currently skip reading SolrInputDocument + // assert fis.position() == prevPos + 4 + thisLength; // this is only true if we read all the + // data (and we currently skip reading SolrInputDocument return o; } /* returns the position in the log file of the last record returned by next() */ public long position() { - return prevPos + 4; // skip the length + return prevPos + 4; // skip the length } public void close() { @@ -877,11 +909,16 @@ public void close() { @Override public String toString() { synchronized (TransactionLog.this) { - return "LogReader{" + "file=" + tlogFile + ", position=" + fis.position() + ", end=" + fos.size() + "}"; + return "LogReader{" + + "file=" + + tlogFile + + ", position=" + + fis.position() + + ", end=" + + fos.size() + + "}"; } } - - } static class ChannelFastInputStream extends FastInputStream { @@ -906,15 +943,17 @@ public void seek(long position) throws IOException { // seek within buffer pos = (int) (position - getBufferPos()); } else { - // long currSize = ch.size(); // not needed - underlying read should handle (unless read never done) - // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + " on file of size " + currSize + " file=" + ch); + // long currSize = ch.size(); // not needed - underlying read should handle (unless read + // never done) + // if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + + // " on file of size " + currSize + " file=" + ch); readFromStream = position; end = pos = 0; } assert position() == position; } - /** where is the start of the buffer relative to the whole file */ + /** where is the start of the buffer relative to the whole file */ public long getBufferPos() { return readFromStream - end; } @@ -930,9 +969,16 @@ public void close() throws IOException { @Override 
public String toString() { - return "readFromStream=" + readFromStream + " pos=" + pos + " end=" + end + " bufferPos=" + getBufferPos() + " position=" + position(); + return "readFromStream=" + + readFromStream + + " pos=" + + pos + + " end=" + + end + + " bufferPos=" + + getBufferPos() + + " position=" + + position(); } } } - - diff --git a/solr/core/src/java/org/apache/solr/util/ExportTool.java b/solr/core/src/java/org/apache/solr/util/ExportTool.java index 9767a92f093..dc8b4a61267 100644 --- a/solr/core/src/java/org/apache/solr/util/ExportTool.java +++ b/solr/core/src/java/org/apache/solr/util/ExportTool.java @@ -17,6 +17,13 @@ package org.apache.solr.util; +import static org.apache.solr.common.params.CommonParams.FL; +import static org.apache.solr.common.params.CommonParams.JAVABIN; +import static org.apache.solr.common.params.CommonParams.Q; +import static org.apache.solr.common.params.CommonParams.SORT; +import static org.apache.solr.common.util.JavaBinCodec.SOLRINPUTDOC; + +import com.google.common.collect.ImmutableSet; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileOutputStream; @@ -46,8 +53,6 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.zip.GZIPOutputStream; - -import com.google.common.collect.ImmutableSet; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; import org.apache.lucene.util.SuppressForbidden; @@ -77,12 +82,6 @@ import org.noggit.CharArr; import org.noggit.JSONWriter; -import static org.apache.solr.common.params.CommonParams.FL; -import static org.apache.solr.common.params.CommonParams.JAVABIN; -import static org.apache.solr.common.params.CommonParams.Q; -import static org.apache.solr.common.params.CommonParams.SORT; -import static org.apache.solr.common.util.JavaBinCodec.SOLRINPUTDOC; - public class ExportTool extends SolrCLI.ToolBase { @Override public String getName() { @@ -94,7 +93,7 @@ public Option[] getOptions() { return OPTIONS; } - public static abstract class Info { + public abstract static class Info { String baseurl; String format; String query; @@ -109,11 +108,9 @@ public static abstract class Info { CloudSolrClient solrClient; DocsSink sink; - public Info(String url) { setUrl(url); setOutFormat(null, "jsonl"); - } public void setUrl(String url) { @@ -137,11 +134,8 @@ public void setOutFormat(String out, String format) { this.out = out; if (this.out == null) { - this.out = JAVABIN.equals(format) ? - coll + ".javabin" : - coll + ".json"; + this.out = JAVABIN.equals(format) ? 
coll + ".javabin" : coll + ".json"; } - } DocsSink getSink() { @@ -152,8 +146,12 @@ DocsSink getSink() { void fetchUniqueKey() throws SolrServerException, IOException { solrClient = new CloudSolrClient.Builder(Collections.singletonList(baseurl)).build(); - NamedList response = solrClient.request(new GenericSolrRequest(SolrRequest.METHOD.GET, "/schema/uniquekey", - new MapSolrParams(Collections.singletonMap("collection", coll)))); + NamedList response = + solrClient.request( + new GenericSolrRequest( + SolrRequest.METHOD.GET, + "/schema/uniquekey", + new MapSolrParams(Collections.singletonMap("collection", coll)))); uniqueKey = (String) response.get("uniqueKey"); } @@ -169,12 +167,9 @@ public void streamSolrDocument(SolrDocument doc) { } @Override - public void streamDocListInfo(long numFound, long start, Float maxScore) { - - } + public void streamDocListInfo(long numFound, long start, Float maxScore) {} }; } - } static Set formats = ImmutableSet.of(JAVABIN, "jsonl"); @@ -191,11 +186,11 @@ protected void runImpl(CommandLine cli) throws Exception { info.exportDocs(); } - static abstract class DocsSink { + abstract static class DocsSink { Info info; OutputStream fos; - abstract void start() throws IOException ; + abstract void start() throws IOException; @SuppressForbidden(reason = "Command line tool prints out to console") void accept(SolrDocument document) throws IOException { @@ -204,45 +199,43 @@ void accept(SolrDocument document) throws IOException { if (count % 100000 == 0) { System.out.println("\nDOCS: " + count); } - - } - void end() throws IOException { - } + void end() throws IOException {} } private static final Option[] OPTIONS = { - Option.builder("url") - .hasArg() - .required() - .desc("Address of the collection, example http://localhost:8983/solr/gettingstarted.") - .build(), - Option.builder("out") - .hasArg() - .required(false) - .desc("File name, defaults to 'collection-name.'.") - .build(), - Option.builder("format") - .hasArg() - .required(false) - .desc("Output format for exported docs (json or javabin), defaulting to json. File extension would be .json.") - .build(), - Option.builder("limit") - .hasArg() - .required(false) - .desc("Maximum number of docs to download. Default is 100, use -1 for all docs.") - .build(), - Option.builder("query") - .hasArg() - .required(false) - .desc("A custom query, default is '*:*'.") - .build(), - Option.builder("fields") - .hasArg() - .required(false) - .desc("Comma separated list of fields to export. By default all fields are fetched.") - .build() + Option.builder("url") + .hasArg() + .required() + .desc("Address of the collection, example http://localhost:8983/solr/gettingstarted.") + .build(), + Option.builder("out") + .hasArg() + .required(false) + .desc("File name, defaults to 'collection-name.'.") + .build(), + Option.builder("format") + .hasArg() + .required(false) + .desc( + "Output format for exported docs (json or javabin), defaulting to json. File extension would be .json.") + .build(), + Option.builder("limit") + .hasArg() + .required(false) + .desc("Maximum number of docs to download. Default is 100, use -1 for all docs.") + .build(), + Option.builder("query") + .hasArg() + .required(false) + .desc("A custom query, default is '*:*'.") + .build(), + Option.builder("fields") + .hasArg() + .required(false) + .desc("Comma separated list of fields to export. 
By default all fields are fetched.") + .build() }; static class JsonSink extends DocsSink { @@ -257,12 +250,12 @@ public JsonSink(Info info) { @Override public void start() throws IOException { fos = new FileOutputStream(info.out); - if(info.out.endsWith(".json.gz") || info.out.endsWith(".json.")) fos = new GZIPOutputStream(fos); + if (info.out.endsWith(".json.gz") || info.out.endsWith(".json.")) + fos = new GZIPOutputStream(fos); if (info.bufferSize > 0) { fos = new BufferedOutputStream(fos, info.bufferSize); } writer = new OutputStreamWriter(fos, StandardCharsets.UTF_8); - } @Override @@ -276,24 +269,25 @@ public void end() throws IOException { public synchronized void accept(SolrDocument doc) throws IOException { charArr.reset(); Map m = new LinkedHashMap<>(doc.size()); - doc.forEach((s, field) -> { - if (s.equals("_version_") || s.equals("_roor_")) return; - if (field instanceof List) { - if (((List) field).size() == 1) { - field = ((List) field).get(0); - } - } - field = constructDateStr(field); - if (field instanceof List) { - List list = (List) field; - if (hasdate(list)) { - ArrayList listCopy = new ArrayList<>(list.size()); - for (Object o : list) listCopy.add(constructDateStr(o)); - field = listCopy; - } - } - m.put(s, field); - }); + doc.forEach( + (s, field) -> { + if (s.equals("_version_") || s.equals("_roor_")) return; + if (field instanceof List) { + if (((List) field).size() == 1) { + field = ((List) field).get(0); + } + } + field = constructDateStr(field); + if (field instanceof List) { + List list = (List) field; + if (hasdate(list)) { + ArrayList listCopy = new ArrayList<>(list.size()); + for (Object o : list) listCopy.add(constructDateStr(o)); + field = listCopy; + } + } + m.put(s, field); + }); jsonWriter.write(m); writer.write(charArr.getArray(), charArr.getStart(), charArr.getEnd()); writer.append('\n'); @@ -303,7 +297,7 @@ public synchronized void accept(SolrDocument doc) throws IOException { private boolean hasdate(List list) { boolean hasDate = false; for (Object o : list) { - if(o instanceof Date){ + if (o instanceof Date) { hasDate = true; break; } @@ -313,7 +307,8 @@ private boolean hasdate(List list) { private Object constructDateStr(Object field) { if (field instanceof Date) { - field = DateTimeFormatter.ISO_INSTANT.format(Instant.ofEpochMilli(((Date) field).getTime())); + field = + DateTimeFormatter.ISO_INSTANT.format(Instant.ofEpochMilli(((Date) field).getTime())); } return field; } @@ -329,46 +324,47 @@ public JavabinSink(Info info) { @Override public void start() throws IOException { fos = new FileOutputStream(info.out); - if(info.out.endsWith(".json.gz") || info.out.endsWith(".json.")) fos = new GZIPOutputStream(fos); + if (info.out.endsWith(".json.gz") || info.out.endsWith(".json.")) + fos = new GZIPOutputStream(fos); if (info.bufferSize > 0) { fos = new BufferedOutputStream(fos, info.bufferSize); } - codec = new JavaBinCodec(fos, null); - codec.writeTag(JavaBinCodec.NAMED_LST, 2); - codec.writeStr("params"); - codec.writeNamedList(new NamedList<>()); - codec.writeStr("docs"); - codec.writeTag(JavaBinCodec.ITERATOR); - + codec = new JavaBinCodec(fos, null, false); + JavaBinCodec.writeTag(codec, JavaBinCodec.NAMED_LST, 2); + JavaBinCodec.writeStr(codec, "params"); + JavaBinCodec.writeNamedList(codec, new NamedList<>()); + JavaBinCodec.writeStr(codec, "docs"); + JavaBinCodec.writeTag(codec, JavaBinCodec.ITERATOR); } @Override public void end() throws IOException { - codec.writeTag(JavaBinCodec.END); + JavaBinCodec.writeTag(codec, JavaBinCodec.END); 
codec.close(); fos.flush(); fos.close(); - } - private BiConsumer bic= new BiConsumer<>() { - @Override - public void accept(String s, Object o) { - try { - if (s.equals("_version_") || s.equals("_root_")) return; - codec.writeExternString(s); - codec.writeVal(o); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - }; + + private final BiConsumer bic = + new BiConsumer<>() { + @Override + public void accept(String s, Object o) { + try { + if (s.equals("_version_") || s.equals("_root_")) return; + codec.writeExternString(s); + JavaBinCodec.writeVal(codec, o); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }; @Override public synchronized void accept(SolrDocument doc) throws IOException { int sz = doc.size(); - if(doc.containsKey("_version_")) sz--; - if(doc.containsKey("_root_")) sz--; - codec.writeTag(SOLRINPUTDOC, sz); + if (doc.containsKey("_version_")) sz--; + if (doc.containsKey("_root_")) sz--; + JavaBinCodec.writeTag(codec, SOLRINPUTDOC, sz); codec.writeFloat(1f); // document boost doc.forEach(bic); super.accept(doc); @@ -381,15 +377,14 @@ static class MultiThreadedRunner extends Info { SolrDocument EOFDOC = new SolrDocument(); volatile boolean failed = false; Map corehandlers = new HashMap<>(); - private long startTime ; + private long startTime; @SuppressForbidden(reason = "Need to print out time") public MultiThreadedRunner(String url) { super(url); - startTime= System.currentTimeMillis(); + startTime = System.currentTimeMillis(); } - @Override @SuppressForbidden(reason = "Need to print out time") void exportDocs() throws Exception { @@ -398,10 +393,12 @@ void exportDocs() throws Exception { ClusterStateProvider stateProvider = solrClient.getClusterStateProvider(); DocCollection coll = stateProvider.getCollection(this.coll); Map m = coll.getSlicesMap(); - producerThreadpool = ExecutorUtil.newMDCAwareFixedThreadPool(m.size(), - new SolrNamedThreadFactory("solrcli-exporter-producers")); - consumerThreadpool = ExecutorUtil.newMDCAwareFixedThreadPool(1, - new SolrNamedThreadFactory("solrcli-exporter-consumer")); + producerThreadpool = + ExecutorUtil.newMDCAwareFixedThreadPool( + m.size(), new SolrNamedThreadFactory("solrcli-exporter-producers")); + consumerThreadpool = + ExecutorUtil.newMDCAwareFixedThreadPool( + 1, new SolrNamedThreadFactory("solrcli-exporter-consumer")); sink.start(); CountDownLatch consumerlatch = new CountDownLatch(1); try { @@ -411,15 +408,17 @@ void exportDocs() throws Exception { output.println("NO: of shards : " + corehandlers.size()); } CountDownLatch producerLatch = new CountDownLatch(corehandlers.size()); - corehandlers.forEach((s, coreHandler) -> producerThreadpool.submit(() -> { - try { - coreHandler.exportDocsFromCore(); - } catch (Exception e) { - if(output != null) output.println("Error exporting docs from : "+s); - - } - producerLatch.countDown(); - })); + corehandlers.forEach( + (s, coreHandler) -> + producerThreadpool.submit( + () -> { + try { + coreHandler.exportDocsFromCore(); + } catch (Exception e) { + if (output != null) output.println("Error exporting docs from : " + s); + } + producerLatch.countDown(); + })); producerLatch.await(); queue.offer(EOFDOC, 10, TimeUnit.SECONDS); @@ -433,11 +432,15 @@ void exportDocs() throws Exception { try { Files.delete(new File(out).toPath()); } catch (IOException e) { - //ignore + // ignore } } - System.out.println("\nTotal Docs exported: "+ (docsWritten.get() -1)+ - ". 
Time taken: "+( (System.currentTimeMillis() - startTime)/1000) + "secs"); + System.out.println( + "\nTotal Docs exported: " + + (docsWritten.get() - 1) + + ". Time taken: " + + ((System.currentTimeMillis() - startTime) / 1000) + + "secs"); } } @@ -445,37 +448,38 @@ private void addProducers(Map m) { for (Map.Entry entry : m.entrySet()) { Slice slice = entry.getValue(); Replica replica = slice.getLeader(); - if (replica == null) replica = slice.getReplicas().iterator().next();// get a random replica + if (replica == null) + replica = slice.getReplicas().iterator().next(); // get a random replica CoreHandler coreHandler = new CoreHandler(replica); corehandlers.put(replica.getCoreName(), coreHandler); } } private void addConsumer(CountDownLatch consumerlatch) { - consumerThreadpool.submit(() -> { - while (true) { - SolrDocument doc = null; - try { - doc = queue.poll(30, TimeUnit.SECONDS); - } catch (InterruptedException e) { - if (output != null) output.println("Consumer interrupted"); - failed = true; - break; - } - if (doc == EOFDOC) break; - try { - if (docsWritten.get() > limit) continue; - sink.accept(doc); - } catch (Exception e) { - if (output != null) output.println("Failed to write to file " + e.getMessage()); - failed = true; - } - } - consumerlatch.countDown(); - }); + consumerThreadpool.submit( + () -> { + while (true) { + SolrDocument doc = null; + try { + doc = queue.poll(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + if (output != null) output.println("Consumer interrupted"); + failed = true; + break; + } + if (doc == EOFDOC) break; + try { + if (docsWritten.get() > limit) continue; + sink.accept(doc); + } catch (Exception e) { + if (output != null) output.println("Failed to write to file " + e.getMessage()); + failed = true; + } + } + consumerlatch.countDown(); + }); } - class CoreHandler { final Replica replica; long expectedDocs; @@ -485,8 +489,7 @@ class CoreHandler { this.replica = replica; } - boolean exportDocsFromCore() - throws IOException, SolrServerException { + boolean exportDocsFromCore() throws IOException, SolrServerException { HttpSolrClient client = new HttpSolrClient.Builder(baseurl).build(); try { expectedDocs = getDocCount(replica.getCoreName(), client); @@ -498,33 +501,41 @@ boolean exportDocsFromCore() params.add(CommonParams.DISTRIB, "false"); params.add(CommonParams.ROWS, "1000"); String cursorMark = CursorMarkParams.CURSOR_MARK_START; - Consumer wrapper = doc -> { - try { - queue.offer(doc, 10, TimeUnit.SECONDS); - receivedDocs.incrementAndGet(); - } catch (InterruptedException e) { - failed = true; - if (output != null) output.println("Failed to write docs from" + e.getMessage()); - } - }; - StreamingBinaryResponseParser responseParser = new StreamingBinaryResponseParser(getStreamer(wrapper)); + Consumer wrapper = + doc -> { + try { + queue.offer(doc, 10, TimeUnit.SECONDS); + receivedDocs.incrementAndGet(); + } catch (InterruptedException e) { + failed = true; + if (output != null) output.println("Failed to write docs from" + e.getMessage()); + } + }; + StreamingBinaryResponseParser responseParser = + new StreamingBinaryResponseParser(getStreamer(wrapper)); while (true) { if (failed) return false; if (docsWritten.get() > limit) return true; params.set(CursorMarkParams.CURSOR_MARK_PARAM, cursorMark); - request = new GenericSolrRequest(SolrRequest.METHOD.GET, - "/" + replica.getCoreName() + "/select", params); + request = + new GenericSolrRequest( + SolrRequest.METHOD.GET, "/" + replica.getCoreName() + "/select", params); 
request.setResponseParser(responseParser); try { NamedList rsp = client.request(request); String nextCursorMark = (String) rsp.get(CursorMarkParams.CURSOR_MARK_NEXT); if (nextCursorMark == null || Objects.equals(cursorMark, nextCursorMark)) { if (output != null) - output.println(StrUtils.formatString("\nExport complete for : {0}, docs : {1}", replica.getCoreName(), receivedDocs.get())); + output.println( + StrUtils.formatString( + "\nExport complete for : {0}, docs : {1}", + replica.getCoreName(), receivedDocs.get())); if (expectedDocs != receivedDocs.get()) { if (output != null) { - output.println(StrUtils.formatString("Could not download all docs for core {0} , expected: {1} , actual", - replica.getCoreName(), expectedDocs, receivedDocs)); + output.println( + StrUtils.formatString( + "Could not download all docs for core {0} , expected: {1} , actual", + replica.getCoreName(), expectedDocs, receivedDocs)); return false; } } @@ -533,7 +544,12 @@ boolean exportDocsFromCore() cursorMark = nextCursorMark; if (output != null) output.print("."); } catch (SolrServerException e) { - if(output != null) output.println("Error reading from server "+ replica.getBaseUrl()+"/"+ replica.getCoreName()); + if (output != null) + output.println( + "Error reading from server " + + replica.getBaseUrl() + + "/" + + replica.getCoreName()); failed = true; return false; } @@ -545,13 +561,13 @@ boolean exportDocsFromCore() } } - - static long getDocCount(String coreName, HttpSolrClient client) throws SolrServerException, IOException { + static long getDocCount(String coreName, HttpSolrClient client) + throws SolrServerException, IOException { SolrQuery q = new SolrQuery("*:*"); q.setRows(0); q.add("distrib", "false"); - GenericSolrRequest request = new GenericSolrRequest(SolrRequest.METHOD.GET, - "/" + coreName + "/select", q); + GenericSolrRequest request = + new GenericSolrRequest(SolrRequest.METHOD.GET, "/" + coreName + "/select", q); NamedList res = client.request(request); SolrDocumentList sdl = (SolrDocumentList) res.get("response"); return sdl.getNumFound(); diff --git a/solr/core/src/java/org/apache/solr/util/PackageTool.java b/solr/core/src/java/org/apache/solr/util/PackageTool.java index 89aa244945e..34f66275157 100644 --- a/solr/core/src/java/org/apache/solr/util/PackageTool.java +++ b/solr/core/src/java/org/apache/solr/util/PackageTool.java @@ -16,11 +16,13 @@ */ package org.apache.solr.util; +import static org.apache.solr.packagemanager.PackageUtils.print; +import static org.apache.solr.packagemanager.PackageUtils.printGreen; + import java.io.File; import java.lang.invoke.MethodHandles; import java.nio.file.Paths; import java.util.Map; - import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Option; import org.apache.commons.io.FileUtils; @@ -43,9 +45,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.solr.packagemanager.PackageUtils.print; -import static org.apache.solr.packagemanager.PackageUtils.printGreen; - public class PackageTool extends SolrCLI.ToolBase { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -55,7 +54,7 @@ public PackageTool() { // Need a logging free, clean output going through to the user. 
Configurator.setRootLevel(Level.OFF); } - + @Override public String getName() { return "package"; diff --git a/solr/core/src/java/org/apache/solr/util/RecordingJSONParser.java b/solr/core/src/java/org/apache/solr/util/RecordingJSONParser.java index 932ae5fbffd..bb2ba75dfdd 100644 --- a/solr/core/src/java/org/apache/solr/util/RecordingJSONParser.java +++ b/solr/core/src/java/org/apache/solr/util/RecordingJSONParser.java @@ -36,7 +36,7 @@ public class RecordingJSONParser extends JSONParser { private boolean objectStarted = false; private long lastMarkedPosition = 0; private long lastGlobalPosition = 0; - private static final int BUFFER_SIZE = 8192; + private static final int BUFFER_SIZE = 16384; public RecordingJSONParser(Reader in) { diff --git a/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java b/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java deleted file mode 100644 index ded3a546818..00000000000 --- a/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.solr.util; - -import java.nio.charset.Charset; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.WeakHashMap; - -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.core.LogEvent; -import org.apache.logging.log4j.core.config.plugins.Plugin; -import org.apache.logging.log4j.core.config.plugins.PluginAttribute; -import org.apache.logging.log4j.core.config.plugins.PluginFactory; -import org.apache.logging.log4j.core.layout.AbstractStringLayout; -import org.apache.solr.cloud.ZkController; -import org.apache.solr.common.SolrException; -import org.apache.solr.common.StringUtils; -import org.apache.solr.common.cloud.DocCollection; -import org.apache.solr.common.cloud.Replica; -import org.apache.solr.common.util.SuppressForbidden; -import org.apache.solr.core.SolrCore; -import org.apache.solr.request.SolrQueryRequest; -import org.apache.solr.request.SolrRequestInfo; -import org.slf4j.MDC; - -import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP; -import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP; -import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP; -import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP; -import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP; - -@SuppressForbidden(reason = "class is specific to log4j2") -@Plugin(name = "SolrLogLayout", category = "Core", elementType = "layout", printObject = true) -public class SolrLogLayout extends AbstractStringLayout { - - protected SolrLogLayout(Charset charset) { - super(charset); - } - - @PluginFactory - public static SolrLogLayout createLayout(@PluginAttribute(value = "charset", defaultString = "UTF-8") Charset charset) { - return new SolrLogLayout(charset); - } - - /** - * Add this interface to a thread group and the string returned by getTag() - * will appear in log statements of any threads under that group. - */ - public static interface TG { - public String getTag(); - } - - @SuppressForbidden(reason = "Need currentTimeMillis to compare against log event timestamp. " + - "This is inaccurate but unavoidable due to interface limitations, in any case this is just for logging.") - final long startTime = System.currentTimeMillis(); - - long lastTime = startTime; - Map methodAlias = new HashMap<>(); - - public static class Method { - public String className; - public String methodName; - - public Method(String className, String methodName) { - this.className = className; - this.methodName = methodName; - } - - @Override - public int hashCode() { - return className.hashCode() + methodName.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof Method)) return false; - Method other = (Method) obj; - return (className.equals(other.className) && methodName - .equals(other.methodName)); - } - - @Override - public String toString() { - return className + '.' 
+ methodName; - } - } - - public static class CoreInfo { - static int maxCoreNum; - String shortId; - String url; - Map coreProps; - } - - Map coreInfoMap = new WeakHashMap<>(); - - public void appendThread(StringBuilder sb) { - Thread th = Thread.currentThread(); - - // NOTE: LogRecord.getThreadID is *not* equal to Thread.getId() - sb.append(" T"); - sb.append(th.getId()); - } - - @Override - public String toSerializable(LogEvent event) { - return _format(event); - } - - public String _format(LogEvent event) { - String message = event.getMessage().getFormattedMessage(); - if (message == null) { - message = ""; - } - StringBuilder sb = new StringBuilder(message.length() + 80); - - long now = event.getTimeMillis(); - long timeFromStart = now - startTime; - lastTime = now; - String shortClassName = getShortClassName(event.getSource().getClassName(), event.getSource().getMethodName()); - - /*** - * sb.append(timeFromStart).append(' ').append(timeSinceLast); - * sb.append(' '); - * sb.append(record.getSourceClassName()).append('.').append( - * record.getSourceMethodName()); sb.append(' '); - * sb.append(record.getLevel()); - ***/ - - SolrRequestInfo requestInfo = SolrRequestInfo.getRequestInfo(); - - SolrCore core; - try (SolrQueryRequest req = (requestInfo == null) ? null : requestInfo.getReq()) { - core = (req == null) ? null : req.getCore(); - } - ZkController zkController; - CoreInfo info = null; - - if (core != null) { - info = coreInfoMap.get(core.hashCode()); - if (info == null) { - info = new CoreInfo(); - info.shortId = "C" + Integer.toString(CoreInfo.maxCoreNum++); - coreInfoMap.put(core.hashCode(), info); - - if (sb.length() == 0) sb.append("ASYNC "); - sb.append(" NEW_CORE ").append(info.shortId); - sb.append(" name=").append(core.getName()); - sb.append(" ").append(core); - } - - zkController = core.getCoreContainer().getZkController(); - if (zkController != null) { - if (info.url == null) { - info.url = zkController.getBaseUrl() + "/" + core.getName(); - sb.append(" url=").append(info.url).append(" node=").append(zkController.getNodeName()); - } - - Map coreProps = getReplicaProps(zkController, core); - if (info.coreProps == null || !coreProps.equals(info.coreProps)) { - info.coreProps = coreProps; - final String corePropsString = "coll:" - + core.getCoreDescriptor().getCloudDescriptor() - .getCollectionName() + " core:" + core.getName() + " props:" - + coreProps; - sb.append(" ").append(info.shortId).append("_STATE=").append(corePropsString); - } - } - } - - if (sb.length() > 0) sb.append('\n'); - sb.append(timeFromStart); - - // sb.append("\nL").append(record.getSequenceNumber()); // log number is - // useful for sequencing when looking at multiple parts of a log file, but - // ms since start should be fine. - appendThread(sb); - - appendMDC(sb); - - // todo: should be able to get port from core container for non zk tests - - if (info != null) { - sb.append(' ').append(info.shortId); // core - } - - if (shortClassName.length() > 0) { - sb.append(' ').append(shortClassName); - } - - if (event.getLevel() != Level.INFO) { - sb.append(' ').append(event.getLevel()); - } - - sb.append(' '); - appendMultiLineString(sb, message); - Throwable th = event.getThrown(); - - if (th != null) { - sb.append(' '); - String err = SolrException.toStr(th); - String ignoredMsg = SolrException.doIgnore(th, err); - if (ignoredMsg != null) { - sb.append(ignoredMsg); - } else { - sb.append(err); - } - } - - - sb.append('\n'); - - /*** - * Isn't core specific... 
prob better logged from zkController if (info != - * null) { ClusterState clusterState = zkController.getClusterState(); if - * (info.clusterState != clusterState) { // something has changed in the - * matrix... sb.append(zkController.getBaseUrl() + - * " sees new ClusterState:"); } } - ***/ - - return sb.toString(); - } - - private Map getReplicaProps(ZkController zkController, SolrCore core) { - final String collectionName = core.getCoreDescriptor().getCloudDescriptor().getCollectionName(); - DocCollection collection = zkController.getClusterState().getCollectionOrNull(collectionName); - Replica replica = collection.getReplica(zkController.getCoreNodeName(core.getCoreDescriptor())); - if (replica != null) { - return replica.getProperties(); - } - return Collections.emptyMap(); - } - - private void addFirstLine(StringBuilder sb, String msg) { - // INFO: [] webapp=/solr path=/select params={q=foobarbaz} hits=0 status=0 - // QTime=1 - - if (!shorterFormat || !msg.startsWith("[")) { - sb.append(msg); - return; - } - - int idx = msg.indexOf(']'); - if (idx < 0 || !msg.startsWith(" webapp=", idx + 1)) { - sb.append(msg); - return; - } - - idx = msg.indexOf(' ', idx + 8); // space after webapp= - if (idx < 0) { - sb.append(msg); - return; - } - idx = msg.indexOf('=', idx + 1); // = in path= - if (idx < 0) { - sb.append(msg); - return; - } - - int idx2 = msg.indexOf(' ', idx + 1); - if (idx2 < 0) { - sb.append(msg); - return; - } - - sb.append(msg.substring(idx + 1, idx2 + 1)); // path - - idx = msg.indexOf("params=", idx2); - if (idx < 0) { - sb.append(msg.substring(idx2)); - } else { - sb.append(msg.substring(idx + 7)); - } - } - - private void appendMultiLineString(StringBuilder sb, String msg) { - int idx = msg.indexOf('\n'); - if (idx < 0) { - addFirstLine(sb, msg); - return; - } - - int lastIdx = -1; - for (;;) { - if (idx < 0) { - if (lastIdx == -1) { - addFirstLine(sb, msg.substring(lastIdx + 1)); - } else { - sb.append(msg.substring(lastIdx + 1)); - } - break; - } - if (lastIdx == -1) { - addFirstLine(sb, msg.substring(lastIdx + 1, idx)); - } else { - sb.append(msg.substring(lastIdx + 1, idx)); - } - - sb.append("\n\t"); - lastIdx = idx; - idx = msg.indexOf('\n', lastIdx + 1); - } - } - - // TODO: name this better... it's only for cloud tests where every core - // container has just one solr server so Port/Core are fine - public boolean shorterFormat = false; - - public void setShorterFormat() { - shorterFormat = true; - // looking at /update is enough... we don't need "UPDATE /update" - methodAlias.put(new Method( - "org.apache.solr.update.processor.LogUpdateProcessor", "finish"), ""); - } - - private Method classAndMethod = new Method(null, null); // don't need to be - // thread safe - - private String getShortClassName(String name, String method) { - classAndMethod.className = name; - classAndMethod.methodName = method; - - String out = methodAlias.get(classAndMethod); - if (out != null) return out; - - StringBuilder sb = new StringBuilder(); - - int lastDot = name.lastIndexOf('.'); - if (lastDot < 0) return name + '.' + method; - - int prevIndex = -1; - for (;;) { - char ch = name.charAt(prevIndex + 1); - sb.append(ch); - int idx = name.indexOf('.', prevIndex + 1); - ch = name.charAt(idx + 1); - if (idx >= lastDot || Character.isUpperCase(ch)) { - sb.append(name.substring(idx)); - break; - } - prevIndex = idx; - } - - return sb.toString() + '.' 
+ method; - } - - - private void appendMDC(StringBuilder sb) { - if (!StringUtils.isEmpty(MDC.get(NODE_NAME_PROP))) { - sb.append(" n:").append(MDC.get(NODE_NAME_PROP)); - } - if (!StringUtils.isEmpty(MDC.get(COLLECTION_PROP))) { - sb.append(" c:").append(MDC.get(COLLECTION_PROP)); - } - if (!StringUtils.isEmpty(MDC.get(SHARD_ID_PROP))) { - sb.append(" s:").append(MDC.get(SHARD_ID_PROP)); - } - if (!StringUtils.isEmpty(MDC.get(REPLICA_PROP))) { - sb.append(" r:").append(MDC.get(REPLICA_PROP)); - } - if (!StringUtils.isEmpty(MDC.get(CORE_NAME_PROP))) { - sb.append(" x:").append(MDC.get(CORE_NAME_PROP)); - } - } -} diff --git a/solr/core/src/java/org/apache/solr/util/StartupLoggingUtils.java b/solr/core/src/java/org/apache/solr/util/StartupLoggingUtils.java index 53f19b9fff4..c3c749d2414 100644 --- a/solr/core/src/java/org/apache/solr/util/StartupLoggingUtils.java +++ b/solr/core/src/java/org/apache/solr/util/StartupLoggingUtils.java @@ -114,7 +114,7 @@ public static boolean changeLogLevel(String logLevel) { private static boolean isLog4jActive() { try { // Make sure we have log4j LogManager in classpath - Class.forName("org.apache.log4j.LogManager"); + Class.forName("org.apache.logging.log4j.LogManager"); // Make sure that log4j is really selected as logger in slf4j - we could have LogManager in the bridge class :) return binder.getLoggerFactoryClassStr().contains("Log4jLoggerFactory"); } catch (Exception e) { diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java index 0e62c994714..81c2e5a6714 100644 --- a/solr/core/src/java/org/apache/solr/util/TestInjection.java +++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java @@ -582,11 +582,11 @@ public static boolean injectUIFOutOfMemoryError() { return true; } - public static boolean injectDocSetDelay() { + public static boolean injectDocSetDelay(Object query) { if (delayBeforeCreatingNewDocSet != null) { countDocSetDelays.incrementAndGet(); try { - log.info("Pausing DocSet for {}ms", delayBeforeCreatingNewDocSet); + log.info("Pausing DocSet for {}ms: {}", delayBeforeCreatingNewDocSet, query); if (log.isDebugEnabled()) { log.debug("", new Exception("Stack Trace")); } diff --git a/solr/core/src/test-files/log4j2.xml b/solr/core/src/test-files/log4j2.xml index 53dcae5c748..0e8f08c0c61 100644 --- a/solr/core/src/test-files/log4j2.xml +++ b/solr/core/src/test-files/log4j2.xml @@ -21,7 +21,7 @@ - %maxLen{%-4r %-5p (%t) [%X{node_name} %X{collection} %X{shard} %X{replica} %X{core} %X{trace_id}] %c{1.} %m%notEmpty{ + %maxLen{%-4r %-5p (%t) [%notEmpty{n:%X{node_name}}%notEmpty{ c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}%notEmpty{ t:%X{trace_id}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml index 46158cd77cf..e00b0c10de8 100644 --- a/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml +++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig-managed-schema.xml @@ -24,7 +24,7 @@ ${managed.schema.mutable} - ${managed.schema.resourceName:managed-schema} + ${managed.schema.resourceName:managed-schema.xml} diff --git a/solr/core/src/test-files/solr/configsets/upload/regular/managed-schema b/solr/core/src/test-files/solr/configsets/upload/legacy-managed-schema/managed-schema similarity index 100% rename from 
solr/core/src/test-files/solr/configsets/upload/regular/managed-schema rename to solr/core/src/test-files/solr/configsets/upload/legacy-managed-schema/managed-schema diff --git a/solr/core/src/test-files/solr/configsets/upload/legacy-managed-schema/solrconfig.xml b/solr/core/src/test-files/solr/configsets/upload/legacy-managed-schema/solrconfig.xml new file mode 100644 index 00000000000..827b5bd7253 --- /dev/null +++ b/solr/core/src/test-files/solr/configsets/upload/legacy-managed-schema/solrconfig.xml @@ -0,0 +1,60 @@ + + + + + + + + + ${solr.data.dir:} + + + + ${tests.luceneMatchVersion:LATEST} + + + + ${solr.commitwithin.softcommit:true} + + + + + + explicit + true + text + + + + + + + + diff --git a/solr/core/src/test-files/solr/configsets/upload/with-lib-directive/managed-schema b/solr/core/src/test-files/solr/configsets/upload/regular/managed-schema.xml similarity index 100% rename from solr/core/src/test-files/solr/configsets/upload/with-lib-directive/managed-schema rename to solr/core/src/test-files/solr/configsets/upload/regular/managed-schema.xml diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema b/solr/core/src/test-files/solr/configsets/upload/with-lib-directive/managed-schema.xml similarity index 100% rename from solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema rename to solr/core/src/test-files/solr/configsets/upload/with-lib-directive/managed-schema.xml diff --git a/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema.xml b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema.xml new file mode 100644 index 00000000000..9e2f9471026 --- /dev/null +++ b/solr/core/src/test-files/solr/configsets/upload/with-script-processor/managed-schema.xml @@ -0,0 +1,25 @@ + + + + + + + + + + diff --git a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java index d5550800aa6..532b2fd28a3 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestConfigSetsAPI.java @@ -32,6 +32,7 @@ import java.lang.invoke.MethodHandles; import java.net.URI; import java.nio.ByteBuffer; +import java.nio.charset.Charset; import java.security.Principal; import java.util.Arrays; import java.util.Collection; @@ -403,6 +404,42 @@ public void testUploadDisabled(boolean v2) throws Exception { } } + public void testUploadLegacyManagedSchemaFile() throws Exception { + String configSetName = "legacy-managed-schema"; + SolrZkClient zkClient = new SolrZkClient(cluster.getZkServer().getZkAddress(), + AbstractZkTestCase.TIMEOUT, 45000, null); + try { + long statusCode = uploadConfigSet(configSetName, "", null, zkClient, true); + assertEquals(0l, statusCode); + + assertTrue("managed-schema file should have been uploaded", + zkClient.exists("/configs/"+configSetName+"/managed-schema", true)); + } finally { + zkClient.close(); + } + + // try to create a collection with the uploaded configset + createCollection("newcollection", configSetName, 1, 1, cluster.getSolrClient()); + + String payload = "{\n" + + " 'add-field' : {\n" + + " 'name':'a1',\n" + + " 'type': 'string',\n" + + " 'stored':true,\n" + + " 'indexed':false\n" + + " },\n" + + " }"; + + ByteBuffer buff = Charset.forName("UTF-8").encode(payload); + Map map = postDataAndGetResponse(cluster.getSolrClient(), + cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + + 
"/newcollection/schema?wt=js" + + "on", buff, null, false); + Map responseHeader = (Map)map.get("responseHeader"); + Long status = (Long)responseHeader.get("status"); + assertEquals((long)status, 0L); + } + @Test public void testOverwriteV1() throws Exception { testOverwrite(false); @@ -829,7 +866,7 @@ public void testUploadWithLibDirective() throws Exception { // try to create a collection with the uploaded configset CollectionAdminResponse resp = createCollection("newcollection3", "with-lib-directive" + trustedSuffix, 1, 1, cluster.getSolrClient()); - + SolrInputDocument doc = sdoc("id", "4055", "subject", "Solr"); cluster.getSolrClient().add("newcollection3", doc); cluster.getSolrClient().commit("newcollection3"); @@ -858,11 +895,11 @@ private void uploadConfigSetWithAssertions(String configSetName, String suffix, } } private void assertConfigsetFiles(String configSetName, String suffix, SolrZkClient zkClient) throws KeeperException, InterruptedException, IOException { - assertTrue("managed-schema file should have been uploaded", - zkClient.exists("/configs/"+configSetName+suffix+"/managed-schema", true)); - assertTrue("managed-schema file contents on zookeeper are not exactly same as that of the file uploaded in config", - Arrays.equals(zkClient.getData("/configs/"+configSetName+suffix+"/managed-schema", null, null, true), - readFile("solr/configsets/upload/"+configSetName+"/managed-schema"))); + assertTrue("managed-schema.xml file should have been uploaded", + zkClient.exists("/configs/"+configSetName+suffix+"/managed-schema.xml", true)); + assertTrue("managed-schema.xml file contents on zookeeper are not exactly same as that of the file uploaded in config", + Arrays.equals(zkClient.getData("/configs/"+configSetName+suffix+"/managed-schema.xml", null, null, true), + readFile("solr/configsets/upload/"+configSetName+"/managed-schema.xml"))); assertTrue("solrconfig.xml file should have been uploaded", zkClient.exists("/configs/"+configSetName+suffix+"/solrconfig.xml", true)); @@ -1001,7 +1038,7 @@ private static void zip(File directory, File zipfile) throws IOException { zout.close(); } } - + public void scriptRequest(String collection) throws SolrServerException, IOException { SolrClient client = cluster.getSolrClient(); SolrInputDocument doc = sdoc("id", "4055", "subject", "Solr"); @@ -1026,21 +1063,21 @@ protected CollectionAdminResponse createCollection(String collectionName, String res.setResponse(client.request(request)); return res; } - + public static Map postDataAndGetResponse(CloudSolrClient cloudClient, String uri, ByteBuffer bytarr, String username, boolean usePut) throws IOException { HttpEntityEnclosingRequestBase httpRequest = null; HttpEntity entity; String response = null; Map m = null; - + try { if (usePut) { httpRequest = new HttpPut(uri); } else { httpRequest = new HttpPost(uri); } - + if (username != null) { httpRequest.addHeader(new BasicHeader("user", username)); } @@ -1093,7 +1130,7 @@ private byte[] readFile(String fname) throws IOException { } return buf; } - + @Test public void testDeleteErrors() throws Exception { final String baseUrl = cluster.getJettySolrRunners().get(0).getBaseUrl().toString(); @@ -1188,15 +1225,15 @@ public void testList() throws Exception { } /** - * A simple sanity check that the test-framework hueristic logic for setting - * {@link ExternalPaths#DEFAULT_CONFIGSET} is working as it should + * A simple sanity check that the test-framework hueristic logic for setting + * {@link ExternalPaths#DEFAULT_CONFIGSET} is working as it should * in 
the current test env, and finding the real directory which matches what {@link ZkController} * finds and uses to bootstrap ZK in cloud based tests. * *

- * This assumes the {@link SolrDispatchFilter#SOLR_DEFAULT_CONFDIR_ATTRIBUTE} system property - * has not been externally set in the environment where this test is being run -- which should - * never be the case, since it would prevent the test-framework from using + * This assumes the {@link SolrDispatchFilter#SOLR_DEFAULT_CONFDIR_ATTRIBUTE} system property + * has not been externally set in the environment where this test is being run -- which should + * never be the case, since it would prevent the test-framework from using * {@link ExternalPaths#DEFAULT_CONFIGSET} * * @see SolrDispatchFilter#SOLR_DEFAULT_CONFDIR_ATTRIBUTE @@ -1208,7 +1245,7 @@ public void testUserAndTestDefaultConfigsetsAreSame() throws IOException { final File extPath = new File(ExternalPaths.DEFAULT_CONFIGSET); assertTrue("_default dir doesn't exist: " + ExternalPaths.DEFAULT_CONFIGSET, extPath.exists()); assertTrue("_default dir isn't a dir: " + ExternalPaths.DEFAULT_CONFIGSET, extPath.isDirectory()); - + final String zkBootStrap = ConfigSetService.getDefaultConfigDirPath(); assertEquals("extPath _default configset dir vs zk bootstrap path", ExternalPaths.DEFAULT_CONFIGSET, zkBootStrap); diff --git a/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java b/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java index 3e9956fbe7a..3f90fadd950 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestRebalanceLeaders.java @@ -18,9 +18,16 @@ import java.io.IOException; import java.lang.invoke.MethodHandles; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.TimeUnit; - import org.apache.lucene.util.LuceneTestCase; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.client.solrj.embedded.JettySolrRunner; @@ -61,74 +68,83 @@ public static void setupCluster() throws Exception { numReplicas = random().nextInt(2) + 2; useAdminToSetProps = random().nextBoolean(); - configureCluster(numNodes) - .addConfig(COLLECTION_NAME, configset("cloud-minimal")) - .configure(); + configureCluster(numNodes).addConfig(COLLECTION_NAME, configset("cloud-minimal")).configure(); - CollectionAdminResponse resp = CollectionAdminRequest.createCollection(COLLECTION_NAME, COLLECTION_NAME, - numShards, numReplicas, 0, 0) - .process(cluster.getSolrClient()); + CollectionAdminResponse resp = + CollectionAdminRequest.createCollection( + COLLECTION_NAME, COLLECTION_NAME, numShards, numReplicas, 0, 0) + .process(cluster.getSolrClient()); assertEquals("Admin request failed; ", 0, resp.getStatus()); cluster.waitForActiveCollection(COLLECTION_NAME, numShards, numShards * numReplicas); - } @Before public void removeAllProperties() throws KeeperException, InterruptedException { forceUpdateCollectionStatus(); - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + DocCollection docCollection = + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); for (Slice slice : docCollection.getSlices()) { for (Replica rep : slice.getReplicas()) { - rep.getProperties().forEach((key, value) -> { - if (key.startsWith("property.")) { - try { - delProp(slice, rep, key); - } catch (IOException | SolrServerException 
e) { - fail("Caught unexpected exception in @Before " + e.getMessage()); - } - } - }); + rep.getProperties() + .forEach( + (key, value) -> { + if (key.startsWith("property.")) { + try { + delProp(slice, rep, key); + } catch (IOException | SolrServerException e) { + fail("Caught unexpected exception in @Before " + e.getMessage()); + } + } + }); } } } int timeoutMs = 60000; - - // test that setting an arbitrary "slice unique" property un-sets the property if it's on another replica in the - // slice. This is testing when the property is set on an _individual_ replica whereas testBalancePropertySliceUnique - // tests whether changing an individual _replica_ un-sets the property on other replicas _in that slice_. + // test that setting an arbitrary "slice unique" property un-sets the property if it's on another + // replica in the + // slice. This is testing when the property is set on an _individual_ replica whereas + // testBalancePropertySliceUnique + // tests whether changing an individual _replica_ un-sets the property on other replicas _in that + // slice_. // // NOTE: There were significant problems because at one point the code implicitly defined - // shardUnique=true for the special property preferredLeader. That was removed at one point so we're explicitly + // shardUnique=true for the special property preferredLeader. That was removed at one point so + // we're explicitly // testing that as well. @Test - public void testSetArbitraryPropertySliceUnique() throws IOException, SolrServerException, InterruptedException, KeeperException { + public void testSetArbitraryPropertySliceUnique() + throws IOException, SolrServerException, InterruptedException, KeeperException { // Check both special (preferredLeader) and something arbitrary. doTestSetArbitraryPropertySliceUnique("foo" + random().nextInt(1_000_000)); removeAllProperties(); doTestSetArbitraryPropertySliceUnique("preferredleader"); } - - // Test that automatically distributing a slice unique property un-sets that property if it's in any other replica + // Test that automatically distributing a slice unique property un-sets that property if it's in + // any other replica // on that slice. - // This is different than the test above. The test above sets individual properties on individual nodes. This one + // This is different than the test above. The test above sets individual properties on individual + // nodes. This one // relies on Solr to pick which replicas to set the property on @Test - public void testBalancePropertySliceUnique() throws KeeperException, InterruptedException, IOException, SolrServerException { + public void testBalancePropertySliceUnique() + throws KeeperException, InterruptedException, IOException, SolrServerException { // Check both cases of "special" property preferred(Ll)eader doTestBalancePropertySliceUnique("foo" + random().nextInt(1_000_000)); removeAllProperties(); doTestBalancePropertySliceUnique("preferredleader"); } - // We've moved on from a property being tested, we need to check if rebalancing the leaders actually chantges the + // We've moved on from a property being tested, we need to check if rebalancing the leaders + // actually chantges the // leader appropriately. @Test public void testRebalanceLeaders() throws Exception { - // First let's unbalance the preferredLeader property, do all the leaders get reassigned properly? + // First let's unbalance the preferredLeader property, do all the leaders get reassigned + // properly? 
concentrateProp("preferredLeader"); sendRebalanceCommand(); checkPreferredsAreLeaders(); @@ -138,19 +154,23 @@ public void testRebalanceLeaders() throws Exception { sendRebalanceCommand(); checkPreferredsAreLeaders(); - // Now check the condition we saw "in the wild" where you could not rebalance properly when Jetty was restarted. + // Now check the condition we saw "in the wild" where you could not rebalance properly when + // Jetty was restarted. concentratePropByRestartingJettys(); sendRebalanceCommand(); checkPreferredsAreLeaders(); } - // Insure that the property is set on only one replica per slice when changing a unique property on an individual + // Insure that the property is set on only one replica per slice when changing a unique property + // on an individual // replica. - private void doTestSetArbitraryPropertySliceUnique(String propIn) throws InterruptedException, KeeperException, IOException, SolrServerException { + private void doTestSetArbitraryPropertySliceUnique(String propIn) + throws InterruptedException, KeeperException, IOException, SolrServerException { final String prop = (random().nextBoolean()) ? propIn : propIn.toUpperCase(Locale.ROOT); // First set the property in some replica in some slice forceUpdateCollectionStatus(); - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + DocCollection docCollection = + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); Slice[] slices = docCollection.getSlices().toArray(new Slice[0]); Slice slice = slices[random().nextInt(slices.length)]; @@ -171,25 +191,41 @@ private void doTestSetArbitraryPropertySliceUnique(String propIn) throws Interru // insure that no other replica in that slice has the property when we return. while (timeout.hasTimedOut() == false) { forceUpdateCollectionStatus(); - modColl = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + modColl = + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME); modSlice = modColl.getSlice(slice.getName()); - rightRep = modSlice.getReplica(rep.getName()).getBool("property." + prop.toLowerCase(Locale.ROOT), false); - count = modSlice.getReplicas().stream().filter(thisRep -> thisRep.getBool("property." + prop.toLowerCase(Locale.ROOT), false)).count(); + rightRep = + modSlice + .getReplica(rep.getName()) + .getBool("property." + prop.toLowerCase(Locale.ROOT), false); + count = + modSlice.getReplicas().stream() + .filter( + thisRep -> thisRep.getBool("property." + prop.toLowerCase(Locale.ROOT), false)) + .count(); if (count == 1 && rightRep) { break; } - TimeUnit.MILLISECONDS.sleep(100); + TimeUnit.MILLISECONDS.sleep(200); } if (count != 1 || rightRep == false) { - fail("The property " + prop + " was not uniquely distributed in slice " + slice.getName() - + " " + modColl.toString()); + fail( + "The property " + + prop + + " was not uniquely distributed in slice " + + slice.getName() + + " " + + modColl.toString()); } } } - // Fail if we the replicas with the preferredLeader property are _not_ also the leaders. private void checkPreferredsAreLeaders() throws InterruptedException, KeeperException { // Make sure that the shard unique are where you expect. @@ -197,7 +233,8 @@ private void checkPreferredsAreLeaders() throws InterruptedException, KeeperExce while (timeout.hasTimedOut() == false) { if (checkPreferredsAreLeaders(false)) { - // Ok, all preferreds are leaders. 
Just for Let's also get the election queue and guarantee that every + // Ok, all preferreds are leaders. Just for Let's also get the election queue and guarantee + // that every // live replica is in the queue and none are repeated. checkElectionQueues(); return; @@ -205,40 +242,67 @@ private void checkPreferredsAreLeaders() throws InterruptedException, KeeperExce TimeUnit.MILLISECONDS.sleep(100); } - log.error("Leaders are not all preferres {}", cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME)); + log.error( + "Leaders are not all preferres {}", + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME)); // Show the errors checkPreferredsAreLeaders(true); } // Do all active nodes in each slice appear exactly once in the slice's leader election queue? - // Since we assert that the number of live replicas is the same size as the leader election queue, we only + // Since we assert that the number of live replicas is the same size as the leader election queue, + // we only // have to compare one way. private void checkElectionQueues() throws KeeperException, InterruptedException { - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); - Set liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes(); + DocCollection docCollection = + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + Set liveNodes = + cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes(); for (Slice slice : docCollection.getSlices()) { Set liveReplicas = new HashSet<>(); - slice.getReplicas().forEach(replica -> { - if (replica.isActive(liveNodes)) { - liveReplicas.add(replica); - } - }); + slice + .getReplicas() + .forEach( + replica -> { + if (replica.isActive(liveNodes)) { + liveReplicas.add(replica); + } + }); checkOneQueue(docCollection, slice, liveReplicas); } } // Helper method to check one leader election queue's consistency. - private void checkOneQueue(DocCollection coll, Slice slice, Set liveReplicas) throws KeeperException, InterruptedException { - - List leaderQueue = cluster.getSolrClient().getZkStateReader().getZkClient().getChildren("/collections/" + COLLECTION_NAME + - "/leader_elect/" + slice.getName() + "/election", null, true); + private void checkOneQueue(DocCollection coll, Slice slice, Set liveReplicas) + throws KeeperException, InterruptedException { + + List leaderQueue = + cluster + .getSolrClient() + .getZkStateReader() + .getZkClient() + .getChildren( + "/collections/" + + COLLECTION_NAME + + "/leader_elect/" + + slice.getName() + + "/election", + null, + true); if (leaderQueue.size() != liveReplicas.size()) { - log.error("One or more replicas is missing from the leader election queue! Slice {}, election queue: {}, collection: {}" - , slice.getName(), leaderQueue, coll); + log.error( + "One or more replicas is missing from the leader election queue! Slice {}, election queue: {}, collection: {}", + slice.getName(), + leaderQueue, + coll); fail("One or more replicas is missing from the leader election queue"); } // Check that each election node has a corresponding live replica. @@ -252,10 +316,13 @@ private void checkOneQueue(DocCollection coll, Slice slice, Set liveRep } // Just an encapsulation for checkPreferredsAreLeaders to make returning easier. 
- // the doAsserts var is to actually print the problem and fail the test if the condition is not met. - private boolean checkPreferredsAreLeaders(boolean doAsserts) throws KeeperException, InterruptedException { + // the doAsserts var is to actually print the problem and fail the test if the condition is not + // met. + private boolean checkPreferredsAreLeaders(boolean doAsserts) + throws KeeperException, InterruptedException { forceUpdateCollectionStatus(); - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + DocCollection docCollection = + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); for (Slice slice : docCollection.getSlices()) { for (Replica rep : slice.getReplicas()) { if (rep.getBool("property.preferredleader", false)) { @@ -272,7 +339,8 @@ private boolean checkPreferredsAreLeaders(boolean doAsserts) throws KeeperExcept } // Arbitrarily send the rebalance command either with the SolrJ interface or with an HTTP request. - private void sendRebalanceCommand() throws SolrServerException, InterruptedException, IOException { + private void sendRebalanceCommand() + throws SolrServerException, InterruptedException, IOException { if (random().nextBoolean()) { rebalanceLeaderUsingSolrJAPI(); } else { @@ -280,9 +348,11 @@ private void sendRebalanceCommand() throws SolrServerException, InterruptedExcep } } - // Helper method to make sure the property is _unbalanced_ first, then it gets properly re-assigned with the + // Helper method to make sure the property is _unbalanced_ first, then it gets properly + // re-assigned with the // BALANCESHARDUNIQUE command. - private void doTestBalancePropertySliceUnique(String propIn) throws InterruptedException, IOException, KeeperException, SolrServerException { + private void doTestBalancePropertySliceUnique(String propIn) + throws InterruptedException, IOException, KeeperException, SolrServerException { final String prop = (random().nextBoolean()) ? propIn : propIn.toUpperCase(Locale.ROOT); // Concentrate the properties on as few replicas a possible @@ -296,10 +366,10 @@ private void doTestBalancePropertySliceUnique(String propIn) throws InterruptedE // Verify that the property is reasonably evenly distributed verifyPropCorrectlyDistributed(prop); - } - private void verifyPropCorrectlyDistributed(String prop) throws KeeperException, InterruptedException { + private void verifyPropCorrectlyDistributed(String prop) + throws KeeperException, InterruptedException { TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME); @@ -307,7 +377,12 @@ private void verifyPropCorrectlyDistributed(String prop) throws KeeperException, DocCollection docCollection = null; while (timeout.hasTimedOut() == false) { forceUpdateCollectionStatus(); - docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + docCollection = + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME); int maxPropCount = Integer.MAX_VALUE; int minPropCount = Integer.MIN_VALUE; for (Slice slice : docCollection.getSlices()) { @@ -327,7 +402,9 @@ private void verifyPropCorrectlyDistributed(String prop) throws KeeperException, } // Used when we concentrate the leader on a few nodes. 
- private void verifyPropDistributedAsExpected(Map expectedShardReplicaMap, String prop) throws InterruptedException, KeeperException { + private void verifyPropDistributedAsExpected( + Map expectedShardReplicaMap, String prop) + throws InterruptedException, KeeperException { // Make sure that the shard unique are where you expect. TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME); @@ -336,7 +413,12 @@ private void verifyPropDistributedAsExpected(Map expectedShardRe DocCollection docCollection = null; while (timeout.hasTimedOut() == false) { forceUpdateCollectionStatus(); - docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + docCollection = + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME); failure = false; for (Map.Entry ent : expectedShardReplicaMap.entrySet()) { Replica rep = docCollection.getSlice(ent.getKey()).getReplica(ent.getValue()); @@ -350,12 +432,18 @@ private void verifyPropDistributedAsExpected(Map expectedShardRe TimeUnit.MILLISECONDS.sleep(100); } - fail(prop + " properties are not on the expected replicas: " + docCollection.toString() - + System.lineSeparator() + "Expected " + expectedShardReplicaMap.toString()); + fail( + prop + + " properties are not on the expected replicas: " + + docCollection.toString() + + System.lineSeparator() + + "Expected " + + expectedShardReplicaMap.toString()); } // Just check that the property is distributed as expectecd. This does _not_ rebalance the leaders - private void rebalancePropAndCheck(String prop) throws IOException, SolrServerException, InterruptedException, KeeperException { + private void rebalancePropAndCheck(String prop) + throws IOException, SolrServerException, InterruptedException, KeeperException { if (random().nextBoolean()) { rebalancePropUsingSolrJAPI(prop); @@ -364,12 +452,13 @@ private void rebalancePropAndCheck(String prop) throws IOException, SolrServerEx } } - - private void rebalanceLeaderUsingSolrJAPI() throws IOException, SolrServerException, InterruptedException { - CollectionAdminResponse resp = CollectionAdminRequest - .rebalanceLeaders(COLLECTION_NAME) - .process(cluster.getSolrClient()); - assertTrue("All leaders should have been verified", resp.getResponse().get("Summary").toString().contains("Success")); + private void rebalanceLeaderUsingSolrJAPI() + throws IOException, SolrServerException, InterruptedException { + CollectionAdminResponse resp = + CollectionAdminRequest.rebalanceLeaders(COLLECTION_NAME).process(cluster.getSolrClient()); + assertTrue( + "All leaders should have been verified", + resp.getResponse().get("Summary").toString().contains("Success")); assertEquals("Admin request failed; ", 0, resp.getStatus()); } @@ -380,31 +469,33 @@ private void rebalanceLeaderUsingStandardRequest() throws IOException, SolrServe QueryRequest request = new QueryRequest(params); request.setPath("/admin/collections"); QueryResponse resp = request.process(cluster.getSolrClient()); - assertTrue("All leaders should have been verified", resp.getResponse().get("Summary").toString().contains("Success")); + assertTrue( + "All leaders should have been verified", + resp.getResponse().get("Summary").toString().contains("Success")); assertEquals("Call to rebalanceLeaders failed ", 0, resp.getStatus()); } - - private void rebalancePropUsingSolrJAPI(String prop) throws IOException, SolrServerException, InterruptedException { + private void rebalancePropUsingSolrJAPI(String 
prop) + throws IOException, SolrServerException, InterruptedException { // Don't set the value, that should be done automatically. CollectionAdminResponse resp; if (prop.toLowerCase(Locale.ROOT).contains("preferredleader")) { - resp = CollectionAdminRequest - .balanceReplicaProperty(COLLECTION_NAME, prop) - .process(cluster.getSolrClient()); + resp = + CollectionAdminRequest.balanceReplicaProperty(COLLECTION_NAME, prop) + .process(cluster.getSolrClient()); } else { - resp = CollectionAdminRequest - .balanceReplicaProperty(COLLECTION_NAME, prop) - .setShardUnique(true) - .process(cluster.getSolrClient()); - + resp = + CollectionAdminRequest.balanceReplicaProperty(COLLECTION_NAME, prop) + .setShardUnique(true) + .process(cluster.getSolrClient()); } assertEquals("Admin request failed; ", 0, resp.getStatus()); } - private void rebalancePropUsingStandardRequest(String prop) throws IOException, SolrServerException { + private void rebalancePropUsingStandardRequest(String prop) + throws IOException, SolrServerException { ModifiableSolrParams params = new ModifiableSolrParams(); params.set("action", CollectionParams.CollectionAction.BALANCESHARDUNIQUE.toString()); params.set("property", prop); @@ -419,11 +510,13 @@ private void rebalancePropUsingStandardRequest(String prop) throws IOException, assertEquals("Call to rebalanceLeaders failed ", 0, resp.getStatus()); } - // This important. I've (Erick Erickson) run across a situation where the "standard request" causes failures, but + // This important. I've (Erick Erickson) run across a situation where the "standard request" + // causes failures, but // never the Admin request. So let's test both all the time for a given test. // // This sets an _individual_ replica to have the property, not collection-wide - private void setProp(Slice slice, Replica rep, String prop) throws IOException, SolrServerException { + private void setProp(Slice slice, Replica rep, String prop) + throws IOException, SolrServerException { if (useAdminToSetProps) { setPropWithAdminRequest(slice, rep, prop); } else { @@ -431,7 +524,8 @@ private void setProp(Slice slice, Replica rep, String prop) throws IOException, } } - void setPropWithStandardRequest(Slice slice, Replica rep, String prop) throws IOException, SolrServerException { + void setPropWithStandardRequest(Slice slice, Replica rep, String prop) + throws IOException, SolrServerException { ModifiableSolrParams params = new ModifiableSolrParams(); params.set("action", CollectionParams.CollectionAction.ADDREPLICAPROP.toString()); @@ -449,48 +543,62 @@ void setPropWithStandardRequest(Slice slice, Replica rep, String prop) throws IO request.setPath("/admin/collections"); cluster.getSolrClient().request(request); String propLC = prop.toLowerCase(Locale.ROOT); - waitForState("Expecting property '" + prop + "'to appear on replica " + rep.getName(), COLLECTION_NAME, + waitForState( + "Expecting property '" + prop + "'to appear on replica " + rep.getName(), + COLLECTION_NAME, (n, c) -> "true".equals(c.getReplica(rep.getName()).getProperty(propLC))); - } - void setPropWithAdminRequest(Slice slice, Replica rep, String prop) throws IOException, SolrServerException { + void setPropWithAdminRequest(Slice slice, Replica rep, String prop) + throws IOException, SolrServerException { boolean setUnique = (prop.toLowerCase(Locale.ROOT).equals("preferredleader") == false); CollectionAdminRequest.AddReplicaProp addProp = - CollectionAdminRequest.addReplicaProperty(COLLECTION_NAME, slice.getName(), rep.getName(), prop, "true"); + 
CollectionAdminRequest.addReplicaProperty( + COLLECTION_NAME, slice.getName(), rep.getName(), prop, "true"); if (setUnique) { addProp.setShardUnique(true); } CollectionAdminResponse resp = addProp.process(cluster.getSolrClient()); assertEquals(0, resp.getStatus()); String propLC = prop.toLowerCase(Locale.ROOT); - waitForState("Expecting property '" + prop + "'to appear on replica " + rep.getName(), COLLECTION_NAME, + waitForState( + "Expecting property '" + prop + "' to appear on replica " + rep.getName(), + COLLECTION_NAME, + (n, c) -> "true".equals(c.getReplica(rep.getName()).getProperty(propLC))); - } - private void delProp(Slice slice, Replica rep, String prop) throws IOException, SolrServerException { + private void delProp(Slice slice, Replica rep, String prop) + throws IOException, SolrServerException { String propLC = prop.toLowerCase(Locale.ROOT); - CollectionAdminResponse resp = CollectionAdminRequest.deleteReplicaProperty(COLLECTION_NAME, slice.getName(), rep.getName(), propLC) - .process(cluster.getSolrClient()); + CollectionAdminResponse resp = + CollectionAdminRequest.deleteReplicaProperty( + COLLECTION_NAME, slice.getName(), rep.getName(), propLC) + .process(cluster.getSolrClient()); assertEquals("Admin request failed; ", 0, resp.getStatus()); - waitForState("Expecting property '" + prop + "' to be removed from replica " + rep.getName(), COLLECTION_NAME, + waitForState( + "Expecting property '" + prop + "' to be removed from replica " + rep.getName(), + COLLECTION_NAME, + (n, c) -> c.getReplica(rep.getName()).getProperty(prop) == null); } - // Intentionally un-balance the property to insure that BALANCESHARDUNIQUE does its job. There was an odd case - // where rebalancing didn't work very well if the Solr nodes were stopped and restarted that worked perfectly + // Intentionally un-balance the property to ensure that BALANCESHARDUNIQUE does its job. There was + // an odd case + // where rebalancing didn't work very well if the Solr nodes were stopped and restarted that + // worked perfectly // when if the nodes were _not_ restarted in the test. So we have to test that too. private void concentratePropByRestartingJettys() throws Exception { List<JettySolrRunner> jettys = new ArrayList<>(cluster.getJettySolrRunners()); Collections.shuffle(jettys, random()); jettys.remove(random().nextInt(jettys.size())); - // Now we have a list of jettys, and there is one missing. Stop all of the remaining jettys, then start them again + // Now we have a list of jettys, and there is one missing. Stop all of the remaining jettys, + // then start them again // to concentrate the leaders. It's not necessary that all shards have a leader. for (JettySolrRunner jetty : jettys) { cluster.stopJettySolrRunner(jetty); + } + for (JettySolrRunner jetty : jettys) { cluster.waitForJettyToStop(jetty); } checkReplicasInactive(jettys); @@ -503,26 +611,35 @@ private void concentratePropByRestartingJettys() throws Exception { checkAllReplicasActive(); } - // while banging my nead against a wall, I put a lot of force refresh statements in. Want to leave them in + // while banging my head against a wall, I put a lot of force refresh statements in. Want to leave + // them in // but have this be a no-op so if we start to get failures, we can re-enable with minimal effort. private void forceUpdateCollectionStatus() throws KeeperException, InterruptedException { // cluster.getSolrClient().getZkStateReader().forceUpdateCollection(COLLECTION_NAME); } - // Since we have to restart jettys, we don't want to try rebalancing etc.
until we're sure all jettys that should + // Since we have to restart jettys, we don't want to try rebalancing etc. until we're sure all + // jettys that should // be up are up and all replicas are active. - private void checkReplicasInactive(List<JettySolrRunner> downJettys) throws KeeperException, InterruptedException { + private void checkReplicasInactive(List<JettySolrRunner> downJettys) + throws KeeperException, InterruptedException { TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME); DocCollection docCollection = null; Set<String> liveNodes = null; Set<String> downJettyNodes = new TreeSet<>(); for (JettySolrRunner jetty : downJettys) { - downJettyNodes.add(jetty.getBaseUrl().getHost() + ":" + jetty.getBaseUrl().getPort() + "_solr"); + downJettyNodes.add( + jetty.getBaseUrl().getHost() + ":" + jetty.getBaseUrl().getPort() + "_solr"); } while (timeout.hasTimedOut() == false) { forceUpdateCollectionStatus(); - docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + docCollection = + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME); liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes(); boolean expectedInactive = true; @@ -542,18 +659,28 @@ private void checkReplicasInactive(List downJettys) throws Keep } TimeUnit.MILLISECONDS.sleep(100); } - fail("timed out waiting for all replicas to become inactive: livenodes: " + liveNodes + - " Collection state: " + docCollection.toString()); + fail( + "timed out waiting for all replicas to become inactive: livenodes: " + + liveNodes + + " Collection state: " + + docCollection.toString()); } - // We need to wait around until all replicas are active before expecting rebalancing or distributing shard-unique + // We need to wait around until all replicas are active before expecting rebalancing or + // distributing shard-unique // properties to work. private void checkAllReplicasActive() throws KeeperException, InterruptedException { TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME); while (timeout.hasTimedOut() == false) { forceUpdateCollectionStatus(); - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); - Set<String> liveNodes = cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes(); + DocCollection docCollection = + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME); + Set<String> liveNodes = + cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes(); boolean allActive = true; for (Slice slice : docCollection.getSlices()) { for (Replica rep : slice.getReplicas()) { @@ -570,17 +697,23 @@ private void checkAllReplicasActive() throws KeeperException, InterruptedExcepti fail("timed out waiting for all replicas to become active"); } - // use a simple heuristic to put as many replicas with the property on as few nodes as possible. The point is that + // use a simple heuristic to put as many replicas with the property on as few nodes as possible.
+ // The point is that // then we can execute BALANCESHARDUNIQUE and be sure it worked correctly - private void concentrateProp(String prop) throws KeeperException, InterruptedException, IOException, SolrServerException { + private void concentrateProp(String prop) + throws KeeperException, InterruptedException, IOException, SolrServerException { // find all the live nodes - // for each slice, assign the leader to the first replica that is in the lowest position on live_nodes - List liveNodes = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()); + // for each slice, assign the leader to the first replica that is in the lowest position on + // live_nodes + List liveNodes = + new ArrayList<>( + cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()); Collections.shuffle(liveNodes, random()); Map uniquePropMap = new TreeMap<>(); forceUpdateCollectionStatus(); - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + DocCollection docCollection = + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); for (Slice slice : docCollection.getSlices()) { Replica changedRep = null; int livePos = Integer.MAX_VALUE; @@ -592,7 +725,9 @@ private void concentrateProp(String prop) throws KeeperException, InterruptedExc } } if (livePos == Integer.MAX_VALUE) { - fail("Invalid state! We should have a replica to add the property to! " + docCollection.toString()); + fail( + "Invalid state! We should have a replica to add the property to! " + + docCollection.toString()); } uniquePropMap.put(slice.getName(), changedRep.getName()); @@ -603,7 +738,8 @@ private void concentrateProp(String prop) throws KeeperException, InterruptedExc } // make sure that the property in question is unique per shard. - private Map verifyPropUniquePerShard(String prop) throws InterruptedException, KeeperException { + private Map verifyPropUniquePerShard(String prop) + throws InterruptedException, KeeperException { Map uniquePropMaps = new TreeMap<>(); TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME); @@ -614,15 +750,25 @@ private Map verifyPropUniquePerShard(String prop) throws Interru } TimeUnit.MILLISECONDS.sleep(100); } - fail("There should be exactly one replica with value " + prop + " set to true per shard: " - + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).toString()); + fail( + "There should be exactly one replica with value " + + prop + + " set to true per shard: " + + cluster + .getSolrClient() + .getZkStateReader() + .getClusterState() + .getCollection(COLLECTION_NAME) + .toString()); return null; // keeps IDE happy. 
} // return true if every shard has exactly one replica with the unique property set to "true" - private boolean checkdUniquePropPerShard(Map uniques, String prop) throws KeeperException, InterruptedException { + private boolean checkdUniquePropPerShard(Map uniques, String prop) + throws KeeperException, InterruptedException { forceUpdateCollectionStatus(); - DocCollection docCollection = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); + DocCollection docCollection = + cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME); for (Slice slice : docCollection.getSlices()) { int propfCount = 0; @@ -638,4 +784,4 @@ private boolean checkdUniquePropPerShard(Map uniques, String pro } return true; } -} \ No newline at end of file +} diff --git a/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java b/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java index 06be968d124..5d2753834f8 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestStressLiveNodes.java @@ -116,7 +116,7 @@ private static List getCachedLiveNodesFromLocalState(final int expectedC log.info("sleeping #{} to give watchers a chance to finish: {} != {}", i, expectedCount, result.size()); } - Thread.sleep(200); + Thread.sleep(100); } else { break; } @@ -132,7 +132,7 @@ private static List getCachedLiveNodesFromLocalState(final int expectedC public void testStress() throws Exception { // do many iters, so we have "bursts" of adding nodes that we then check - final int numIters = atLeast(TEST_NIGHTLY ? 1000 : 100); + final int numIters = atLeast(TEST_NIGHTLY ? 1000 : 10); for (int iter = 0; iter < numIters; iter++) { // sanity check that ZK says there is in fact 1 live node @@ -157,7 +157,7 @@ public void testStress() throws Exception { // odds of concurrent watchers firing regardless of the num CPUs or load on the machine running // the test (but we deliberately don't look at availableProcessors() since we want randomization // consistency across all machines for a given seed) - final int numThreads = TestUtil.nextInt(random(), 2, 5); + final int numThreads = TestUtil.nextInt(random(), 2, TEST_NIGHTLY ? 5 : 3); // use same num for all thrashers, to increase likely hood of them all competing // (diff random number would mean heavy concurrency only for ~ the first N=lowest num requests) @@ -165,7 +165,7 @@ public void testStress() throws Exception { // this does not need to be a large number -- in fact, the higher it is, the more // likely we are to see a mistake in early watcher triggers get "corrected" by a later one // and overlook a possible bug - final int numNodesPerThrasher = TestUtil.nextInt(random(), 1, 5); + final int numNodesPerThrasher = TestUtil.nextInt(random(), 1, TEST_NIGHTLY ? 
5 : 2); log.info("preparing parallel adds to live nodes: iter={}, numThreads={} numNodesPerThread={}", iter, numThreads, numNodesPerThrasher); diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java index c0d51d958e8..5de30607d42 100644 --- a/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/HdfsCloudIncrementalBackupTest.java @@ -17,11 +17,11 @@ package org.apache.solr.cloud.api.collections; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -34,109 +34,117 @@ import org.junit.AfterClass; import org.junit.BeforeClass; -@LuceneTestCase.SuppressCodecs({"SimpleText"}) // Backups do checksum validation against a footer value not present in 'SimpleText' -@ThreadLeakFilters(defaultFilters = true, filters = { - BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) -}) -public class HdfsCloudIncrementalBackupTest extends AbstractIncrementalBackupTest{ - public static final String SOLR_XML = "\n" + - "\n" + - " ${shareSchema:false}\n" + - " ${configSetBaseDir:configsets}\n" + - " ${coreRootDirectory:.}\n" + - "\n" + - " \n" + - " ${urlScheme:}\n" + - " ${socketTimeout:90000}\n" + - " ${connTimeout:15000}\n" + - " \n" + - "\n" + - " \n" + - " 127.0.0.1\n" + - " ${hostPort:8983}\n" + - " ${hostContext:solr}\n" + - " ${solr.zkclienttimeout:30000}\n" + - " ${genericCoreNodeNames:true}\n" + - " 10000\n" + - " ${distribUpdateConnTimeout:45000}\n" + - " ${distribUpdateSoTimeout:340000}\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " hdfs\n" + - " \n" + - " \n" + - " ${solr.hdfs.default.backup.path}\n" + - " ${solr.hdfs.home:}\n" + - " ${solr.hdfs.confdir:}\n" + - " \n" + - " \n" + - " \n" + - "\n"; +@LuceneTestCase.SuppressCodecs({ + "SimpleText" +}) // Backups do checksum validation against a footer value not present in 'SimpleText' +@ThreadLeakFilters( + defaultFilters = true, + filters = { + BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) + }) +@ThreadLeakLingering(linger = 5000) +public class HdfsCloudIncrementalBackupTest extends AbstractIncrementalBackupTest { + public static final String SOLR_XML = + "\n" + + "\n" + + " ${shareSchema:false}\n" + + " ${configSetBaseDir:configsets}\n" + + " ${coreRootDirectory:.}\n" + + "\n" + + " \n" + + " ${urlScheme:}\n" + + " ${socketTimeout:90000}\n" + + " ${connTimeout:15000}\n" + + " \n" + + "\n" + + " \n" + + " 127.0.0.1\n" + + " ${hostPort:8983}\n" + + " ${hostContext:solr}\n" + + " ${solr.zkclienttimeout:30000}\n" + + " ${genericCoreNodeNames:true}\n" + + " 10000\n" + + " ${distribUpdateConnTimeout:45000}\n" + + " ${distribUpdateSoTimeout:340000}\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " hdfs\n" + + " \n" + + " \n" + + " ${solr.hdfs.default.backup.path}\n" + + " ${solr.hdfs.home:}\n" + + " ${solr.hdfs.confdir:}\n" + + " \n" + + " \n" + + " \n" + + "\n"; - private static MiniDFSCluster dfsCluster; - private static String hdfsUri; - private static FileSystem fs; + private static 
MiniDFSCluster dfsCluster; + private static String hdfsUri; + private static FileSystem fs; - @BeforeClass - public static void setupClass() throws Exception { - dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); - hdfsUri = HdfsTestUtil.getURI(dfsCluster); - try { - URI uri = new URI(hdfsUri); - Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster); - fs = FileSystem.get(uri, conf); + @BeforeClass + public static void beforeHdfsCloudIncrementalBackupTest() throws Exception { + dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); + hdfsUri = HdfsTestUtil.getURI(dfsCluster); + try { + URI uri = new URI(hdfsUri); + Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster); + fs = FileSystem.get(uri, conf); - if (fs instanceof DistributedFileSystem) { - // Make sure dfs is not in safe mode - while (((DistributedFileSystem) fs).setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)) { - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - Thread.interrupted(); - // continue - } - } - } - - fs.mkdirs(new org.apache.hadoop.fs.Path("/backup")); - } catch (IOException | URISyntaxException e) { - throw new RuntimeException(e); + if (fs instanceof DistributedFileSystem) { + // Make sure dfs is not in safe mode + while (((DistributedFileSystem) fs) + .setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)) { + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + Thread.interrupted(); + // continue + } } + } - System.setProperty("solr.hdfs.default.backup.path", "/backup"); - System.setProperty("solr.hdfs.home", hdfsUri + "/solr"); - useFactory("solr.StandardDirectoryFactory"); - - configureCluster(NUM_SHARDS)// nodes - .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) - .withSolrXml(SOLR_XML) - .configure(); + fs.mkdirs(new org.apache.hadoop.fs.Path("/backup")); + } catch (IOException | URISyntaxException e) { + throw new RuntimeException(e); } - @AfterClass - public static void teardownClass() throws Exception { - IOUtils.closeQuietly(fs); - fs = null; - try { - HdfsTestUtil.teardownClass(dfsCluster); - } finally { - dfsCluster = null; - System.clearProperty("solr.hdfs.home"); - System.clearProperty("solr.hdfs.default.backup.path"); - System.clearProperty("test.build.data"); - System.clearProperty("test.cache.data"); - } - } + System.setProperty("solr.hdfs.default.backup.path", "/backup"); + System.setProperty("solr.hdfs.home", hdfsUri + "/solr"); + useFactory("solr.StandardDirectoryFactory"); - @Override - public String getCollectionNamePrefix() { - return "hdfsbackuprestore"; - } + configureCluster(NUM_SHARDS) // nodes + .addConfig( + "conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) + .withSolrXml(SOLR_XML) + .configure(); + } - @Override - public String getBackupLocation() { - return null; + @AfterClass + public static void afterHdfsCloudIncrementalBackupTest() throws Exception { + IOUtils.closeQuietly(fs); + fs = null; + try { + HdfsTestUtil.teardownClass(dfsCluster); + } finally { + dfsCluster = null; + System.clearProperty("solr.hdfs.home"); + System.clearProperty("solr.hdfs.default.backup.path"); + System.clearProperty("test.build.data"); + System.clearProperty("test.cache.data"); } + } + + @Override + public String getCollectionNamePrefix() { + return "hdfsbackuprestore"; + } + + @Override + public String getBackupLocation() { + return null; + } } diff --git 
a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java index a7f7c6daf99..6dade0da6a1 100644 --- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java +++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestHdfsCloudBackupRestore.java @@ -16,6 +16,7 @@ */ package org.apache.solr.cloud.api.collections; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.net.URI; @@ -67,6 +68,7 @@ QuickPatchThreadsFilter.class, BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) +@ThreadLeakLingering(linger = 5000) public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCase { public static final String SOLR_XML = "\n" + "\n" + @@ -110,7 +112,7 @@ public class TestHdfsCloudBackupRestore extends AbstractCloudBackupRestoreTestCa private static FileSystem fs; @BeforeClass - public static void setupClass() throws Exception { + public static void beforeTestHdfsCloudBackupRestore() throws Exception { dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); hdfsUri = HdfsTestUtil.getURI(dfsCluster); try { @@ -149,7 +151,7 @@ public static void setupClass() throws Exception { } @AfterClass - public static void teardownClass() throws Exception { + public static void afterTestHdfsCloudBackupRestore() throws Exception { IOUtils.closeQuietly(fs); fs = null; try { diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java index f70335d14d3..15f58b2abf6 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsNNFailoverTest.java @@ -16,6 +16,7 @@ */ package org.apache.solr.cloud.hdfs; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -36,6 +37,7 @@ QuickPatchThreadsFilter.class, BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) +@ThreadLeakLingering(linger = 5000) public class HdfsNNFailoverTest extends BasicDistributedZkTest { private static final String COLLECTION = "collection"; private static MiniDFSCluster dfsCluster; diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java index 8d8833f027e..97f8d20c467 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsRecoverLeaseTest.java @@ -16,11 +16,12 @@ */ package org.apache.solr.cloud.hdfs; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import java.net.URI; import java.util.HashSet; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -29,6 +30,7 @@ import org.apache.lucene.util.QuickPatchThreadsFilter; import org.apache.solr.SolrIgnoredThreadsFilter; import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.util.ObjectReleaseTracker; import org.apache.solr.util.BadHdfsThreadsFilter; import 
org.apache.solr.util.FSHDFSUtils; import org.apache.solr.util.FSHDFSUtils.CallerInfo; @@ -38,72 +40,76 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - -@ThreadLeakFilters(defaultFilters = true, filters = { - SolrIgnoredThreadsFilter.class, - QuickPatchThreadsFilter.class, - BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) -}) +@ThreadLeakFilters( + defaultFilters = true, + filters = { + SolrIgnoredThreadsFilter.class, + QuickPatchThreadsFilter.class, + BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) + }) +@ThreadLeakLingering(linger = 10000) public class HdfsRecoverLeaseTest extends SolrTestCaseJ4 { - + private static MiniDFSCluster dfsCluster; @BeforeClass - public static void beforeClass() throws Exception { + public static void beforeHdfsRecoverLeaseTest() throws Exception { dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false); } @AfterClass - public static void afterClass() throws Exception { + public static void afterHdfsRecoverLeaseTest() throws Exception { try { HdfsTestUtil.teardownClass(dfsCluster); } finally { dfsCluster = null; } } - + @Before public void setUp() throws Exception { super.setUp(); } - + @After public void tearDown() throws Exception { super.tearDown(); } - + @Test public void testBasic() throws IOException { long startRecoverLeaseSuccessCount = FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get(); - + URI uri = dfsCluster.getURI(); Path path = new Path(uri); Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster); FileSystem fs1 = FileSystem.get(path.toUri(), conf); Path testFile = new Path(uri.toString() + "/testfile"); FSDataOutputStream out = fs1.create(testFile); - + out.write(5); out.hflush(); out.close(); - FSHDFSUtils.recoverFileLease(fs1, testFile, conf, new CallerInfo() { - - @Override - public boolean isCallerClosed() { - return false; - } - }); + FSHDFSUtils.recoverFileLease( + fs1, + testFile, + conf, + new CallerInfo() { + + @Override + public boolean isCallerClosed() { + return false; + } + }); assertEquals(0, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount); - + fs1.close(); - FileSystem fs2 = FileSystem.get(path.toUri(), conf); Path testFile2 = new Path(uri.toString() + "/testfile2"); FSDataOutputStream out2 = fs2.create(testFile2); - + if (random().nextBoolean()) { int cnt = random().nextInt(100); for (int i = 0; i < cnt; i++) { @@ -112,38 +118,41 @@ public boolean isCallerClosed() { out2.hflush(); } - // closing the fs will close the file it seems // fs2.close(); - + FileSystem fs3 = FileSystem.get(path.toUri(), conf); - FSHDFSUtils.recoverFileLease(fs3, testFile2, conf, new CallerInfo() { - - @Override - public boolean isCallerClosed() { - return false; - } - }); + FSHDFSUtils.recoverFileLease( + fs3, + testFile2, + conf, + new CallerInfo() { + + @Override + public boolean isCallerClosed() { + return false; + } + }); assertEquals(1, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount); - + fs3.close(); fs2.close(); } - + @Test public void testMultiThreaded() throws Exception { long startRecoverLeaseSuccessCount = FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get(); - + final URI uri = dfsCluster.getURI(); final Path path = new Path(uri); final Configuration conf = HdfsTestUtil.getClientConfiguration(dfsCluster); - + // n threads create files class WriterThread extends Thread { private FileSystem fs; private int id; - + public WriterThread(int id) { 
this.id = id; try { @@ -152,14 +161,14 @@ public WriterThread(int id) { throw new RuntimeException(e); } } - + @Override public void run() { Path testFile = new Path(uri.toString() + "/file-" + id); FSDataOutputStream out; try { out = fs.create(testFile); - + if (random().nextBoolean()) { int cnt = random().nextInt(100); for (int i = 0; i < cnt; i++) { @@ -171,20 +180,20 @@ public void run() { throw new RuntimeException(); } } - + public void close() throws IOException { fs.close(); } - + public int getFileId() { return id; } } - + class RecoverThread extends Thread { private FileSystem fs; private int id; - + public RecoverThread(int id) { this.id = id; try { @@ -193,60 +202,63 @@ public RecoverThread(int id) { throw new RuntimeException(e); } } - + @Override public void run() { Path testFile = new Path(uri.toString() + "/file-" + id); try { - FSHDFSUtils.recoverFileLease(fs, testFile, conf, new CallerInfo() { - - @Override - public boolean isCallerClosed() { - return false; - } - }); + FSHDFSUtils.recoverFileLease( + fs, + testFile, + conf, + new CallerInfo() { + + @Override + public boolean isCallerClosed() { + return false; + } + }); } catch (IOException e) { throw new RuntimeException(e); } } - + public void close() throws IOException { fs.close(); } } - + Set writerThreads = new HashSet(); Set recoverThreads = new HashSet(); - + int threadCount = 3; for (int i = 0; i < threadCount; i++) { WriterThread wt = new WriterThread(i); writerThreads.add(wt); wt.run(); } - + for (WriterThread wt : writerThreads) { wt.join(); } - - Thread.sleep(2000); - + + Thread.sleep(1000); + for (WriterThread wt : writerThreads) { RecoverThread rt = new RecoverThread(wt.getFileId()); recoverThreads.add(rt); rt.run(); } - + for (WriterThread wt : writerThreads) { wt.close(); } - + for (RecoverThread rt : recoverThreads) { rt.close(); } - assertEquals(threadCount, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount); - + assertEquals( + threadCount, FSHDFSUtils.RECOVER_LEASE_SUCCESS_COUNT.get() - startRecoverLeaseSuccessCount); } - } diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java index 3872167d5ca..dc01f0aede8 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java @@ -16,6 +16,8 @@ */ package org.apache.solr.cloud.hdfs; +import io.netty.channel.EventLoopGroup; +import io.netty.util.concurrent.GlobalEventExecutor; import java.io.File; import java.lang.invoke.MethodHandles; import java.net.URI; @@ -30,6 +32,7 @@ import java.util.TimerTask; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinWorkerThread; +import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import org.apache.commons.lang3.time.FastDateFormat; @@ -61,6 +64,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; import static org.apache.lucene.util.LuceneTestCase.random; public class HdfsTestUtil { @@ -80,11 +84,11 @@ public class HdfsTestUtil { private static FileSystem badTlogOutStreamFs; public static MiniDFSCluster setupClass(String dir) throws Exception { - return setupClass(dir, true, true); + return setupClass(dir, TEST_NIGHTLY, true); } public static MiniDFSCluster setupClass(String dir, boolean haTesting) throws Exception { - return setupClass(dir, haTesting, true); + return setupClass(dir, TEST_NIGHTLY, 
haTesting); } public static void checkAssumptions() { @@ -278,6 +282,10 @@ private static Configuration getBasicConfiguration(Configuration conf) { conf.setBoolean("dfs.permissions.enabled", false); conf.set("hadoop.security.authentication", "simple"); conf.setBoolean("fs.hdfs.impl.disable.cache", true); + conf.setInt("solr.hdfs.lease.recovery.timeout", 300); + conf.setInt("solr.hdfs.lease.recovery.first.pause", 10); + conf.setInt("solr.hdfs.lease.recovery.pause", 10); + return conf; } @@ -331,6 +339,7 @@ public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception { } } } finally { + GlobalEventExecutor.INSTANCE.shutdownGracefully(0, 0, TimeUnit.SECONDS); System.clearProperty("test.build.data"); System.clearProperty("test.cache.data"); diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java index 2e04ee04dc0..b12e179648d 100644 --- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsThreadLeakTest.java @@ -16,6 +16,8 @@ */ package org.apache.solr.cloud.hdfs; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; @@ -31,13 +33,12 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - @ThreadLeakFilters(defaultFilters = true, filters = { SolrIgnoredThreadsFilter.class, QuickPatchThreadsFilter.class, BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) +@ThreadLeakLingering(linger = 5000) public class HdfsThreadLeakTest extends SolrTestCaseJ4 { private static MiniDFSCluster dfsCluster; @@ -53,8 +54,10 @@ public static void afterClass() throws Exception { } finally { dfsCluster = null; } + + interruptThreadsOnTearDown(); // not closed properly } - + @Test public void testBasic() throws IOException { String uri = HdfsTestUtil.getURI(dfsCluster); diff --git a/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java index 5c6bce79028..eb8520ac4bc 100644 --- a/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java +++ b/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java @@ -24,7 +24,6 @@ import java.util.Map; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.solr.SolrTestCaseJ4; @@ -38,63 +37,62 @@ public class CachingDirectoryFactoryTest extends SolrTestCaseJ4 { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - private Map dirs = new HashMap<>(); + private final Map dirs = new HashMap<>(); private volatile boolean stop = false; - + private static class Tracker { String path; AtomicInteger refCnt = new AtomicInteger(0); Directory dir; } - + @Test public void stressTest() throws Exception { doStressTest(new RAMDirectoryFactory()); doStressTest(new ByteBuffersDirectoryFactory()); } - + private void doStressTest(final CachingDirectoryFactory df) throws Exception { List threads = new ArrayList<>(); - int threadCount = 11; + int threadCount = TEST_NIGHTLY ? 
11 : 3; for (int i = 0; i < threadCount; i++) { Thread getDirThread = new GetDirThread(df); threads.add(getDirThread); getDirThread.start(); } - + for (int i = 0; i < 4; i++) { Thread releaseDirThread = new ReleaseDirThread(df); threads.add(releaseDirThread); releaseDirThread.start(); } - + for (int i = 0; i < 2; i++) { Thread incRefThread = new IncRefThread(df); threads.add(incRefThread); incRefThread.start(); } - Thread.sleep(TEST_NIGHTLY ? 30000 : 8000); - - Thread closeThread = new Thread() { - public void run() { - try { - df.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - }; + Thread.sleep(TEST_NIGHTLY ? 30000 : 4000); + + Thread closeThread = + new Thread() { + public void run() { + try { + df.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }; closeThread.start(); - - + stop = true; - + for (Thread thread : threads) { thread.join(); } - - + // do any remaining releases synchronized (dirs) { int sz = dirs.size(); @@ -107,38 +105,35 @@ public void run() { } } } - } - - closeThread.join(); + closeThread.join(); } - + private class ReleaseDirThread extends Thread { Random random; private CachingDirectoryFactory df; - + public ReleaseDirThread(CachingDirectoryFactory df) { this.df = df; } - + @Override public void run() { random = random(); while (!stop) { try { - Thread.sleep(random.nextInt(50) + 1); + Thread.sleep(random.nextInt(TEST_NIGHTLY ? 50 : 10) + 1); } catch (InterruptedException e1) { throw new RuntimeException(e1); } - + synchronized (dirs) { int sz = dirs.size(); List dirsList = new ArrayList<>(); dirsList.addAll(dirs.values()); if (sz > 0) { - Tracker tracker = dirsList.get(Math.min(dirsList.size() - 1, - random.nextInt(sz + 1))); + Tracker tracker = dirsList.get(Math.min(dirsList.size() - 1, random.nextInt(sz + 1))); try { if (tracker.refCnt.get() > 0) { if (random.nextInt(10) > 7) { @@ -157,19 +152,18 @@ public void run() { } } } - } } } - + private class GetDirThread extends Thread { Random random; private CachingDirectoryFactory df; - + public GetDirThread(CachingDirectoryFactory df) { this.df = df; } - + @Override public void run() { random = random(); @@ -187,7 +181,8 @@ public void run() { if (random.nextBoolean()) { path = "path" + random.nextInt(20) + "/" + random.nextInt(20); } else { - path = "path" + random.nextInt(20) + "/" + random.nextInt(20) + "/" + random.nextInt(20); + path = + "path" + random.nextInt(20) + "/" + random.nextInt(20) + "/" + random.nextInt(20); } } synchronized (dirs) { @@ -202,7 +197,7 @@ public void run() { } tracker.refCnt.incrementAndGet(); } - + } catch (AlreadyClosedException e) { log.warn("Cannot get dir, factory is already closed"); } catch (IOException e) { @@ -211,29 +206,29 @@ public void run() { } } } - + private class IncRefThread extends Thread { Random random; private CachingDirectoryFactory df; - + public IncRefThread(CachingDirectoryFactory df) { this.df = df; } - + @Override public void run() { random = random(); while (!stop) { try { - Thread.sleep(random.nextInt(300) + 1); + Thread.sleep(random.nextInt(TEST_NIGHTLY ? 
300 : 50) + 1); } catch (InterruptedException e1) { throw new RuntimeException(e1); } - + String path = "path" + random.nextInt(20); synchronized (dirs) { Tracker tracker = dirs.get(path); - + if (tracker != null && tracker.refCnt.get() > 0) { try { df.incRef(tracker.dir); @@ -241,13 +236,11 @@ public void run() { log.warn("", e); continue; } - + tracker.refCnt.incrementAndGet(); } } - } } } - } diff --git a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java index 4253176f3b7..363e162aa3d 100644 --- a/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java +++ b/solr/core/src/test/org/apache/solr/core/HdfsDirectoryFactoryTest.java @@ -16,6 +16,10 @@ */ package org.apache.solr.core; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import com.google.common.base.Strings; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Path; @@ -26,9 +30,6 @@ import java.util.Locale; import java.util.Map; import java.util.Random; - -import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import com.google.common.base.Strings; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.lucene.store.Directory; @@ -52,23 +53,24 @@ import org.junit.BeforeClass; import org.junit.Test; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - -@ThreadLeakFilters(defaultFilters = true, filters = { - SolrIgnoredThreadsFilter.class, - QuickPatchThreadsFilter.class, - BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) -}) +@ThreadLeakFilters( + defaultFilters = true, + filters = { + SolrIgnoredThreadsFilter.class, + QuickPatchThreadsFilter.class, + BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) + }) +@ThreadLeakLingering(linger = 5000) public class HdfsDirectoryFactoryTest extends SolrTestCaseJ4 { private static MiniDFSCluster dfsCluster; - + @BeforeClass - public static void setupClass() throws Exception { + public static void beforeHdfsDirectoryFactoryTest() throws Exception { dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath(), false); } - + @AfterClass - public static void teardownClass() throws Exception { + public static void afterHdfsDirectoryFactoryTest() throws Exception { try { HdfsTestUtil.teardownClass(dfsCluster); } finally { @@ -84,9 +86,10 @@ public static void teardownClass() throws Exception { @Test @SuppressWarnings({"try"}) public void testInitArgsOrSysPropConfig() throws Exception { - try(HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) { + try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) { // test sys prop config - System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1"); + System.setProperty( + HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1"); hdfsFactory.init(new NamedList<>()); String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor()); @@ -103,7 +106,8 @@ public void testInitArgsOrSysPropConfig() throws Exception { assertTrue(dataHome.endsWith("/solr2/mock/data")); // test sys prop and init args config - init args wins - System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1"); + System.setProperty( + 
HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1"); hdfsFactory.init(nl); dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor()); @@ -116,8 +120,11 @@ public void testInitArgsOrSysPropConfig() throws Exception { System.setProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY, confDir.toString()); - try (Directory dir = hdfsFactory - .create(HdfsTestUtil.getURI(dfsCluster) + "/solr", NoLockFactory.INSTANCE, DirContext.DEFAULT)) { + try (Directory dir = + hdfsFactory.create( + HdfsTestUtil.getURI(dfsCluster) + "/solr", + NoLockFactory.INSTANCE, + DirContext.DEFAULT)) { assertEquals(confDir.toString(), hdfsFactory.getConfDir()); } @@ -130,20 +137,23 @@ public void testInitArgsOrSysPropConfig() throws Exception { hdfsFactory.init(nl); - assertEquals(4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0)); + assertEquals( + 4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0)); assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false)); nl = new NamedList<>(); hdfsFactory.init(nl); System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "true"); - assertEquals(3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0)); + assertEquals( + 3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0)); assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false)); System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB); System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED); - assertEquals(0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0)); + assertEquals( + 0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0)); assertFalse(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false)); } } @@ -151,26 +161,31 @@ public void testInitArgsOrSysPropConfig() throws Exception { @Test public void testCleanupOldIndexDirectories() throws Exception { try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) { - System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1"); + System.setProperty( + HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1"); hdfsFactory.init(new NamedList<>()); String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor()); assertTrue(dataHome.endsWith("/solr1/mock/data")); System.clearProperty(HdfsDirectoryFactory.HDFS_HOME); - try(FileSystem hdfs = FileSystem.get(HdfsTestUtil.getClientConfiguration(dfsCluster))) { + try (FileSystem hdfs = FileSystem.get(HdfsTestUtil.getClientConfiguration(dfsCluster))) { org.apache.hadoop.fs.Path dataHomePath = new org.apache.hadoop.fs.Path(dataHome); - org.apache.hadoop.fs.Path currentIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index"); - assertFalse(checkHdfsDirectory(hdfs,currentIndexDirPath)); + org.apache.hadoop.fs.Path currentIndexDirPath = + new org.apache.hadoop.fs.Path(dataHomePath, "index"); + assertFalse(checkHdfsDirectory(hdfs, currentIndexDirPath)); hdfs.mkdirs(currentIndexDirPath); assertTrue(checkHdfsDirectory(hdfs, currentIndexDirPath)); - String timestamp1 = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date()); - org.apache.hadoop.fs.Path oldIndexDirPath = new org.apache.hadoop.fs.Path(dataHomePath, "index." 
+ timestamp1); - assertFalse(checkHdfsDirectory(hdfs,oldIndexDirPath)); + String timestamp1 = + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date()); + org.apache.hadoop.fs.Path oldIndexDirPath = + new org.apache.hadoop.fs.Path(dataHomePath, "index." + timestamp1); + assertFalse(checkHdfsDirectory(hdfs, oldIndexDirPath)); hdfs.mkdirs(oldIndexDirPath); assertTrue(checkHdfsDirectory(hdfs, oldIndexDirPath)); - hdfsFactory.cleanupOldIndexDirectories(dataHomePath.toString(), currentIndexDirPath.toString(), false); + hdfsFactory.cleanupOldIndexDirectories( + dataHomePath.toString(), currentIndexDirPath.toString(), false); assertTrue(checkHdfsDirectory(hdfs, currentIndexDirPath)); assertFalse(checkHdfsDirectory(hdfs, oldIndexDirPath)); @@ -178,18 +193,19 @@ public void testCleanupOldIndexDirectories() throws Exception { } } - private boolean checkHdfsDirectory(FileSystem hdfs, org.apache.hadoop.fs.Path path) throws IOException { + private boolean checkHdfsDirectory(FileSystem hdfs, org.apache.hadoop.fs.Path path) + throws IOException { try { return hdfs.getFileStatus(path).isDirectory(); } catch (FileNotFoundException e) { return false; } } - + @Test public void testLocalityReporter() throws Exception { Random r = random(); - try(HdfsDirectoryFactory factory = new HdfsDirectoryFactory()) { + try (HdfsDirectoryFactory factory = new HdfsDirectoryFactory()) { SolrMetricManager metricManager = new SolrMetricManager(); String registry = TestUtil.randomSimpleString(r, 2, 10); String scope = TestUtil.randomSimpleString(r, 2, 10); @@ -202,13 +218,25 @@ public void testLocalityReporter() throws Exception { factory.initializeMetrics(new SolrMetricsContext(metricManager, registry, "foo"), scope); // get the metrics map for the locality bean - MetricsMap metrics = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager.registry(registry).getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge(); + MetricsMap metrics = + (MetricsMap) + ((SolrMetricManager.GaugeWrapper) + metricManager + .registry(registry) + .getMetrics() + .get("OTHER." 
+ scope + ".hdfsLocality")) + .getGauge(); // We haven't done anything, so there should be no data Map statistics = metrics.getValue(); - assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), 0L, + assertEquals( + "Saw bytes that were not written: " + + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), + 0L, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL)); assertEquals( - "Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), 0, + "Counted bytes as local when none written: " + + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO), + 0, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO)); // create a directory and a file @@ -223,13 +251,21 @@ public void testLocalityReporter() throws Exception { // no locality because hostname not set factory.setHost("bogus"); statistics = metrics.getValue(); - assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), - long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL)); - assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL), - 1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL)); assertEquals( - "Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL), - 0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL)); + "Wrong number of total bytes counted: " + + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL), + long_bytes, + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL)); + assertEquals( + "Wrong number of total blocks counted: " + + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL), + 1, + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL)); + assertEquals( + "Counted block as local when bad hostname set: " + + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL), + 0, + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL)); // set hostname and check again factory.setHost("127.0.0.1"); @@ -237,21 +273,23 @@ public void testLocalityReporter() throws Exception { assertEquals( "Did not count block as local after setting hostname: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL), - long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL)); + long_bytes, + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL)); } } } @Test public void testIsAbsolute() throws Exception { - try(HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) { - String relativePath = Strings.repeat( - RandomStrings.randomAsciiAlphanumOfLength(random(), random().nextInt(10) + 1) + '/', - random().nextInt(5) + 1); + try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) { + String relativePath = + Strings.repeat( + RandomStrings.randomAsciiAlphanumOfLength(random(), random().nextInt(10) + 1) + '/', + random().nextInt(5) + 1); assertFalse(hdfsFactory.isAbsolute(relativePath)); assertFalse(hdfsFactory.isAbsolute("/" + relativePath)); - for(String rootPrefix : Arrays.asList("file://", "hdfs://", "s3a://", "foo://")) { + for (String rootPrefix : Arrays.asList("file://", "hdfs://", "s3a://", "foo://")) { assertTrue(hdfsFactory.isAbsolute(rootPrefix + relativePath)); } } diff --git a/solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java 
b/solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java index 3d6ab69b297..ddb71724aab 100644 --- a/solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java +++ b/solr/core/src/test/org/apache/solr/core/backup/repository/HdfsBackupRepositoryIntegrationTest.java @@ -17,6 +17,7 @@ package org.apache.solr.core.backup.repository; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -41,13 +42,14 @@ @ThreadLeakFilters(defaultFilters = true, filters = { BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) +@ThreadLeakLingering(linger = 5000) public class HdfsBackupRepositoryIntegrationTest extends AbstractBackupRepositoryTest { private static MiniDFSCluster dfsCluster; private static String hdfsUri; private static FileSystem fs; @BeforeClass - public static void setupClass() throws Exception { + public static void beforeHdfsBackupRepositoryIntegrationTest() throws Exception { dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); hdfsUri = HdfsTestUtil.getURI(dfsCluster); try { @@ -78,7 +80,7 @@ public static void setupClass() throws Exception { } @AfterClass - public static void teardownClass() throws Exception { + public static void afterHdfsBackupRepositoryIntegrationTest() throws Exception { IOUtils.closeQuietly(fs); fs = null; try { diff --git a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java b/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java index e4168e3a9bd..5a0aae7867e 100644 --- a/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java +++ b/solr/core/src/test/org/apache/solr/handler/TestHdfsBackupRestoreCore.java @@ -17,6 +17,12 @@ package org.apache.solr.handler; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import java.io.IOException; +import java.lang.invoke.MethodHandles; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.HashMap; +import java.util.Map; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -50,53 +56,49 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.lang.invoke.MethodHandles; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.HashMap; -import java.util.Map; - -@ThreadLeakFilters(defaultFilters = true, filters = { - SolrIgnoredThreadsFilter.class, - QuickPatchThreadsFilter.class, - BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) -}) -@SolrTestCaseJ4.SuppressSSL // Currently unknown why SSL does not work with this test +@ThreadLeakFilters( + defaultFilters = true, + filters = { + SolrIgnoredThreadsFilter.class, + QuickPatchThreadsFilter.class, + BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) + }) +@SolrTestCaseJ4.SuppressSSL // Currently unknown why SSL does not work with this test public class TestHdfsBackupRestoreCore extends SolrCloudTestCase { - public static final String HDFS_REPO_SOLR_XML = "\n" + - "\n" + - " ${shareSchema:false}\n" + - " ${configSetBaseDir:configsets}\n" + - " ${coreRootDirectory:.}\n" + - "\n" + - " \n" + - " ${urlScheme:}\n" + - " ${socketTimeout:90000}\n" + - " ${connTimeout:15000}\n" + - " \n" + - "\n" + - " \n" + - " 127.0.0.1\n" + - " ${hostPort:8983}\n" + - " ${hostContext:solr}\n" + - " 
${solr.zkclienttimeout:30000}\n" + - " ${genericCoreNodeNames:true}\n" + - " 10000\n" + - " ${distribUpdateConnTimeout:45000}\n" + - " ${distribUpdateSoTimeout:340000}\n" + - " \n" + - " \n" + - " \n" + - " \n" + - " ${solr.hdfs.default.backup.path}\n" + - " ${solr.hdfs.home:}\n" + - " ${solr.hdfs.confdir:}\n" + - " ${solr.hdfs.permissions.umask-mode:000}\n" + - " \n" + - " \n" + - " \n" + - "\n"; + public static final String HDFS_REPO_SOLR_XML = + "\n" + + "\n" + + " ${shareSchema:false}\n" + + " ${configSetBaseDir:configsets}\n" + + " ${coreRootDirectory:.}\n" + + "\n" + + " \n" + + " ${urlScheme:}\n" + + " ${socketTimeout:90000}\n" + + " ${connTimeout:15000}\n" + + " \n" + + "\n" + + " \n" + + " 127.0.0.1\n" + + " ${hostPort:8983}\n" + + " ${hostContext:solr}\n" + + " ${solr.zkclienttimeout:30000}\n" + + " ${genericCoreNodeNames:true}\n" + + " 10000\n" + + " ${distribUpdateConnTimeout:45000}\n" + + " ${distribUpdateSoTimeout:340000}\n" + + " \n" + + " \n" + + " \n" + + " \n" + + " ${solr.hdfs.default.backup.path}\n" + + " ${solr.hdfs.home:}\n" + + " ${solr.hdfs.confdir:}\n" + + " ${solr.hdfs.permissions.umask-mode:000}\n" + + " \n" + + " \n" + + " \n" + + "\n"; private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private static MiniDFSCluster dfsCluster; @@ -135,11 +137,12 @@ public static void setupClass() throws Exception { System.setProperty("solr.hdfs.home", hdfsUri + "/solr"); useFactory("solr.StandardDirectoryFactory"); - configureCluster(1)// nodes - .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) - .withSolrXml(HDFS_REPO_SOLR_XML) - .configure(); - + configureCluster(1) // nodes + .addConfig( + "conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf")) + .withSolrXml(HDFS_REPO_SOLR_XML) + .configure(); + docsSeed = random().nextLong(); } @@ -160,6 +163,7 @@ public static void teardownClass() throws Exception { System.clearProperty("test.cache.data"); } } + interruptThreadsOnTearDown(); // not closed properly } @Test @@ -172,7 +176,8 @@ public void test() throws Exception { int nDocs = BackupRestoreUtils.indexDocs(solrClient, collectionName, docsSeed); - DocCollection collectionState = solrClient.getZkStateReader().getClusterState().getCollection(collectionName); + DocCollection collectionState = + solrClient.getZkStateReader().getClusterState().getCollection(collectionName); assertEquals(1, collectionState.getActiveSlices().size()); Slice shard = collectionState.getActiveSlices().iterator().next(); assertEquals(1, shard.getReplicas().size()); @@ -190,39 +195,42 @@ public void test() throws Exception { // Create a backup. 
if (testViaReplicationHandler) { log.info("Running Backup via replication handler"); - BackupRestoreUtils.runReplicationHandlerCommand(baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName); - final BackupStatusChecker backupStatus - = new BackupStatusChecker(leaderClient, "/" + coreName + "/replication"); + BackupRestoreUtils.runReplicationHandlerCommand( + baseUrl, coreName, ReplicationHandler.CMD_BACKUP, "hdfs", backupName); + final BackupStatusChecker backupStatus = + new BackupStatusChecker(leaderClient, "/" + coreName + "/replication"); backupStatus.waitForBackupSuccess(backupName, 30); } else { log.info("Running Backup via core admin api"); - Map params = new HashMap<>(); + Map params = new HashMap<>(); params.put("name", backupName); params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs"); params.put(CoreAdminParams.SHARD_BACKUP_ID, shardBackupId); - BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params); + BackupRestoreUtils.runCoreAdminCommand( + replicaBaseUrl, coreName, CoreAdminAction.BACKUPCORE.toString(), params); } int numRestoreTests = nDocs > 0 ? TestUtil.nextInt(random(), 1, 5) : 1; - for (int attempts=0; attempts 0) { - //Delete a few docs + // Delete a few docs int numDeletes = TestUtil.nextInt(random(), 1, nDocs); - for(int i=0; i params = new HashMap<>(); + Map params = new HashMap<>(); params.put("name", "snapshot." + backupName); params.put(CoreAdminParams.BACKUP_REPOSITORY, "hdfs"); params.put(CoreAdminParams.SHARD_BACKUP_ID, shardBackupId); - BackupRestoreUtils.runCoreAdminCommand(replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params); + BackupRestoreUtils.runCoreAdminCommand( + replicaBaseUrl, coreName, CoreAdminAction.RESTORECORE.toString(), params); } - //See if restore was successful by checking if all the docs are present again + // See if restore was successful by checking if all the docs are present again BackupRestoreUtils.verifyDocs(nDocs, leaderClient, coreName); // Verify the permissions on the backup folder. - final String backupPath = (testViaReplicationHandler) ? - "/backup/snapshot."+ backupName : - "/backup/shard_backup_metadata"; - final FsAction expectedPerms = (testViaReplicationHandler) ? FsAction.ALL : FsAction.READ_EXECUTE; + final String backupPath = + (testViaReplicationHandler) + ? "/backup/snapshot." + backupName + : "/backup/shard_backup_metadata"; + final FsAction expectedPerms = + (testViaReplicationHandler) ? 
FsAction.ALL : FsAction.READ_EXECUTE; FileStatus status = fs.getFileStatus(new org.apache.hadoop.fs.Path(backupPath)); FsPermission perm = status.getPermission(); diff --git a/solr/core/src/test/org/apache/solr/handler/admin/DaemonStreamApiTest.java b/solr/core/src/test/org/apache/solr/handler/admin/DaemonStreamApiTest.java index 34227aea19d..c819ba25789 100644 --- a/solr/core/src/test/org/apache/solr/handler/admin/DaemonStreamApiTest.java +++ b/solr/core/src/test/org/apache/solr/handler/admin/DaemonStreamApiTest.java @@ -17,6 +17,7 @@ package org.apache.solr.handler.admin; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; @@ -32,14 +33,17 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.cloud.MiniSolrCloudCluster; +import org.apache.solr.cloud.hdfs.HdfsTestUtil; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.TimeSource; import org.apache.solr.handler.TestSQLHandler; import org.apache.solr.util.TimeOut; import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; import org.junit.Test; +@ThreadLeakLingering(linger = 5000) public class DaemonStreamApiTest extends SolrTestCaseJ4 { private MiniSolrCloudCluster cluster; @@ -61,6 +65,11 @@ public class DaemonStreamApiTest extends SolrTestCaseJ4 { private String url; + @AfterClass + public static void afterDaemonStreamApiTest() throws Exception { + interruptThreadsOnTearDown(); + } + @Override @Before diff --git a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java index b78e45bb211..65b6a090d9d 100644 --- a/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java +++ b/solr/core/src/test/org/apache/solr/metrics/reporters/SolrSlf4jReporterTest.java @@ -94,7 +94,7 @@ public void testReporter() throws Exception { if (history.stream().filter(d -> "foobar".equals(d.getFirstValue("logger"))).count() == 0) { fail("No 'foobar' logs in: " + history.toString()); } - if (history.stream().filter(d -> "x:collection1".equals(d.getFirstValue("core"))).count() == 0) { + if (history.stream().filter(d -> "collection1".equals(d.getFirstValue("core"))).count() == 0) { fail("No 'solr.core' or MDC context in logs: " + history.toString()); } } diff --git a/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java b/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java index 8eade38a943..02e6d20861b 100644 --- a/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java +++ b/solr/core/src/test/org/apache/solr/schema/SpatialRPTFieldTypeTest.java @@ -28,13 +28,13 @@ import org.locationtech.spatial4j.shape.Shape; public class SpatialRPTFieldTypeTest extends AbstractBadConfigTestBase { - + private static File tmpSolrHome; private static File tmpConfDir; - + private static final String collection = "collection1"; private static final String confDir = collection + "/conf"; - + @Before private void initManagedSchemaCore() throws Exception { tmpSolrHome = createTempDir().toFile(); @@ -48,14 +48,14 @@ private void initManagedSchemaCore() throws Exception { FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-minimal.xml"), tmpConfDir); FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema_codec.xml"), tmpConfDir); 
FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-bm25.xml"), tmpConfDir); - + // initCore will trigger an upgrade to managed schema, since the solrconfig has // System.setProperty("managed.schema.mutable", "false"); System.setProperty("enable.update.log", "false"); initCore("solrconfig-managed-schema.xml", "schema-minimal.xml", tmpSolrHome.getPath()); } - + @After private void afterClass() throws Exception { deleteCore(); @@ -68,47 +68,47 @@ private void afterClass() throws Exception { static final String DISTANCE_DEGREES = "1.3520328"; static final String DISTANCE_KILOMETERS = "150.33939"; static final String DISTANCE_MILES = "93.416565"; - + public void testDistanceUnitsDegrees() throws Exception { setupRPTField("degrees", "true"); - + assertU(adoc("str", "X", "geo", INDEXED_COORDINATES)); assertU(commit()); String q; - + q = "geo:{!geofilt score=distance filter=false sfield=geo pt="+QUERY_COORDINATES+" d=180}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_DEGREES+"']"); - + q = "geo:{!geofilt score=degrees filter=false sfield=geo pt="+QUERY_COORDINATES+" d=180}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_DEGREES+"']"); - + q = "geo:{!geofilt score=kilometers filter=false sfield=geo pt="+QUERY_COORDINATES+" d=180}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_KILOMETERS+"']"); - + q = "geo:{!geofilt score=miles filter=false sfield=geo pt="+QUERY_COORDINATES+" d=180}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_MILES+"']"); } - + public void testDistanceUnitsKilometers() throws Exception { setupRPTField("kilometers", "true"); - + assertU(adoc("str", "X", "geo", INDEXED_COORDINATES)); assertU(commit()); String q; - + q = "geo:{!geofilt score=distance filter=false sfield=geo pt="+QUERY_COORDINATES+" d=1000}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_KILOMETERS+"']"); - + q = "geo:{!geofilt score=degrees filter=false sfield=geo pt="+QUERY_COORDINATES+" d=1000}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_DEGREES+"']"); - + q = "geo:{!geofilt score=kilometers filter=false sfield=geo pt="+QUERY_COORDINATES+" d=1000}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_KILOMETERS+"']"); - + q = "geo:{!geofilt score=miles filter=false sfield=geo pt="+QUERY_COORDINATES+" d=1000}"; assertQ(req("q", q, "fl", "*,score"), "//result/doc/float[@name='score'][.='"+DISTANCE_MILES+"']"); } - + public void testJunkValuesForDistanceUnits() throws Exception { Exception ex = expectThrows(Exception.class, () -> setupRPTField("rose", "true")); assertTrue(ex.getMessage().startsWith("Must specify distanceUnits as one of")); @@ -116,17 +116,17 @@ public void testJunkValuesForDistanceUnits() throws Exception { public void testMaxDistErrConversion() throws Exception { deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); - + String 
fieldName = "new_text_field"; assertNull("Field '" + fieldName + "' is present in the schema", h.getCore().getLatestSchema().getFieldOrNull(fieldName)); - + IndexSchema oldSchema = h.getCore().getLatestSchema(); - + SpatialRecursivePrefixTreeFieldType rptFieldType = new SpatialRecursivePrefixTreeFieldType(); Map rptMap = new HashMap(); @@ -232,15 +232,15 @@ public void testShapeToFromStringGeoJSON() throws Exception { private void setupRPTField(String distanceUnits, String geo, String format, FieldType fieldType) throws Exception { deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); String fieldName = "new_text_field"; assertNull("Field '" + fieldName + "' is present in the schema", h.getCore().getLatestSchema().getFieldOrNull(fieldName)); - + IndexSchema oldSchema = h.getCore().getLatestSchema(); if (fieldType == null) { diff --git a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java index 652063b9988..54c679d096a 100644 --- a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java +++ b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchema.java @@ -65,12 +65,12 @@ public void test() throws Exception { NamedList collectionStatus = (NamedList)status.getVal(0); String collectionSchema = (String)collectionStatus.get(CoreAdminParams.SCHEMA); // Make sure the upgrade to managed schema happened - assertEquals("Schema resource name differs from expected name", "managed-schema", collectionSchema); + assertEquals("Schema resource name differs from expected name", "managed-schema.xml", collectionSchema); SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(), 30000); try { // Make sure "DO NOT EDIT" is in the content of the managed schema - String fileContent = getFileContentFromZooKeeper(zkClient, "/solr/configs/conf1/managed-schema"); + String fileContent = getFileContentFromZooKeeper(zkClient, "/solr/configs/conf1/managed-schema.xml"); assertTrue("Managed schema is missing", fileContent.contains("DO NOT EDIT")); // Make sure the original non-managed schema is no longer in ZooKeeper diff --git a/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java b/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java index d209f9d9edb..42a5a79a321 100644 --- a/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java +++ b/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java @@ -48,7 +48,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase { private static final String collection = "collection1"; private static final String confDir = collection + "/conf"; - + @Before private void initManagedSchemaCore() throws Exception { tmpSolrHome = createTempDir().toFile(); @@ -64,6 +64,7 @@ private void initManagedSchemaCore() throws Exception { FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema_codec.xml"), tmpConfDir); FileUtils.copyFileToDirectory(new File(testHomeConfDir, "schema-bm25.xml"), tmpConfDir); + // initCore will trigger an upgrade to managed 
schema, since the solrconfig has // System.setProperty("managed.schema.mutable", "false"); @@ -77,30 +78,30 @@ private void afterClass() throws Exception { System.clearProperty("managed.schema.mutable"); System.clearProperty("enable.update.log"); } - + public void testUpgrade() throws Exception { - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); assertTrue(managedSchemaFile.exists()); String managedSchema = FileUtils.readFileToString(managedSchemaFile, "UTF-8"); assertTrue(managedSchema.contains("DO NOT EDIT")); File upgradedOriginalSchemaFile = new File(tmpConfDir, "schema-minimal.xml.bak"); assertTrue(upgradedOriginalSchemaFile.exists()); - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); } - + public void testUpgradeThenRestart() throws Exception { - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); deleteCore(); File nonManagedSchemaFile = new File(tmpConfDir, "schema-minimal.xml"); assertFalse(nonManagedSchemaFile.exists()); initCore("solrconfig-managed-schema.xml", "schema-minimal.xml", tmpSolrHome.getPath()); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); assertTrue(managedSchemaFile.exists()); String managedSchema = FileUtils.readFileToString(managedSchemaFile, "UTF-8"); assertTrue(managedSchema.contains("DO NOT EDIT")); File upgradedOriginalSchemaFile = new File(tmpConfDir, "schema-minimal.xml.bak"); assertTrue(upgradedOriginalSchemaFile.exists()); - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); } public void testUpgradeThenRestartNonManaged() throws Exception { @@ -112,13 +113,13 @@ public void testUpgradeThenRestartNonManaged() throws Exception { } public void testUpgradeThenRestartNonManagedAfterPuttingBackNonManagedSchema() throws Exception { - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); deleteCore(); File nonManagedSchemaFile = new File(tmpConfDir, "schema-minimal.xml"); assertFalse(nonManagedSchemaFile.exists()); File upgradedOriginalSchemaFile = new File(tmpConfDir, "schema-minimal.xml.bak"); assertTrue(upgradedOriginalSchemaFile.exists()); - + // After upgrade to managed schema, downgrading to non-managed should work after putting back the non-managed schema. 
FileUtils.moveFile(upgradedOriginalSchemaFile, nonManagedSchemaFile); initCore("solrconfig-basic.xml", "schema-minimal.xml", tmpSolrHome.getPath()); @@ -135,9 +136,9 @@ public void testDefaultSchemaFactory() throws Exception { SolrQueryResponse response = new SolrQueryResponse(); admin.handleRequestBody(request, response); assertNull("Exception on create", response.getException()); - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); } - + private void assertSchemaResource(String collection, String expectedSchemaResource) throws Exception { final CoreContainer cores = h.getCoreContainer(); final CoreAdminHandler admin = new CoreAdminHandler(cores); @@ -153,7 +154,7 @@ private void assertSchemaResource(String collection, String expectedSchemaResour } public void testAddFieldWhenNotMutable() throws Exception { - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); String errString = "This ManagedIndexSchema is not mutable."; ignoreException(Pattern.quote(errString)); try { @@ -177,20 +178,20 @@ public void testAddFieldWhenNotMutable() throws Exception { resetExceptionIgnores(); } } - + public void testAddFieldPersistence() throws Exception { - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); - + assertTrue(managedSchemaFile.exists()); String managedSchemaContents = FileUtils.readFileToString(managedSchemaFile, "UTF-8"); assertFalse(managedSchemaContents.contains("\"new_field\"")); - + Map options = new HashMap<>(); options.put("stored", "false"); IndexSchema oldSchema = h.getCore().getLatestSchema(); @@ -206,12 +207,12 @@ public void testAddFieldPersistence() throws Exception { stream.close(); // Explicitly close so that Windows can delete this file assertTrue(managedSchemaContents.contains("")); } - + public void testAddedFieldIndexableAndQueryable() throws Exception { - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); @@ -254,16 +255,16 @@ public void testAddedFieldIndexableAndQueryable() throws Exception { assertQ(req("new_field:thing1"), "//*[@numFound='1']"); } - + public void testAddFieldWhenItAlreadyExists() throws Exception{ deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new 
schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); assertNotNull("Field 'str' is not present in the schema", h.getCore().getLatestSchema().getFieldOrNull("str")); - + String errString = "Field 'str' already exists."; ignoreException(Pattern.quote(errString)); try { @@ -290,8 +291,8 @@ public void testAddFieldWhenItAlreadyExists() throws Exception{ public void testAddSameFieldTwice() throws Exception{ deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); @@ -325,8 +326,8 @@ public void testAddSameFieldTwice() throws Exception{ public void testAddDynamicField() throws Exception{ deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); @@ -355,20 +356,20 @@ public void testAddDynamicField() throws Exception{ resetExceptionIgnores(); } } - + public void testAddWithSchemaCodecFactory() throws Exception { deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema_codec.xml", tmpSolrHome.getPath()); String uniqueKey = "string_f"; - assertNotNull("Unique key field '" + uniqueKey + "' is not present in the schema", + assertNotNull("Unique key field '" + uniqueKey + "' is not present in the schema", h.getCore().getLatestSchema().getFieldOrNull(uniqueKey)); String fieldName = "string_disk_new_field"; - assertNull("Field '" + fieldName + "' is present in the schema", + assertNull("Field '" + fieldName + "' is present in the schema", h.getCore().getLatestSchema().getFieldOrNull(fieldName)); Map options = new HashMap<>(); @@ -386,8 +387,8 @@ public void testAddWithSchemaCodecFactory() throws Exception { public void testAddWithSchemaSimilarityFactory() throws Exception { deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + 
Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-bm25.xml", tmpSolrHome.getPath()); @@ -413,10 +414,10 @@ public void testAddWithSchemaSimilarityFactory() throws Exception { } public void testPersistUniqueKey() throws Exception { - assertSchemaResource(collection, "managed-schema"); + assertSchemaResource(collection, "managed-schema.xml"); deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field-unique-key.xml", tmpSolrHome.getPath()); @@ -451,8 +452,8 @@ public void testPersistUniqueKey() throws Exception { public void testAddFieldThenReload() throws Exception { deleteCore(); - File managedSchemaFile = new File(tmpConfDir, "managed-schema"); - Files.delete(managedSchemaFile.toPath()); // Delete managed-schema so it won't block parsing a new schema + File managedSchemaFile = new File(tmpConfDir, "managed-schema.xml"); + Files.delete(managedSchemaFile.toPath()); // Delete managed-schema.xml so it won't block parsing a new schema System.setProperty("managed.schema.mutable", "true"); initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath()); diff --git a/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java b/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java index fc2689f7f42..d5259670510 100644 --- a/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java +++ b/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java @@ -57,7 +57,8 @@ public class TestRangeQuery extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { - initCore("solrconfig.xml", "schema11.xml"); + // use a solrconfig that does not have autowarming + initCore("solrconfig_perf.xml", "schema11.xml"); } @Override @@ -375,8 +376,6 @@ public void testRandomRangeQueries() throws Exception { @Test public void testRangeQueryWithFilterCache() throws Exception { - TestInjection.delayBeforeCreatingNewDocSet = 500; - // sometimes a very small index, sometimes a very large index // final int numDocs = random().nextBoolean() ? random().nextInt(50) : atLeast(1000); final int numDocs = 99; @@ -384,30 +383,40 @@ public void testRangeQueryWithFilterCache() throws Exception { addInt(doc, 0, 0, "foo_i"); }); - ExecutorService queryService = ExecutorUtil.newMDCAwareFixedThreadPool(4, new SolrNamedThreadFactory("TestRangeQuery")); - try (SolrCore core = h.getCoreInc()) { - SolrRequestHandler defaultHandler = core.getRequestHandler(""); + // ensure delay comes after createIndex - so we don't affect/count any cache warming from queries left over by other test methods + TestInjection.delayBeforeCreatingNewDocSet = TEST_NIGHTLY ? 50 : 500; // Run more queries nightly, so use shorter delay + + final int MAX_QUERY_RANGE = 222; // Arbitrary number in the middle of the value range + final int QUERY_START = TEST_NIGHTLY ? 1 : MAX_QUERY_RANGE; // Either run queries for the full range, or just the last one + final int NUM_QUERIES = TEST_NIGHTLY ? 
101 : 10; + for (int j = QUERY_START ; j <= MAX_QUERY_RANGE; j++) { + ExecutorService queryService = ExecutorUtil.newMDCAwareFixedThreadPool(4, new SolrNamedThreadFactory("TestRangeQuery-" + j)); + try (SolrCore core = h.getCoreInc()) { + SolrRequestHandler defaultHandler = core.getRequestHandler(""); + + ModifiableSolrParams params = new ModifiableSolrParams(); + params.set("q", "*:*"); + params.add("fq", "id:[0 TO " + j + "]"); // These should all come from FilterCache + + // Regular: 10 threads with 4 executors would be enough for 3 waves, or approximately 1500ms of delay + // Nightly: 101 threads with 4 executors is 26 waves, approximately 1300ms delay + CountDownLatch atLeastOnceCompleted = new CountDownLatch(TEST_NIGHTLY ? 30 : 1); + for (int i = 0; i < NUM_QUERIES; i++) { + queryService.submit(() -> { + try (SolrQueryRequest req = req(params)) { + core.execute(defaultHandler, req, new SolrQueryResponse()); + } + atLeastOnceCompleted.countDown(); + }); + } - ModifiableSolrParams params = new ModifiableSolrParams(); - params.set("q", "*:*"); - params.add("fq", "id:[0 TO 222]"); // These should all come from FilterCache + queryService.shutdown(); // No more requests will be queued up + atLeastOnceCompleted.await(); // Wait for the first batch of queries to complete + assertTrue(queryService.awaitTermination(1, TimeUnit.SECONDS)); // All queries after should be very fast - // 10 threads with 4 executors would be enough for 3 waves, or approximately 1500ms of delay - CountDownLatch atLeastOnceCompleted = new CountDownLatch(1); - for (int i = 0; i < 10; i++) { - queryService.submit(() -> { - try (SolrQueryRequest req = req(params)) { - core.execute(defaultHandler, req, new SolrQueryResponse()); - } - atLeastOnceCompleted.countDown(); - }); + assertEquals("Create only one DocSet outside of cache", 1, TestInjection.countDocSetDelays.get()); } - - queryService.shutdown(); // No more requests will be queued up - atLeastOnceCompleted.await(); // Wait for the first query to complete - assertTrue(queryService.awaitTermination(1, TimeUnit.SECONDS)); // All queries after should be very fast - - assertEquals("Create only one DocSet outside of cache", 1, TestInjection.countDocSetDelays.get()); + TestInjection.countDocSetDelays.set(0); } } diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java index 772004687c8..a53c038b451 100644 --- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java +++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java @@ -16,6 +16,7 @@ */ package org.apache.solr.store.hdfs; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import java.io.IOException; import java.nio.file.FileAlreadyExistsException; import java.util.HashSet; @@ -48,14 +49,15 @@ QuickPatchThreadsFilter.class, BadHdfsThreadsFilter.class // hdfs currently leaks thread(s) }) +@ThreadLeakLingering(linger = 5000) public class HdfsDirectoryTest extends SolrTestCaseJ4 { - private static final int MAX_NUMBER_OF_WRITES = 10000; + private static final int MAX_NUMBER_OF_WRITES = 1000; private static final int MIN_FILE_SIZE = 100; - private static final int MAX_FILE_SIZE = 100000; + private static final int MAX_FILE_SIZE = 10000; private static final int MIN_BUFFER_SIZE = 1; private static final int MAX_BUFFER_SIZE = 5000; - private static final int MAX_NUMBER_OF_READS = 10000; + private static final int MAX_NUMBER_OF_READS = 1000; private static MiniDFSCluster 
dfsCluster; private Configuration directoryConf; private Path directoryPath; @@ -63,12 +65,12 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 { private Random random; @BeforeClass - public static void beforeClass() throws Exception { + public static void beforeHdfsDirectoryTest() throws Exception { dfsCluster = HdfsTestUtil.setupClass(createTempDir().toFile().getAbsolutePath()); } @AfterClass - public static void afterClass() throws Exception { + public static void afterHdfsDirectoryTest() throws Exception { try { HdfsTestUtil.teardownClass(dfsCluster); } finally { diff --git a/solr/docker/tests/cases/test_log4j/log4j2.xml b/solr/docker/tests/cases/test_log4j/log4j2.xml index 9a629595b13..5e319cc12be 100644 --- a/solr/docker/tests/cases/test_log4j/log4j2.xml +++ b/solr/docker/tests/cases/test_log4j/log4j2.xml @@ -22,7 +22,7 @@ - %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n + %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%n @@ -33,7 +33,7 @@ filePattern="${sys:solr.log.dir}/solr.log.%i" > - %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n + %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%n @@ -61,7 +61,7 @@ filePattern="${sys:solr.log.dir}/solr_slow_requests.log.%i" > - %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%n + %d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%n diff --git a/solr/server/etc/jetty-http.xml b/solr/server/etc/jetty-http.xml index 4793c1f3194..d4fceccc976 100644 --- a/solr/server/etc/jetty-http.xml +++ b/solr/server/etc/jetty-http.xml @@ -29,6 +29,8 @@ + 512 + 16393 diff --git a/solr/server/etc/jetty-https.xml b/solr/server/etc/jetty-https.xml index ab03ba4d867..c13cf9ed865 100644 --- a/solr/server/etc/jetty-https.xml +++ b/solr/server/etc/jetty-https.xml @@ -54,6 +54,7 @@ + 512 diff --git a/solr/server/resources/log4j2.xml b/solr/server/resources/log4j2.xml index 0a545b2a68c..4ec2fa2ecfb 100644 --- a/solr/server/resources/log4j2.xml +++ b/solr/server/resources/log4j2.xml @@ -23,7 +23,7 @@ - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -34,7 +34,7 @@ filePattern="${sys:solr.log.dir}/solr.log.%i" > - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n @@ -50,7 +50,7 @@ filePattern="${sys:solr.log.dir}/solr_slow_requests.log.%i" > - %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%X{collection} %X{shard} %X{replica} %X{core}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n + %maxLen{%d{yyyy-MM-dd HH:mm:ss.SSS} %-5p (%t) [%notEmpty{c:%X{collection}}%notEmpty{ s:%X{shard}}%notEmpty{ r:%X{replica}}%notEmpty{ x:%X{core}}] %c{1.} %m%notEmpty{ =>%ex{short}}}{10240}%n diff 
--git a/solr/server/solr/configsets/_default/conf/managed-schema b/solr/server/solr/configsets/_default/conf/managed-schema.xml similarity index 97% rename from solr/server/solr/configsets/_default/conf/managed-schema rename to solr/server/solr/configsets/_default/conf/managed-schema.xml index 7d46d789a1f..5be18eb5d80 100644 --- a/solr/server/solr/configsets/_default/conf/managed-schema +++ b/solr/server/solr/configsets/_default/conf/managed-schema.xml
[The XML body of this renamed schema file is not recoverable from this copy: its markup was stripped, leaving only hunk headers and comment fragments. The one change still legible is the schema resource name update near the end of the file: @@ -1018,8 +1018,8 @@ ... - managed-schema + managed-schema.xml]
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java index 2e9b5e8de30..8548677cb68 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java @@ -16,6 +16,8 @@ */ package org.apache.solr.client.solrj.request; +import static org.apache.solr.common.params.CommonParams.CHILDDOC; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -28,7 +30,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import junit.framework.Assert; import org.apache.solr.SolrTestCase; import org.apache.solr.common.IteratorWriter; @@ -39,8 +40,6 @@ import org.apache.solr.common.util.Utils; import org.junit.Test; -import static org.apache.solr.common.params.CommonParams.CHILDDOC; - /** * Test for UpdateRequestCodec * @@ -50,7 +49,8 @@ public class TestUpdateRequestCodec extends SolrTestCase { @Test - // commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 + // commented out on: 24-Dec-2018 + // @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 public void simple() throws IOException { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.deleteById("*:*"); @@ -83,19 +83,21 @@ public void simple() throws IOException { doc.addField("foobar", foobar); updateRequest.add(doc); -// updateRequest.setWaitFlush(true); + // updateRequest.setWaitFlush(true); updateRequest.deleteById("2"); updateRequest.deleteByQuery("id:3"); JavaBinUpdateRequestCodec codec = new JavaBinUpdateRequestCodec(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); codec.marshal(updateRequest, baos); final List docs = new ArrayList<>(); - JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = (document, req, commitWithin, overwrite) -> { - Assert.assertNotNull(req.getParams()); - docs.add(document); - }; + JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = + (document, req, commitWithin, overwrite) -> { + Assert.assertNotNull(req.getParams()); + docs.add(document); + }; - UpdateRequest updateUnmarshalled = codec.unmarshal(new ByteArrayInputStream(baos.toByteArray()), handler); + UpdateRequest updateUnmarshalled = + codec.unmarshal(new ByteArrayInputStream(baos.toByteArray()), handler); for (SolrInputDocument document : docs) { updateUnmarshalled.add(document); @@ -105,16 +107,17 @@ public void simple() throws IOException { SolrInputDocument outDoc = updateUnmarshalled.getDocuments().get(i); compareDocs("doc#" + i, inDoc, outDoc); } - Assert.assertEquals(updateUnmarshalled.getDeleteById().get(0), - updateRequest.getDeleteById().get(0)); - Assert.assertEquals(updateUnmarshalled.getDeleteQuery().get(0), - updateRequest.getDeleteQuery().get(0)); + Assert.assertEquals( + updateUnmarshalled.getDeleteById().get(0), updateRequest.getDeleteById().get(0)); + Assert.assertEquals( + updateUnmarshalled.getDeleteQuery().get(0),
updateRequest.getDeleteQuery().get(0)); assertEquals("b", updateUnmarshalled.getParams().get("a")); } @Test - // commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 + // commented out on: 24-Dec-2018 + // @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 public void testIteratable() throws IOException { final List values = new ArrayList<>(); values.add("iterItem1"); @@ -126,7 +129,7 @@ public void testIteratable() throws IOException { SolrInputDocument doc = new SolrInputDocument(); doc.addField("id", 1); doc.addField("desc", "one"); - // imagine someone adding a custom Bean that implements Iterable + // imagine someone adding a custom Bean that implements Iterable // but is not a Collection doc.addField("iter", (Iterable) values::iterator); doc.addField("desc", "1"); @@ -136,12 +139,14 @@ public void testIteratable() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); codec.marshal(updateRequest, baos); final List docs = new ArrayList<>(); - JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = (document, req, commitWithin, overwrite) -> { - Assert.assertNotNull(req.getParams()); - docs.add(document); - }; + JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = + (document, req, commitWithin, overwrite) -> { + Assert.assertNotNull(req.getParams()); + docs.add(document); + }; - UpdateRequest updateUnmarshalled = codec.unmarshal(new ByteArrayInputStream(baos.toByteArray()), handler); + UpdateRequest updateUnmarshalled = + codec.unmarshal(new ByteArrayInputStream(baos.toByteArray()), handler); for (SolrInputDocument document : docs) { updateUnmarshalled.add(document); @@ -151,32 +156,39 @@ public void testIteratable() throws IOException { SolrInputField iter = outDoc.getField("iter"); Assert.assertNotNull("iter field is null", iter); Object iterVal = iter.getValue(); - Assert.assertTrue("iterVal is not a Collection", - iterVal instanceof Collection); - Assert.assertEquals("iterVal contents", values, iterVal); - + Assert.assertTrue("iterVal is not a Collection", iterVal instanceof Collection); + Assert.assertEquals("iterVal contents", values.toString(), iterVal.toString()); } - //this format accepts a 1:1 mapping of the json format and javabin format + // this format accepts a 1:1 mapping of the json format and javabin format public void testStreamableInputDocFormat() throws IOException { Map m = new LinkedHashMap<>(); m.put("id", "1"); m.put("desc", "The desc 1"); - m.put(CHILDDOC, (MapWriter) ew -> { - ew.put("id","1.1"); - ew.put("desc" ,"The desc 1.1"); - ew.put(CHILDDOC, (IteratorWriter) iw -> { - iw.add(Map.of("id", "1.1.1","desc","The desc 1.1.1")); - iw.add((MapWriter) ew1 -> { - ew1.put("id", "1.1.2"); - ew1.put("desc", "The desc 1.1.2"); - }); - }); - }); - MapWriter m2 = ew -> { - ew.put("id", "2"); - ew.put("des", "The desc 2"); - }; + m.put( + CHILDDOC, + (MapWriter) + ew -> { + ew.put("id", "1.1"); + ew.put("desc", "The desc 1.1"); + ew.put( + CHILDDOC, + (IteratorWriter) + iw -> { + iw.add(Map.of("id", "1.1.1", "desc", "The desc 1.1.1")); + iw.add( + (MapWriter) + ew1 -> { + ew1.put("id", "1.1.2"); + ew1.put("desc", "The desc 1.1.2"); + }); + }); + }); + MapWriter m2 = + ew -> { + ew.put("id", "2"); + ew.put("des", "The desc 2"); + }; List l = new ArrayList<>(); l.add(m); @@ -184,22 +196,25 @@ public void testStreamableInputDocFormat() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); new 
JavaBinCodec().marshal(l.iterator(), baos); - List l2 = new ArrayList<>(); + List l2 = new ArrayList<>(); - new JavaBinUpdateRequestCodec().unmarshal(new ByteArrayInputStream(baos.toByteArray()), (document, req, commitWithin, override) -> l2.add(document)); + new JavaBinUpdateRequestCodec() + .unmarshal( + new ByteArrayInputStream(baos.toByteArray()), + (document, req, commitWithin, override) -> l2.add(document)); - assertEquals(l2.get(0).getChildDocuments().size(), 1); - - Object o = Utils.fromJSONString(Utils.writeJson(l.get(0), new StringWriter(), true).toString()); - Object cdoc = Utils.getObjectByPath(o, false, CHILDDOC); - assertEquals(Utils.writeJson(cdoc, new StringWriter(), true).toString(), - Utils.writeJson(l2.get(0).getChildDocuments().get(0) ,new StringWriter(), true).toString()); + assertEquals(l2.get(0).getChildDocuments().size(), 1); + Object o = Utils.fromJSONString(Utils.writeJson(l.get(0), new StringWriter(), true).toString()); + Object cdoc = Utils.getObjectByPath(o, false, CHILDDOC); + assertEquals( + Utils.writeJson(cdoc, new StringWriter(), true).toString(), + Utils.writeJson(l2.get(0).getChildDocuments().get(0), new StringWriter(), true).toString()); } - @Test - // commented out on: 24-Dec-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 + // commented out on: 24-Dec-2018 + // @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Sep-2018 public void testBackCompat4_5() throws IOException { UpdateRequest updateRequest = new UpdateRequest(); @@ -236,15 +251,24 @@ public void testBackCompat4_5() throws IOException { updateRequest.deleteById("2"); updateRequest.deleteByQuery("id:3"); - InputStream is = getClass().getResourceAsStream("/solrj/updateReq_4_5.bin"); assertNotNull("updateReq_4_5.bin was not found", is); - UpdateRequest updateUnmarshalled = new JavaBinUpdateRequestCodec().unmarshal(is, (document, req, commitWithin, override) -> { - if (commitWithin == null) { - req.add(document); - } - System.err.println("Doc" + document + " ,commitWithin:" + commitWithin + " , override:" + override); - }); + UpdateRequest updateUnmarshalled = + new JavaBinUpdateRequestCodec() + .unmarshal( + is, + (document, req, commitWithin, override) -> { + if (commitWithin == null) { + req.add(document); + } + System.err.println( + "Doc" + + document + + " ,commitWithin:" + + commitWithin + + " , override:" + + override); + }); System.err.println(updateUnmarshalled.getDocumentsMap()); System.err.println(updateUnmarshalled.getDocuments()); @@ -254,38 +278,41 @@ public void testBackCompat4_5() throws IOException { SolrInputDocument outDoc = updateUnmarshalled.getDocuments().get(i); compareDocs("doc#" + i, inDoc, outDoc); } - Assert.assertEquals(updateUnmarshalled.getDeleteById().get(0), - updateRequest.getDeleteById().get(0)); - Assert.assertEquals(updateUnmarshalled.getDeleteQuery().get(0), - updateRequest.getDeleteQuery().get(0)); + Assert.assertEquals( + updateUnmarshalled.getDeleteById().get(0), updateRequest.getDeleteById().get(0)); + Assert.assertEquals( + updateUnmarshalled.getDeleteQuery().get(0), updateRequest.getDeleteQuery().get(0)); assertEquals("b", updateUnmarshalled.getParams().get("a")); is.close(); } - - private void compareDocs(String m, - SolrInputDocument expectedDoc, - SolrInputDocument actualDoc) { + private void compareDocs(String m, SolrInputDocument expectedDoc, SolrInputDocument actualDoc) { for (String s : expectedDoc.getFieldNames()) { SolrInputField expectedField = expectedDoc.getField(s); 
SolrInputField actualField = actualDoc.getField(s); Object expectedVal = expectedField.getValue(); Object actualVal = actualField.getValue(); - if (expectedVal instanceof Set && - actualVal instanceof Collection) { - // unmarshaled documents never contain Sets, they are just a - // List in an arbitrary order based on what the iterator of + if (expectedVal instanceof Set && actualVal instanceof Collection) { + // unmarshaled documents never contain Sets, they are just a + // List in an arbitrary order based on what the iterator of // the original Set returned, so we need a comparison that is // order agnostic. actualVal = new HashSet<>((Collection) actualVal); m += " (Set comparison)"; } - Assert.assertEquals(m + " diff values for field: " + s, - expectedVal, actualVal); + Assert.assertEquals( + m + + " " + + expectedVal.getClass().getName() + + " diff values for field: " + + s + + " " + + actualVal.getClass().getName(), + expectedVal.toString(), + actualVal.toString()); } } - } diff --git a/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java b/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java index f22c7cdc5ca..ab3a7410f23 100644 --- a/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java +++ b/solr/solrj/src/test/org/apache/solr/common/cloud/TestDocCollectionWatcher.java @@ -45,7 +45,7 @@ public class TestDocCollectionWatcher extends SolrCloudTestCase { private static final int CLUSTER_SIZE = 4; - private static final int MAX_WAIT_TIMEOUT = 120; // seconds, only use for await -- NO SLEEP!!! + private static final int MAX_WAIT_TIMEOUT = 20; // seconds, only use for await -- NO SLEEP!!! private ExecutorService executor = null; @@ -86,7 +86,7 @@ private void waitFor(String message, long timeout, TimeUnit unit, Callable= 0x01 && code <= 0x7F) - os.write(code); + if (code >= 0x01 && code <= 0x7F) os.write(code); else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) { os.write(0xC0 | (code >> 6)); os.write(0x80 | (code & 0x3F)); @@ -90,4 +88,12 @@ else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) { } } } + + private static void writeVInt(int i, FastOutputStream out) throws IOException { + while ((i & ~0x7F) != 0) { + out.writeByte((byte) ((i & 0x7f) | 0x80)); + i >>>= 7; + } + out.writeByte((byte) i); + } } diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestFastJavabinDecoder.java b/solr/solrj/src/test/org/apache/solr/common/util/TestFastJavabinDecoder.java index 3ea5f44c56a..543e8361b81 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/TestFastJavabinDecoder.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/TestFastJavabinDecoder.java @@ -25,7 +25,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; - import org.apache.commons.io.IOUtils; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.client.solrj.FastStreamingDocsCallback; @@ -41,38 +40,37 @@ public void testTagRead() throws Exception { BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS(); FastOutputStream faos = FastOutputStream.wrap(baos); - try (JavaBinCodec codec = new JavaBinCodec(faos, null)) { - codec.writeVal(10); - codec.writeVal(100); - codec.writeVal("Hello!"); + try (JavaBinCodec codec = new JavaBinCodec(faos, null, true)) { + JavaBinCodec.writeVal(codec, 10); + JavaBinCodec.writeVal(codec, 100); + JavaBinCodec.writeVal(codec, "Hello!"); } faos.flushBuffer(); faos.close(); - FastInputStream fis = new FastInputStream(null, baos.getbuf(), 0, 
baos.size()); try (FastJavaBinDecoder.StreamCodec scodec = new FastJavaBinDecoder.StreamCodec(fis)) { scodec.start(); Tag tag = scodec.getTag(); assertEquals(Tag._SINT, tag); - assertEquals(10, scodec.readSmallInt(scodec.dis)); + assertEquals(10, JavaBinCodec.readSmallInt(scodec)); tag = scodec.getTag(); assertEquals(Tag._SINT, tag); - assertEquals(100, scodec.readSmallInt(scodec.dis)); + assertEquals(100, JavaBinCodec.readSmallInt(scodec)); tag = scodec.getTag(); assertEquals(Tag._STR, tag); - assertEquals("Hello!", scodec.readStr(fis)); + assertEquals("Hello!", JavaBinCodec.readStr(scodec)); } } public void testSimple() throws IOException { - String sampleObj = "{k : v , " + - "mapk : {k1: v1, k2 : [v2_1 , v2_2 ]}," + - "listk : [ 1, 2, 3 ]," + - "maps : [ {id: kov1}, {id : kov2} ,{id:kov3 , longv : 234} ]," + - "}"; - + String sampleObj = + "{k : v , " + + "mapk : {k1: v1, k2 : [v2_1 , v2_2 ]}," + + "listk : [ 1, 2, 3 ]," + + "maps : [ {id: kov1}, {id : kov2} ,{id:kov3 , longv : 234} ]," + + "}"; @SuppressWarnings({"rawtypes"}) Map m = (Map) Utils.fromJSONString(sampleObj); @@ -87,37 +85,47 @@ public void testSimple() throws IOException { m2 = (Map) jbc.unmarshal(new FastInputStream(null, baos.getbuf(), 0, baos.size())); } @SuppressWarnings({"rawtypes"}) - LinkedHashMap fastMap = (LinkedHashMap) new FastJavaBinDecoder() - .withInputStream(new FastInputStream(null, baos.getbuf(), 0, baos.size())) - .decode(FastJavaBinDecoder.getEntryListener()); - assertEquals(Utils.writeJson(m2, new StringWriter(), true).toString(), + LinkedHashMap fastMap = + (LinkedHashMap) + new FastJavaBinDecoder() + .withInputStream(new FastInputStream(null, baos.getbuf(), 0, baos.size())) + .decode(FastJavaBinDecoder.getEntryListener()); + assertEquals( + Utils.writeJson(m2, new StringWriter(), true).toString(), Utils.writeJson(fastMap, new StringWriter(), true).toString()); @SuppressWarnings({"unchecked", "rawtypes"}) - Object newMap = new FastJavaBinDecoder() - .withInputStream(new FastInputStream(null, baos.getbuf(), 0, baos.size())) - .decode(e -> { - e.listenContainer(new LinkedHashMap<>(), e_ -> { - Map rootMap = (Map) e_.ctx(); - if (e_.type() == DataEntry.Type.ENTRY_ITER) { - e_.listenContainer(rootMap.computeIfAbsent(e_.name(), o -> new ArrayList<>()), - FastJavaBinDecoder.getEntryListener()); - } else if (e_.type() == DataEntry.Type.KEYVAL_ITER) { - e_.listenContainer(rootMap.computeIfAbsent(e_.name(), o -> new LinkedHashMap<>()), e1 -> { - Map m1 = (Map) e1.ctx(); - if ("k1".equals(e1.name())) { - m1.put(e1.name(), e1.val().toString()); - } - //eat up k2 - }); - } else if (e_.type() == DataEntry.Type.STR) { - rootMap.put(e_.name(), e_.val().toString()); - } - - }); - }); + Object newMap = + new FastJavaBinDecoder() + .withInputStream(new FastInputStream(null, baos.getbuf(), 0, baos.size())) + .decode( + e -> { + e.listenContainer( + new LinkedHashMap<>(), + e_ -> { + Map rootMap = (Map) e_.ctx(); + if (e_.type() == DataEntry.Type.ENTRY_ITER) { + e_.listenContainer( + rootMap.computeIfAbsent(e_.name(), o -> new ArrayList<>()), + FastJavaBinDecoder.getEntryListener()); + } else if (e_.type() == DataEntry.Type.KEYVAL_ITER) { + e_.listenContainer( + rootMap.computeIfAbsent(e_.name(), o -> new LinkedHashMap<>()), + e1 -> { + Map m1 = (Map) e1.ctx(); + if ("k1".equals(e1.name())) { + m1.put(e1.name(), e1.val().toString()); + } + // eat up k2 + }); + } else if (e_.type() == DataEntry.Type.STR) { + rootMap.put(e_.name(), e_.val().toString()); + } + }); + }); ((Map) m2.get("mapk")).remove("k2"); - 
assertEquals(Utils.writeJson(m2, new StringWriter(), true).toString(), + assertEquals( + Utils.writeJson(m2, new StringWriter(), true).toString(), Utils.writeJson(newMap, new StringWriter(), true).toString()); } @@ -134,56 +142,58 @@ public void testFastJavabinStreamingDecoder() throws IOException { list = (SolrDocumentList) o.get("response"); } - System.out.println(" " + list.getNumFound() + " , " + list.getStart() + " , " + list.getMaxScore()); + System.out.println( + " " + list.getNumFound() + " , " + list.getStart() + " , " + list.getMaxScore()); class Pojo { long _idx; CharSequence id; boolean inStock; float price; + @SuppressWarnings({"rawtypes"}) List children; } - StreamingBinaryResponseParser parser = new StreamingBinaryResponseParser(new FastStreamingDocsCallback() { - - @Override - public Object initDocList(Long numFound, Long start, Float maxScore) { - assertEquals((Long) list.getNumFound(), numFound); - assertEquals((Long) list.getStart(), start); - assertEquals(list.getMaxScore(), maxScore); - return new int[1]; - } - - @Override - public Object startDoc(Object docListObj) { - Pojo pojo = new Pojo(); - pojo._idx = ((int[]) docListObj)[0]++; - return pojo; - } - - @Override - public void field(DataEntry field, Object docObj) { - Pojo pojo = (Pojo) docObj; - if ("id".equals(field.name())) { - pojo.id = ((Utf8CharSequence) field.val()).clone(); - } else if (field.type() == DataEntry.Type.BOOL && "inStock".equals(field.name())) { - pojo.inStock = field.boolVal(); - } else if (field.type() == DataEntry.Type.FLOAT && "price".equals(field.name())) { - pojo.price = field.floatVal(); - } - - } - - @Override - public void endDoc(Object docObj) { - Pojo pojo = (Pojo) docObj; - SolrDocument doc = list.get((int) pojo._idx); - assertEquals(doc.get("id"), pojo.id.toString()); - if (doc.get("inStock") != null) - assertEquals(doc.get("inStock"), pojo.inStock); - if (doc.get("price") != null) - assertEquals((Float) doc.get("price"), pojo.price, 0.001); - } - }); + StreamingBinaryResponseParser parser = + new StreamingBinaryResponseParser( + new FastStreamingDocsCallback() { + + @Override + public Object initDocList(Long numFound, Long start, Float maxScore) { + assertEquals((Long) list.getNumFound(), numFound); + assertEquals((Long) list.getStart(), start); + assertEquals(list.getMaxScore(), maxScore); + return new int[1]; + } + + @Override + public Object startDoc(Object docListObj) { + Pojo pojo = new Pojo(); + pojo._idx = ((int[]) docListObj)[0]++; + return pojo; + } + + @Override + public void field(DataEntry field, Object docObj) { + Pojo pojo = (Pojo) docObj; + if ("id".equals(field.name())) { + pojo.id = ((Utf8CharSequence) field.val()).clone(); + } else if (field.type() == DataEntry.Type.BOOL && "inStock".equals(field.name())) { + pojo.inStock = field.boolVal(); + } else if (field.type() == DataEntry.Type.FLOAT && "price".equals(field.name())) { + pojo.price = field.floatVal(); + } + } + + @Override + public void endDoc(Object docObj) { + Pojo pojo = (Pojo) docObj; + SolrDocument doc = list.get((int) pojo._idx); + assertEquals(doc.get("id"), pojo.id.toString()); + if (doc.get("inStock") != null) assertEquals(doc.get("inStock"), pojo.inStock); + if (doc.get("price") != null) + assertEquals((Float) doc.get("price"), pojo.price, 0.001); + } + }); parser.processResponse(new FastInputStream(null, baos.getbuf(), 0, baos.size()), null); } @@ -203,7 +213,7 @@ public void testParsingWithChildDocs() throws IOException { SimpleOrderedMap orderedMap = new SimpleOrderedMap<>(); 
orderedMap.add("response", sdocs); - BinaryRequestWriter.BAOS baos = new BinaryRequestWriter.BAOS(); + BytesOutputStream baos = new BytesOutputStream(); try (JavaBinCodec jbc = new JavaBinCodec()) { jbc.marshal(orderedMap, baos); } @@ -224,7 +234,7 @@ public void compare(SolrDocument d) { assertEquals(d.getChildDocumentCount(), children.size()); @SuppressWarnings({"unchecked"}) List l = (List) d.getFieldValue("longs"); - if(l != null){ + if (l != null) { assertNotNull(longs); for (int i = 0; i < l.size(); i++) { Long v = l.get(i); @@ -236,62 +246,60 @@ public void compare(SolrDocument d) { for (int i = 0; i < childDocuments.size(); i++) { children.get(i).compare(childDocuments.get(i)); } - } - } List l = new ArrayList<>(); - StreamingBinaryResponseParser binaryResponseParser = new StreamingBinaryResponseParser(new FastStreamingDocsCallback() { - - @Override - public Object initDocList(Long numFound, Long start, Float maxScore) { - return l; - } - - @Override - @SuppressWarnings({"unchecked"}) - public Object startDoc(Object docListObj) { - Pojo pojo = new Pojo(); - ((List) docListObj).add(pojo); - return pojo; - } - - @Override - public void field(DataEntry field, Object docObj) { - Pojo pojo = (Pojo) docObj; - if (field.name().equals("id")) { - pojo.id = field.strValue(); - } else if (field.name().equals("subject")) { - pojo.subject = field.strValue(); - } else if (field.name().equals("cat")) { - pojo.cat = field.strValue(); - } else if (field.type() == DataEntry.Type.ENTRY_ITER && "longs".equals(field.name())) { - if(useListener[0]){ - field.listenContainer(pojo.longs = new long[field.length()], READLONGS); - } else { - @SuppressWarnings({"unchecked"}) - List longList = (List) field.val(); - pojo.longs = new long[longList.size()]; - for (int i = 0; i < longList.size(); i++) { - pojo.longs[i] = longList.get(i); - - } - - } - } - - } - - - @Override - public Object startChildDoc(Object parentDocObj) { - Pojo parent = (Pojo) parentDocObj; - Pojo child = new Pojo(); - parent.children.add(child); - return child; - } - }); - binaryResponseParser.processResponse(new FastInputStream(null, baos.getbuf(), 0, baos.size()), null); + StreamingBinaryResponseParser binaryResponseParser = + new StreamingBinaryResponseParser( + new FastStreamingDocsCallback() { + + @Override + public Object initDocList(Long numFound, Long start, Float maxScore) { + return l; + } + + @Override + @SuppressWarnings({"unchecked"}) + public Object startDoc(Object docListObj) { + Pojo pojo = new Pojo(); + ((List) docListObj).add(pojo); + return pojo; + } + + @Override + public void field(DataEntry field, Object docObj) { + Pojo pojo = (Pojo) docObj; + if (field.name().equals("id")) { + pojo.id = field.strValue(); + } else if (field.name().equals("subject")) { + pojo.subject = field.strValue(); + } else if (field.name().equals("cat")) { + pojo.cat = field.strValue(); + } else if (field.type() == DataEntry.Type.ENTRY_ITER + && "longs".equals(field.name())) { + if (useListener[0]) { + field.listenContainer(pojo.longs = new long[field.length()], READLONGS); + } else { + @SuppressWarnings({"unchecked"}) + List longList = (List) field.val(); + pojo.longs = new long[longList.size()]; + for (int i = 0; i < longList.size(); i++) { + pojo.longs[i] = longList.get(i); + } + } + } + } + + @Override + public Object startChildDoc(Object parentDocObj) { + Pojo parent = (Pojo) parentDocObj; + Pojo child = new Pojo(); + parent.children.add(child); + return child; + } + }); + binaryResponseParser.processResponse( + new FastInputStream(null, 
baos.toBytes(), 0, baos.size()), null); for (int i = 0; i < sdocs.size(); i++) { l.get(i).compare(sdocs.get(i)); } @@ -299,18 +307,17 @@ public Object startChildDoc(Object parentDocObj) { l.clear(); useListener[0] = false; - binaryResponseParser.processResponse(new FastInputStream(null, baos.getbuf(), 0, baos.size()), null); + binaryResponseParser.processResponse( + new FastInputStream(null, baos.toBytes(), 0, baos.size()), null); for (int i = 0; i < sdocs.size(); i++) { l.get(i).compare(sdocs.get(i)); } - - } - static final DataEntry.EntryListener READLONGS = e -> { - if (e.type() != DataEntry.Type.LONG) return; - long[] array = (long[]) e.ctx(); - array[(int) e.index()] = e.longVal(); - - }; + static final DataEntry.EntryListener READLONGS = + e -> { + if (e.type() != DataEntry.Type.LONG) return; + long[] array = (long[]) e.ctx(); + array[(int) e.index()] = e.longVal(); + }; } diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java index 9f789215ba2..ee1477229dc 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java @@ -16,19 +16,6 @@ */ package org.apache.solr.common.util; -import org.apache.commons.io.IOUtils; -import org.apache.lucene.util.TestUtil; -import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.common.EnumFieldValue; -import org.apache.solr.common.SolrDocument; -import org.apache.solr.common.SolrDocumentList; -import org.apache.solr.common.SolrInputDocument; -import org.apache.solr.common.SolrInputField; -import org.apache.solr.util.ConcurrentLRUCache; -import org.apache.solr.util.RTimer; -import org.junit.Test; -import org.noggit.CharArr; - import java.io.BufferedOutputStream; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -43,14 +30,29 @@ import java.util.List; import java.util.Map; import java.util.Random; +import org.apache.commons.io.IOUtils; +import org.apache.lucene.util.TestUtil; +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.common.EnumFieldValue; +import org.apache.solr.common.SolrDocument; +import org.apache.solr.common.SolrDocumentList; +import org.apache.solr.common.SolrInputDocument; +import org.apache.solr.common.SolrInputField; +import org.apache.solr.util.ConcurrentLRUCache; +import org.apache.solr.util.RTimer; +import org.junit.Test; +import org.noggit.CharArr; public class TestJavaBinCodec extends SolrTestCaseJ4 { private static final String SOLRJ_JAVABIN_BACKCOMPAT_BIN = "/solrj/javabin_backcompat.bin"; - private static final String BIN_FILE_LOCATION = "./solr/solrj/src/test-files/solrj/javabin_backcompat.bin"; + private static final String BIN_FILE_LOCATION = + "./solr/solrj/src/test-files/solrj/javabin_backcompat.bin"; - private static final String SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS = "/solrj/javabin_backcompat_child_docs.bin"; - private static final String BIN_FILE_LOCATION_CHILD_DOCS = "./solr/solrj/src/test-files/solrj/javabin_backcompat_child_docs.bin"; + private static final String SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS = + "/solrj/javabin_backcompat_child_docs.bin"; + private static final String BIN_FILE_LOCATION_CHILD_DOCS = + "./solr/solrj/src/test-files/solrj/javabin_backcompat_child_docs.bin"; private static final String SOLRJ_DOCS_1 = "/solrj/docs1.xml"; private static final String SOLRJ_DOCS_2 = "/solrj/sampleClusteringResponse.xml"; @@ -58,9 +60,11 @@ public class TestJavaBinCodec 
extends SolrTestCaseJ4 { public void testStrings() throws Exception { for (int i = 0; i < 10000 * RANDOM_MULTIPLIER; i++) { String s = TestUtil.randomUnicodeString(random()); - try (JavaBinCodec jbcO = new JavaBinCodec(); ByteArrayOutputStream os = new ByteArrayOutputStream()) { + try (JavaBinCodec jbcO = new JavaBinCodec(); + ByteArrayOutputStream os = new ByteArrayOutputStream()) { jbcO.marshal(s, os); - try (JavaBinCodec jbcI = new JavaBinCodec(); ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray())) { + try (JavaBinCodec jbcI = new JavaBinCodec(); + ByteArrayInputStream is = new ByteArrayInputStream(os.toByteArray())) { Object o = jbcI.unmarshal(is); assertEquals(s, o); } @@ -72,14 +76,10 @@ public void testReadAsCharSeq() throws Exception { List types = new ArrayList<>(); SolrInputDocument idoc = new SolrInputDocument(); idoc.addField("foo", "bar"); - idoc.addField("foos", Arrays.asList("bar1","bar2")); + idoc.addField("foos", Arrays.asList("bar1", "bar2")); idoc.addField("enumf", new EnumFieldValue(1, "foo")); types.add(idoc); - compareObjects( - (List) getObject(getBytes(types, true)), - (List) types - ); - + compareObjects((List) getObject(getBytes(types, true)), (List) types); } public static SolrDocument generateSolrDocumentWithChildDocs() { @@ -108,7 +108,7 @@ public static SolrDocument generateSolrDocumentWithChildDocs() { private List generateAllDataTypes() { List types = new ArrayList<>(); - types.add(null); //NULL + types.add(null); // NULL types.add(true); types.add(false); types.add((byte) 1); @@ -142,14 +142,14 @@ private List generateAllDataTypes() { solrDocs.add(0, doc); types.add(solrDocs); - types.add(new byte[] {1,2,3,4,5}); + types.add(new byte[] {1, 2, 3, 4, 5}); // TODO? // List list = new ArrayList(); // list.add("one"); // types.add(list.iterator()); - types.add((byte) 15); //END + types.add((byte) 15); // END SolrInputDocument idoc = new SolrInputDocument(); idoc.addField("foo", "bar"); @@ -164,9 +164,9 @@ private List generateAllDataTypes() { types.add(new EnumFieldValue(1, "foo")); - types.add(map.entrySet().iterator().next()); //Map.Entry + types.add(map.entrySet().iterator().next()); // Map.Entry - types.add((byte) (1 << 5)); //TAG_AND_LEN + types.add((byte) (1 << 5)); // TAG_AND_LEN types.add("foo"); types.add(1); @@ -185,13 +185,14 @@ private List generateAllDataTypes() { @Test public void testBackCompat() throws IOException { - try (InputStream is = getClass().getResourceAsStream(SOLRJ_JAVABIN_BACKCOMPAT_BIN); JavaBinCodec javabin = new JavaBinCodec(){ - @Override - public List readIterator(DataInputInputStream fis) throws IOException { - return super.readIterator(fis); - } - };) - { + try (InputStream is = getClass().getResourceAsStream(SOLRJ_JAVABIN_BACKCOMPAT_BIN); + JavaBinCodec javabin = + new JavaBinCodec() { + @Override + public List readIterator(JavaBinCodec javaBinCodec) throws IOException { + return super.readIterator(javaBinCodec); + } + }; ) { @SuppressWarnings({"unchecked"}) List unmarshaledObj = (List) javabin.unmarshal(is); List matchObj = generateAllDataTypes(); @@ -199,7 +200,6 @@ public List readIterator(DataInputInputStream fis) throws IOException { } catch (IOException e) { throw e; } - } private void compareObjects(List unmarshaledObj, List matchObj) { @@ -210,30 +210,33 @@ private void compareObjects(List unmarshaledObj, List matchObj) { byte[] b1 = (byte[]) unmarshaledObj.get(i); byte[] b2 = (byte[]) matchObj.get(i); assertTrue(Arrays.equals(b1, b2)); - } else if (unmarshaledObj.get(i) instanceof SolrDocument && 
matchObj.get(i) instanceof SolrDocument) { + } else if (unmarshaledObj.get(i) instanceof SolrDocument + && matchObj.get(i) instanceof SolrDocument) { assertTrue(compareSolrDocument(unmarshaledObj.get(i), matchObj.get(i))); - } else if (unmarshaledObj.get(i) instanceof SolrDocumentList && matchObj.get(i) instanceof SolrDocumentList) { + } else if (unmarshaledObj.get(i) instanceof SolrDocumentList + && matchObj.get(i) instanceof SolrDocumentList) { assertTrue(compareSolrDocumentList(unmarshaledObj.get(i), matchObj.get(i))); - } else if (unmarshaledObj.get(i) instanceof SolrInputDocument && matchObj.get(i) instanceof SolrInputDocument) { + } else if (unmarshaledObj.get(i) instanceof SolrInputDocument + && matchObj.get(i) instanceof SolrInputDocument) { assertTrue(compareSolrInputDocument(unmarshaledObj.get(i), matchObj.get(i))); - } else if (unmarshaledObj.get(i) instanceof SolrInputField && matchObj.get(i) instanceof SolrInputField) { + } else if (unmarshaledObj.get(i) instanceof SolrInputField + && matchObj.get(i) instanceof SolrInputField) { assertTrue(assertSolrInputFieldEquals(unmarshaledObj.get(i), matchObj.get(i))); } else { assertEquals(unmarshaledObj.get(i), matchObj.get(i)); } - } } @Test public void testBackCompatForSolrDocumentWithChildDocs() throws IOException { - try (JavaBinCodec javabin = new JavaBinCodec(){ - @Override - public List readIterator(DataInputInputStream fis) throws IOException { - return super.readIterator(fis); - } - };) - { + try (JavaBinCodec javabin = + new JavaBinCodec() { + @Override + public List readIterator(JavaBinCodec javaBinCodec) throws IOException { + return super.readIterator(javaBinCodec); + } + }; ) { InputStream is = getClass().getResourceAsStream(SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS); SolrDocument sdoc = (SolrDocument) javabin.unmarshal(is); SolrDocument matchSolrDoc = generateSolrDocumentWithChildDocs(); @@ -245,7 +248,8 @@ public List readIterator(DataInputInputStream fis) throws IOException { @Test public void testForwardCompat() throws IOException { - try (JavaBinCodec javabin = new JavaBinCodec(); ByteArrayOutputStream os = new ByteArrayOutputStream()) { + try (JavaBinCodec javabin = new JavaBinCodec(); + ByteArrayOutputStream os = new ByteArrayOutputStream()) { Object data = generateAllDataTypes(); try { @@ -255,7 +259,9 @@ public void testForwardCompat() throws IOException { InputStream is = getClass().getResourceAsStream(SOLRJ_JAVABIN_BACKCOMPAT_BIN); byte[] currentFormatBytes = IOUtils.toByteArray(is); - for (int i = 1; i < currentFormatBytes.length; i++) {//ignore the first byte. It is version information + for (int i = 1; + i < currentFormatBytes.length; + i++) { // ignore the first byte. It is version information assertEquals(newFormatBytes[i], currentFormatBytes[i]); } @@ -268,14 +274,17 @@ public void testForwardCompat() throws IOException { @Test public void testForwardCompatForSolrDocumentWithChildDocs() throws IOException { SolrDocument sdoc = generateSolrDocumentWithChildDocs(); - try (JavaBinCodec javabin = new JavaBinCodec(); ByteArrayOutputStream os = new ByteArrayOutputStream()) { + try (JavaBinCodec javabin = new JavaBinCodec(); + ByteArrayOutputStream os = new ByteArrayOutputStream()) { javabin.marshal(sdoc, os); byte[] newFormatBytes = os.toByteArray(); InputStream is = getClass().getResourceAsStream(SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS); byte[] currentFormatBytes = IOUtils.toByteArray(is); - for (int i = 1; i < currentFormatBytes.length; i++) {//ignore the first byte. 
It is version information + for (int i = 1; + i < currentFormatBytes.length; + i++) { // ignore the first byte. It is version information assertEquals(newFormatBytes[i], currentFormatBytes[i]); } } catch (IOException e) { @@ -286,10 +295,7 @@ public void testForwardCompatForSolrDocumentWithChildDocs() throws IOException { @Test public void testAllTypes() throws IOException { List obj = generateAllDataTypes(); - compareObjects( - (List) getObject(getBytes(obj)), - (List) obj - ); + compareObjects((List) getObject(getBytes(obj)), (List) obj); } @Test @@ -301,40 +307,56 @@ public void testReadMapEntryTextStreamSource() throws IOException { Map.Entry entryFromTextDoc2_clone = getMapFromJavaBinCodec(SOLRJ_DOCS_2); // exactly same document read twice should have same content - assertEquals ("text-doc1 exactly same document read twice should have same content",entryFromTextDoc1,entryFromTextDoc1_clone); + assertEquals( + "text-doc1 exactly same document read twice should have same content", + entryFromTextDoc1, + entryFromTextDoc1_clone); // doc1 and doc2 are 2 text files with different content on line 1 - assertNotEquals ("2 text streams with 2 different contents should be unequal",entryFromTextDoc2,entryFromTextDoc1); + assertNotEquals( + "2 text streams with 2 different contents should be unequal", + entryFromTextDoc2, + entryFromTextDoc1); // exactly same document read twice should have same content - assertEquals ("text-doc2 exactly same document read twice should have same content",entryFromTextDoc2,entryFromTextDoc2_clone); + assertEquals( + "text-doc2 exactly same document read twice should have same content", + entryFromTextDoc2, + entryFromTextDoc2_clone); } @Test - public void testReadMapEntryBinaryStreamSource() throws IOException { + public void testReadMapEntryBinaryStreamSource() throws IOException { // now lets look at binary files - Map.Entry entryFromBinFileA = getMapFromJavaBinCodec(SOLRJ_JAVABIN_BACKCOMPAT_BIN); - Map.Entry entryFromBinFileA_clone = getMapFromJavaBinCodec(SOLRJ_JAVABIN_BACKCOMPAT_BIN); + Map.Entry entryFromBinFileA = + getMapFromJavaBinCodec(SOLRJ_JAVABIN_BACKCOMPAT_BIN); + Map.Entry entryFromBinFileA_clone = + getMapFromJavaBinCodec(SOLRJ_JAVABIN_BACKCOMPAT_BIN); - assertEquals("same map entry references should be equal",entryFromBinFileA,entryFromBinFileA); + assertEquals("same map entry references should be equal", entryFromBinFileA, entryFromBinFileA); // Commenting-out this test as it may have inadvertent effect on someone changing this in future // but keeping this in code to make a point, that even the same exact bin file, - // there could be sub-objects in the key or value of the maps, with types that do not implement equals - // and in these cases equals would fail as these sub-objects would be equated on their memory-references which is highly probbale to be unique + // there could be sub-objects in the key or value of the maps, with types that do not implement + // equals + // and in these cases equals would fail as these sub-objects would be equated on their + // memory-references which is highly probbale to be unique // and hence the top-level map's equals will also fail - // assertNotEquals("2 different references even though from same source are un-equal",entryFromBinFileA,entryFromBinFileA_clone); - + // assertNotEquals("2 different references even though from same source are + // un-equal",entryFromBinFileA,entryFromBinFileA_clone); // read in a different binary file and this should definitely not be equal to the other bi file - Map.Entry 
entryFromBinFileB = getMapFromJavaBinCodec(SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS); - assertNotEquals("2 different references from 2 different source bin streams should still be unequal",entryFromBinFileA,entryFromBinFileB); + Map.Entry entryFromBinFileB = + getMapFromJavaBinCodec(SOLRJ_JAVABIN_BACKCOMPAT_BIN_CHILD_DOCS); + assertNotEquals( + "2 different references from 2 different source bin streams should still be unequal", + entryFromBinFileA, + entryFromBinFileB); } private Map.Entry getMapFromJavaBinCodec(String fileName) throws IOException { try (InputStream is = getClass().getResourceAsStream(fileName)) { - try (DataInputInputStream dis = new FastInputStream(is)) { - try (JavaBinCodec javabin = new JavaBinCodec()) { - return javabin.readMapEntry(dis); - } + try (JavaBinCodec javabin = new JavaBinCodec()) { + javabin.init(is); + return JavaBinCodec.readMapEntry(javabin); } } } @@ -342,15 +364,18 @@ private Map.Entry getMapFromJavaBinCodec(String fileName) throws private static Object serializeAndDeserialize(Object o) throws IOException { return getObject(getBytes(o)); } + private static byte[] getBytes(Object o) throws IOException { - try (JavaBinCodec javabin = new JavaBinCodec(); ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + try (JavaBinCodec javabin = new JavaBinCodec(); + ByteArrayOutputStream baos = new ByteArrayOutputStream()) { javabin.marshal(o, baos); return baos.toByteArray(); } } private static byte[] getBytes(Object o, boolean readAsCharSeq) throws IOException { - try (JavaBinCodec javabin = new JavaBinCodec(); ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + try (JavaBinCodec javabin = new JavaBinCodec(); + ByteArrayOutputStream baos = new ByteArrayOutputStream()) { javabin.readStringAsCharSeq = readAsCharSeq; javabin.marshal(o, baos); return baos.toByteArray(); @@ -363,10 +388,10 @@ private static Object getObject(byte[] bytes) throws IOException { } } - @Test public void testResponseChildDocuments() throws IOException { - SolrDocument result = (SolrDocument) serializeAndDeserialize(generateSolrDocumentWithChildDocs()); + SolrDocument result = + (SolrDocument) serializeAndDeserialize(generateSolrDocumentWithChildDocs()); assertEquals(2, result.size()); assertEquals("1", result.getFieldValue("id")); assertEquals("parentDocument", result.getFieldValue("subject")); @@ -394,13 +419,14 @@ public void testResponseChildDocuments() throws IOException { assertFalse(grandChildDocuments.get(0).hasChildDocuments()); assertNull(grandChildDocuments.get(0).getChildDocuments()); } + @Test public void testStringCaching() throws Exception { Map m = Map.of("key1", "val1", "key2", "val2"); - byte[] b1 = getBytes(m);//copy 1 - byte[] b2 = getBytes(m);//copy 2 - Map m1 = (Map) getObject(b1); - Map m2 = (Map) getObject(b2); + byte[] b1 = getBytes(m); // copy 1 + byte[] b2 = getBytes(m); // copy 2 + Map m1 = (Map) getObject(b1); + Map m2 = (Map) getObject(b2); List l1 = new ArrayList<>(m1.keySet()); List l2 = new ArrayList<>(m2.keySet()); @@ -409,14 +435,14 @@ public void testStringCaching() throws Exception { assertNotSame(l1.get(0), l2.get(0)); assertNotSame(l1.get(1), l2.get(1)); - JavaBinCodec.StringCache stringCache = new JavaBinCodec.StringCache(new MapBackedCache<>(new HashMap<>())); - + JavaBinCodec.StringCache stringCache = + new JavaBinCodec.StringCache(new MapBackedCache<>(new HashMap<>())); try (JavaBinCodec c1 = new JavaBinCodec(null, stringCache); - JavaBinCodec c2 = new JavaBinCodec(null, stringCache)) { + JavaBinCodec c2 = new JavaBinCodec(null, 
stringCache)) { - m1 = (Map) c1.unmarshal(new ByteArrayInputStream(b1)); - m2 = (Map) c2.unmarshal(new ByteArrayInputStream(b2)); + m1 = (Map) c1.unmarshal(new ByteArrayInputStream(b1)); + m2 = (Map) c2.unmarshal(new ByteArrayInputStream(b2)); l1 = new ArrayList<>(m1.keySet()); l2 = new ArrayList<>(m2.keySet()); @@ -424,8 +450,6 @@ public void testStringCaching() throws Exception { assertEquals(l1, l2); assertSame(l1.get(0), l2.get(0)); assertSame(l1.get(1), l2.get(1)); - - } public void genBinaryFiles() throws IOException { @@ -437,19 +461,18 @@ public void genBinaryFiles() throws IOException { bos.write(out); bos.close(); - //Binary file with child documents + // Binary file with child documents SolrDocument sdoc = generateSolrDocumentWithChildDocs(); fs = new FileOutputStream(new File(BIN_FILE_LOCATION_CHILD_DOCS)); bos = new BufferedOutputStream(fs); bos.write(getBytes(sdoc)); bos.close(); - } private void testPerf() throws InterruptedException { final ArrayList l = new ArrayList<>(); Cache cache = null; - /* cache = new ConcurrentLRUCache(10000, 9000, 10000, 1000, false, true, null){ + /* cache = new ConcurrentLRUCache(10000, 9000, 10000, 1000, false, true, null){ @Override public String put(JavaBinCodec.StringBytes key, String val) { l.add(key); @@ -459,14 +482,14 @@ public String put(JavaBinCodec.StringBytes key, String val) { Runtime.getRuntime().gc(); printMem("before cache init"); - Cache cache1 = new MapBackedCache<>(new HashMap<>()) ; + Cache cache1 = new MapBackedCache<>(new HashMap<>()); final JavaBinCodec.StringCache STRING_CACHE = new JavaBinCodec.StringCache(cache1); -// STRING_CACHE = new JavaBinCodec.StringCache(cache); + // STRING_CACHE = new JavaBinCodec.StringCache(cache); byte[] bytes = new byte[0]; - StringBytes stringBytes = new StringBytes(null,0,0); + StringBytes stringBytes = new StringBytes(null, 0, 0); - for(int i=0;i<10000;i++) { + for (int i = 0; i < 10000; i++) { String s = String.valueOf(random().nextLong()); int end = s.length(); int maxSize = end * 4; @@ -480,85 +503,78 @@ public String put(JavaBinCodec.StringBytes key, String val) { final int ITERS = 1000000; int THREADS = 10; - runInThreads(THREADS, () -> { - StringBytes stringBytes1 = new StringBytes(new byte[0], 0, 0); - for (int i = 0; i < ITERS; i++) { - StringBytes b = l.get(i % l.size()); - stringBytes1.reset(b.bytes, 0, b.bytes.length); - if (STRING_CACHE.get(stringBytes1) == null) throw new RuntimeException("error"); - } - - }); - - + runInThreads( + THREADS, + () -> { + StringBytes stringBytes1 = new StringBytes(new byte[0], 0, 0); + for (int i = 0; i < ITERS; i++) { + StringBytes b = l.get(i % l.size()); + stringBytes1.reset(b.bytes, 0, b.bytes.length); + if (STRING_CACHE.get(stringBytes1) == null) throw new RuntimeException("error"); + } + }); printMem("after cache test"); System.out.println("time taken by LRUCACHE " + timer.getTime()); timer = new RTimer(); - runInThreads(THREADS, () -> { - String a = null; - CharArr arr = new CharArr(); - for (int i = 0; i < ITERS; i++) { - StringBytes sb = l.get(i % l.size()); - arr.reset(); - ByteUtils.UTF8toUTF16(sb.bytes, 0, sb.bytes.length, arr); - a = arr.toString(); - } - }); + runInThreads( + THREADS, + () -> { + String a = null; + CharArr arr = new CharArr(); + for (int i = 0; i < ITERS; i++) { + StringBytes sb = l.get(i % l.size()); + arr.reset(); + ByteUtils.UTF8toUTF16(sb.bytes, 0, sb.bytes.length, arr); + a = arr.toString(); + } + }); printMem("after new string test"); - System.out.println("time taken by string creation "+ timer.getTime()); - 
- - + System.out.println("time taken by string creation " + timer.getTime()); } - private static void runInThreads(int count, Runnable runnable) throws InterruptedException { - ArrayList t =new ArrayList<>(); - for(int i=0;i t = new ArrayList<>(); + for (int i = 0; i < count; i++) t.add(new Thread(runnable)); for (Thread thread : t) thread.start(); for (Thread thread : t) thread.join(); } static void printMem(String head) { System.out.println("*************" + head + "***********"); - int mb = 1024*1024; - //Getting the runtime reference from system + int mb = 1024 * 1024; + // Getting the runtime reference from system Runtime runtime = Runtime.getRuntime(); - //Print used memory - System.out.println("Used Memory:" - + (runtime.totalMemory() - runtime.freeMemory()) / mb); - - //Print free memory - System.out.println("Free Memory:" - + runtime.freeMemory() / mb); - + // Print used memory + System.out.println("Used Memory:" + (runtime.totalMemory() - runtime.freeMemory()) / mb); + // Print free memory + System.out.println("Free Memory:" + runtime.freeMemory() / mb); } public static void main(String[] args) throws IOException { - TestJavaBinCodec test = new TestJavaBinCodec(); - test.genBinaryFiles(); -// try { -// doDecodePerf(args); -// } catch (Exception e) { -// throw new RuntimeException(e); -// } + TestJavaBinCodec test = new TestJavaBinCodec(); + test.genBinaryFiles(); + // try { + // doDecodePerf(args); + // } catch (Exception e) { + // throw new RuntimeException(e); + // } } // common-case ascii static String str(Random r, int sz) { StringBuffer sb = new StringBuffer(sz); - for (int i=0; i underlyingCache = cacheSz > 0 ? new ConcurrentLRUCache<>(cacheSz,cacheSz-cacheSz/10,cacheSz,cacheSz/10,false,true,null) : null; // the cache in the first version of the patch was 10000,9000,10000,1000,false,true,null - final JavaBinCodec.StringCache stringCache = underlyingCache==null ? null : new JavaBinCodec.StringCache(underlyingCache); + ConcurrentLRUCache underlyingCache = + cacheSz > 0 + ? new ConcurrentLRUCache<>( + cacheSz, cacheSz - cacheSz / 10, cacheSz, cacheSz / 10, false, true, null) + : null; // the cache in the first version of the patch was + // 10000,9000,10000,1000,false,true,null + final JavaBinCodec.StringCache stringCache = + underlyingCache == null ? 
null : new JavaBinCodec.StringCache(underlyingCache); if (nThreads <= 0) { ret += doDecode(buffers, iter, stringCache); } else { - runInThreads(nThreads, () -> { - try { - doDecode(buffers, iter, stringCache); - } catch (IOException e) { - e.printStackTrace(); - } - }); + runInThreads( + nThreads, + () -> { + try { + doDecode(buffers, iter, stringCache); + } catch (IOException e) { + e.printStackTrace(); + } + }); } - long n = iter * Math.max(1,nThreads); - System.out.println("ret=" + ret + " THROUGHPUT=" + (n*1000 / timer.getTime())); - if (underlyingCache != null) System.out.println("cache: hits=" + underlyingCache.getStats().getCumulativeHits() + " lookups=" + underlyingCache.getStats().getCumulativeLookups() + " size=" + underlyingCache.getStats().getCurrentSize()); + long n = iter * Math.max(1, nThreads); + System.out.println("ret=" + ret + " THROUGHPUT=" + (n * 1000 / timer.getTime())); + if (underlyingCache != null) + System.out.println( + "cache: hits=" + + underlyingCache.getStats().getCumulativeHits() + + " lookups=" + + underlyingCache.getStats().getCumulativeLookups() + + " size=" + + underlyingCache.getStats().getCurrentSize()); } - public static int doDecode(byte[][] buffers, long iter, JavaBinCodec.StringCache stringCache) throws IOException { + public static int doDecode(byte[][] buffers, long iter, JavaBinCodec.StringCache stringCache) + throws IOException { int ret = 0; int bufnum = -1; - InputStream empty = new InputStream() { - @Override - public int read() throws IOException { - return -1; - } - }; + InputStream empty = + new InputStream() { + @Override + public int read() throws IOException { + return -1; + } + }; while (--iter >= 0) { if (++bufnum >= buffers.length) bufnum = 0; @@ -639,7 +672,4 @@ public int read() throws IOException { } return ret; } - } - - diff --git a/solr/solrj/src/test/org/apache/solr/common/util/Utf8CharSequenceTest.java b/solr/solrj/src/test/org/apache/solr/common/util/Utf8CharSequenceTest.java index 24a7ea596c5..69f12ace3b3 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/Utf8CharSequenceTest.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/Utf8CharSequenceTest.java @@ -103,7 +103,7 @@ public void testUnMarshal() throws IOException { ByteArrayUtf8CharSequence val = (ByteArrayUtf8CharSequence) nl1.get("key" + i); assertEquals(buf, val.getBuf()); String s = val.toString(); - assertTrue(s.startsWith("" + i)); + assertTrue(s + " i=" + i, s.startsWith("" + i)); assertTrue(s, s.endsWith(str)); } diff --git a/solr/test-framework/src/java/org/apache/solr/SolrIgnoredThreadsFilter.java b/solr/test-framework/src/java/org/apache/solr/SolrIgnoredThreadsFilter.java index 3fce31db253..c7a439ab45f 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrIgnoredThreadsFilter.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrIgnoredThreadsFilter.java @@ -16,9 +16,9 @@ */ package org.apache.solr; -import org.apache.lucene.search.TimeLimitingCollector.TimerThread; - import com.carrotsearch.randomizedtesting.ThreadFilter; +import java.lang.Thread.State; +import org.apache.lucene.search.TimeLimitingCollector.TimerThread; /** @@ -36,6 +36,10 @@ public boolean reject(Thread t) { * test-dependent information. 
*/ + if (t.getState().equals(State.TERMINATED)) { + return true; + } + String threadName = t.getName(); if (threadName.equals(TimerThread.THREAD_NAME)) { return true; @@ -55,6 +59,22 @@ public boolean reject(Thread t) { if (threadName.startsWith("ForkJoinPool.")) { return true; } + + // load balancer is leaky + if (threadName.startsWith("aliveCheckExecutor")) { + return true; + } + + + // we don't handle zk shutdown well, but these threads are harmless and will shortly go away + if (threadName.startsWith("SessionTracker")) { + return true; + } + + // tools + if (threadName.startsWith("Reference Handler") && threadName.startsWith("Signal Dispatcher") && threadName.startsWith("Monitor") && threadName.startsWith("YJPAgent-RequestListener")) { + return true; + } if (threadName.startsWith("Image Fetcher")) { return true; diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCase.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCase.java index b215c69eae4..4b01d32972c 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCase.java @@ -17,11 +17,21 @@ package org.apache.solr; -import java.lang.invoke.MethodHandles; -import java.io.File; -import java.util.regex.Pattern; +import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; +import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule; +import java.io.File; +import java.lang.invoke.MethodHandles; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.QuickPatchThreadsFilter; import org.apache.lucene.util.VerifyTestClassNamingConvention; @@ -38,118 +48,220 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; - /** - * All Solr test cases should derive from this class eventually. This is originally a result of async logging, see: - * SOLR-12055 and associated. To enable async logging, we must gracefully shut down logging. Many Solr tests subclass - * LuceneTestCase. + * All Solr test cases should derive from this class eventually. This is originally a result of + * async logging, see: SOLR-12055 and associated. To enable async logging, we must gracefully shut + * down logging. Many Solr tests subclass LuceneTestCase. * - * Rather than add the cruft from SolrTestCaseJ4 to all the Solr tests that currently subclass LuceneTestCase, - * we'll add the shutdown to this class and subclass it. + *

<p>Rather than add the cruft from SolrTestCaseJ4 to all the Solr tests that currently subclass + * LuceneTestCase, we'll add the shutdown to this class and subclass it. * - * Other changes that should affect every Solr test case may go here if they don't require the added capabilities in - * SolrTestCaseJ4. + *

Other changes that should affect every Solr test case may go here if they don't require the + * added capabilities in SolrTestCaseJ4. */ - // ThreadLeakFilters are not additive. Any subclass that requires filters - // other than these must redefine these as well. -@ThreadLeakFilters(defaultFilters = true, filters = { - SolrIgnoredThreadsFilter.class, - QuickPatchThreadsFilter.class -}) -@ThreadLeakLingering(linger = 10000) +// ThreadLeakFilters are not additive. Any subclass that requires filters +// other than these must redefine these as well. +@ThreadLeakFilters( + defaultFilters = true, + filters = {SolrIgnoredThreadsFilter.class, QuickPatchThreadsFilter.class}) +@ThreadLeakLingering(linger = 0) +@ThreadLeakAction(Action.WARN) public class SolrTestCase extends LuceneTestCase { /** * DO NOT REMOVE THIS LOGGER - *

- * For reasons that aren't 100% obvious, the existence of this logger is neccessary to ensure - * that the logging framework is properly initialized (even if concrete subclasses do not - * themselves initialize any loggers) so that the async logger threads can be properly shutdown - * on completion of the test suite - *

+ * + * <p>

For reasons that aren't 100% obvious, the existence of this logger is neccessary to ensure + * that the logging framework is properly initialized (even if concrete subclasses do not + * themselves initialize any loggers) so that the async logger threads can be properly shutdown on + * completion of the test suite + * * @see SOLR-14247 * @see #shutdownLogger */ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); - private static final Pattern NAMING_CONVENTION_TEST_SUFFIX = Pattern.compile("(.+\\.)([^.]+)(Test)"); + protected static final Pattern NAMING_CONVENTION_TEST_SUFFIX = + Pattern.compile("(.+\\.)([^.]+)(Test)"); - private static final Pattern NAMING_CONVENTION_TEST_PREFIX = Pattern.compile("(.+\\.)(Test)([^.]+)"); + protected static final Pattern NAMING_CONVENTION_TEST_PREFIX = + Pattern.compile("(.+\\.)(Test)([^.]+)"); @ClassRule - public static TestRule solrClassRules = - RuleChain.outerRule(new SystemPropertiesRestoreRule()) - .around( - new VerifyTestClassNamingConvention( - "org.apache.solr.analytics", NAMING_CONVENTION_TEST_SUFFIX)) - .around( - new VerifyTestClassNamingConvention( - "org.apache.solr.ltr", NAMING_CONVENTION_TEST_PREFIX)) - .around(new RevertDefaultThreadHandlerRule()); + public static TestRule solrClassRules = + RuleChain.outerRule(new SystemPropertiesRestoreRule()) + .around( + new VerifyTestClassNamingConvention( + "org.apache.solr.analytics", NAMING_CONVENTION_TEST_SUFFIX)) + .around( + new VerifyTestClassNamingConvention( + "org.apache.solr.ltr", NAMING_CONVENTION_TEST_PREFIX)) + .around(new RevertDefaultThreadHandlerRule()); /** - * Sets the solr.default.confdir system property to the value of - * {@link ExternalPaths#DEFAULT_CONFIGSET} if and only if the system property is not already set, - * and the DEFAULT_CONFIGSET exists and is a readable directory. - *

- * Logs INFO/WARNing messages as appropriate based on these 2 conditions. - *

+ * Sets the solr.default.confdir system property to the value of {@link + * ExternalPaths#DEFAULT_CONFIGSET} if and only if the system property is not already set, and the + * DEFAULT_CONFIGSET exists and is a readable directory. + * + * <p>

Logs INFO/WARNing messages as appropriate based on these 2 conditions. + * * @see SolrDispatchFilter#SOLR_DEFAULT_CONFDIR_ATTRIBUTE */ @BeforeClass public static void setDefaultConfigDirSysPropIfNotSet() { - final String existingValue = System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE); + final String existingValue = + System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE); if (null != existingValue) { - log.info("Test env includes configset dir system property '{}'='{}'", SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, existingValue); + log.info( + "Test env includes configset dir system property '{}'='{}'", + SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, + existingValue); return; } final File extPath = new File(ExternalPaths.DEFAULT_CONFIGSET); - if (extPath.canRead(/* implies exists() */) && extPath.isDirectory()) { - log.info("Setting '{}' system property to test-framework derived value of '{}'", - SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, ExternalPaths.DEFAULT_CONFIGSET); + if (extPath.canRead(/* implies exists() */ ) && extPath.isDirectory()) { + log.info( + "Setting '{}' system property to test-framework derived value of '{}'", + SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, + ExternalPaths.DEFAULT_CONFIGSET); assert null == existingValue; - System.setProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, ExternalPaths.DEFAULT_CONFIGSET); + System.setProperty( + SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, ExternalPaths.DEFAULT_CONFIGSET); } else { - log.warn("System property '{}' is not already set, but test-framework derived value ('{}') either " + - "does not exist or is not a readable directory, you may need to set the property yourself " + - "for tests to run properly", - SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, ExternalPaths.DEFAULT_CONFIGSET); + log.warn( + "System property '{}' is not already set, but test-framework derived value ('{}') either " + + "does not exist or is not a readable directory, you may need to set the property yourself " + + "for tests to run properly", + SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE, + ExternalPaths.DEFAULT_CONFIGSET); } } - - /** - * Special hook for sanity checking if any tests trigger failures when an - * Assumption failure occures in a {@link BeforeClass} method + + /** + * Special hook for sanity checking if any tests trigger failures when an Assumption failure + * occures in a {@link BeforeClass} method + * * @lucene.internal */ @BeforeClass public static void checkSyspropForceBeforeClassAssumptionFailure() { // ant test -Dargs="-Dtests.force.assumption.failure.beforeclass=true" final String PROP = "tests.force.assumption.failure.beforeclass"; - assumeFalse(PROP + " == true", - systemPropertyAsBoolean(PROP, false)); + assumeFalse(PROP + " == true", systemPropertyAsBoolean(PROP, false)); } - - /** - * Special hook for sanity checking if any tests trigger failures when an - * Assumption failure occures in a {@link Before} method + + /** + * Special hook for sanity checking if any tests trigger failures when an Assumption failure + * occures in a {@link Before} method + * * @lucene.internal */ @Before public void checkSyspropForceBeforeAssumptionFailure() { // ant test -Dargs="-Dtests.force.assumption.failure.before=true" final String PROP = "tests.force.assumption.failure.before"; - assumeFalse(PROP + " == true", - systemPropertyAsBoolean(PROP, false)); + assumeFalse(PROP + " == true", systemPropertyAsBoolean(PROP, false)); } - + @AfterClass - public 
static void shutdownLogger() throws Exception { + public static void shutdownLogger() { StartupLoggingUtils.shutdown(); } + + public static void interruptThreadsOnTearDown() { + + log.info("Checking leaked threads after test"); + + ThreadGroup tg = Thread.currentThread().getThreadGroup(); + + Set> threadSet = Thread.getAllStackTraces().entrySet(); + if (log.isInfoEnabled()) { + log.info("thread count={}", threadSet.size()); + } + Collection waitThreads = new ArrayList<>(); + for (Map.Entry threadEntry : threadSet) { + Thread thread = threadEntry.getKey(); + ThreadGroup threadGroup = thread.getThreadGroup(); + if (threadGroup != null + && !(thread.getName().startsWith("SUITE") + && thread.getName().charAt(thread.getName().length() - 1) == ']') + && !"main".equals(thread.getName())) { + if (log.isTraceEnabled()) { + log.trace("thread is {} state={}", thread.getName(), thread.getState()); + } + if (threadGroup.getName().equals(tg.getName()) && interrupt(thread)) { + waitThreads.add(thread); + } + } + + while (true) { + boolean cont = + threadGroup != null && threadGroup.getParent() != null && !( + thread.getName().startsWith("SUITE") + && thread.getName().charAt(thread.getName().length() - 1) == ']') + && !"main".equals(thread.getName()); + if (!cont) break; + threadGroup = threadGroup.getParent(); + + if (threadGroup.getName().equals(tg.getName())) { + if (log.isTraceEnabled()) { + log.trace("thread is {} state={}", thread.getName(), thread.getState()); + } + if (interrupt(thread)) { + waitThreads.add(thread); + } + } + } + } + + for (Thread thread : waitThreads) { + int cnt = 0; + do { + if (log.isDebugEnabled()) { + log.debug("waiting on {} {}", thread.getName(), thread.getState()); + } + thread.interrupt(); + try { + thread.join(5); + } catch (InterruptedException e) { + // ignore + } + } while (cnt++ < 20); + } + + waitThreads.clear(); + } + + private static boolean interrupt(Thread thread) { + + if (thread.getName().startsWith("Reference Handler") + || thread.getName().startsWith("Signal Dispatcher") + || thread.getName().startsWith("Monitor") + || thread.getName().startsWith("YJPAgent-RequestListener")) { + return false; + } + + if (thread.getName().startsWith("ForkJoinPool.") + || thread.getName().startsWith("Log4j2-") + || thread.getName().startsWith("SessionTracker")) { + return false; + } + + // pool is forkjoin + if (thread.getName().contains("pool-")) { + return false; + } + + Thread.State state = thread.getState(); + + if (state == Thread.State.TERMINATED) { + return false; + } + if (log.isDebugEnabled()) { + log.debug("Interrupt on {} state={}", thread.getName(), thread.getState()); + } + thread.interrupt(); + return true; + } } diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java index 3f4e62a3dc0..c9853526438 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java @@ -192,7 +192,7 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase { "java.lang.reflect.", "com.carrotsearch.randomizedtesting.", }); - + public static final String DEFAULT_TEST_COLLECTION_NAME = "collection1"; public static final String DEFAULT_TEST_CORENAME = DEFAULT_TEST_COLLECTION_NAME; protected static final String CORE_PROPERTIES_FILENAME = "core.properties"; @@ -204,9 +204,9 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase { protected static String coreName = DEFAULT_TEST_CORENAME; public static int 
DEFAULT_CONNECTION_TIMEOUT = 60000; // default socket connection timeout in ms - + private static String initialRootLogLevel; - + protected volatile static ExecutorService testExecutor; protected void writeCoreProperties(Path coreDirectory, String corename) throws IOException { @@ -223,13 +223,13 @@ public static void writeCoreProperties(Path coreDirectory, Properties properties log.info("Writing core.properties file to {}", coreDirectory); Files.createDirectories(coreDirectory); try (Writer writer = - new OutputStreamWriter(Files.newOutputStream(coreDirectory.resolve(CORE_PROPERTIES_FILENAME)), Charset.forName("UTF-8"))) { + new OutputStreamWriter(Files.newOutputStream(coreDirectory.resolve(CORE_PROPERTIES_FILENAME)), Charset.forName("UTF-8"))) { properties.store(writer, testname); } } protected void assertExceptionThrownWithMessageContaining(Class expectedType, - List expectedStrings, ThrowingRunnable runnable) { + List expectedStrings, ThrowingRunnable runnable) { Throwable thrown = expectThrows(expectedType, runnable); if (expectedStrings != null) { @@ -250,7 +250,7 @@ protected void assertExceptionThrownWithMessageContaining(Class(), @@ -291,8 +291,8 @@ public static void setupTestCases() { // non-null after calling setupTestCases() initAndGetDataDir(); - System.setProperty("solr.zkclienttimeout", "90000"); - + System.setProperty("solr.zkclienttimeout", "90000"); + System.setProperty("solr.httpclient.retries", "1"); System.setProperty("solr.retries.on.forward", "1"); System.setProperty("solr.retries.to.followers", "1"); @@ -300,11 +300,10 @@ public static void setupTestCases() { System.setProperty("solr.v2RealPath", "true"); System.setProperty("zookeeper.forceSync", "no"); System.setProperty("jetty.testMode", "true"); - System.setProperty("enable.update.log", Boolean.toString(usually())); + System.setProperty("enable.update.log", usually() ? 
"true" : "false"); System.setProperty("tests.shardhandler.randomSeed", Long.toString(random().nextLong())); System.setProperty("solr.clustering.enabled", "false"); System.setProperty("solr.cloud.wait-for-updates-with-stale-state-pause", "500"); - System.setProperty("solr.filterCache.async", String.valueOf(random().nextBoolean())); System.setProperty("pkiHandlerPrivateKeyPath", SolrTestCaseJ4.class.getClassLoader().getResource("cryptokeys/priv_key512_pkcs8.pem").toExternalForm()); System.setProperty("pkiHandlerPublicKeyPath", SolrTestCaseJ4.class.getClassLoader().getResource("cryptokeys/pub_key512.der").toExternalForm()); @@ -365,7 +364,7 @@ public static void teardownTestCases() throws Exception { } catch (Exception e) { log.error("Error deleting SolrCore."); } - + if (null != testExecutor) { ExecutorUtil.shutdownAndAwaitTermination(testExecutor); testExecutor = null; @@ -413,7 +412,7 @@ public static void teardownTestCases() throws Exception { StartupLoggingUtils.changeLogLevel(initialRootLogLevel); } } - + /** * a "dead" host, if you try to connect to it, it will likely fail fast * please consider using mocks and not real networking to simulate failure @@ -442,14 +441,14 @@ public static void assumeWorkingMockito() { fail("ByteBuddy and Mockito are not available on classpath: " + e.toString()); } } - + /** * @return null if ok else error message */ public static String clearObjectTrackerAndCheckEmpty(int waitSeconds) { return clearObjectTrackerAndCheckEmpty(waitSeconds, false); } - + /** * @return null if ok else error message */ @@ -477,12 +476,12 @@ public static String clearObjectTrackerAndCheckEmpty(int waitSeconds, boolean tr } catch (InterruptedException e) { break; } } while (retries++ < waitSeconds); - - + + log.info("------------------------------------------------------- Done waiting for tracked resources to be released"); - + ObjectReleaseTracker.clear(); - + return result; } @@ -517,7 +516,7 @@ public void restoreMethodLogLevels() { LogLevel.Configurer.restoreLogLevels(savedMethodLogLevels); savedMethodLogLevels.clear(); } - + protected static boolean isSSLMode() { return sslConfig != null && sslConfig.isSSLMode(); } @@ -552,8 +551,8 @@ public static void resetFactory() throws Exception { private static SSLTestConfig buildSSLConfig() { SSLRandomizer sslRandomizer = - SSLRandomizer.getSSLRandomizerForClass(RandomizedContext.current().getTargetClass()); - + SSLRandomizer.getSSLRandomizerForClass(RandomizedContext.current().getTargetClass()); + if (Constants.MAC_OS_X) { // see SOLR-9039 // If a solution is found to remove this, please make sure to also update @@ -572,7 +571,7 @@ private static SSLTestConfig buildSSLConfig() { protected static JettyConfig buildJettyConfig(String context) { return JettyConfig.builder().setContext(context).withSSLConfig(sslConfig.buildServerSSLConfig()).build(); } - + protected static String buildUrl(final int port, final String context) { return (isSSLMode() ? "https" : "http") + "://127.0.0.1:" + port + context; } @@ -604,11 +603,11 @@ public static void setupNoCoreTest(Path solrHome, String xmlStr) throws Exceptio h = new TestHarness(SolrXmlConfig.fromSolrHome(solrHome, new Properties())); lrf = h.getRequestFactory("/select", 0, 20, CommonParams.VERSION, "2.2"); } - - /** + + /** * Sets system properties to allow generation of random configurations of - * solrconfig.xml and schema.xml. - * Sets properties used on + * solrconfig.xml and schema.xml. 
+ * Sets properties used on * {@link #newIndexWriterConfig(org.apache.lucene.analysis.Analyzer)} * and base schema.xml (Point Fields) */ @@ -654,12 +653,12 @@ public void tearDown() throws Exception { } /** - * Subclasses may call this method to access the "dataDir" that will be used by + * Subclasses may call this method to access the "dataDir" that will be used by * {@link #initCore} (either prior to or after the core is created). *

* If the dataDir has not yet been initialized when this method is called, this method will do so. - * Calling {@link #deleteCore} will "reset" the value, such that subsequent calls will - * re-initialize a new value. All directories returned by any calls to this method will + * Calling {@link #deleteCore} will "reset" the value, such that subsequent calls will + * re-initialize a new value. All directories returned by any calls to this method will * automatically be cleaned up per {@link #createTempDir} *

*

@@ -682,17 +681,17 @@ protected static File initAndGetDataDir() { } return dataDir; } - /** - * Counter for ensuring we don't ask {@link #createTempDir} to try and + /** + * Counter for ensuring we don't ask {@link #createTempDir} to try and * re-create the same dir prefix over and over. *

- * (createTempDir has it's own counter for uniqueness, but it tries all numbers in a loop - * until it finds one available. No reason to force that O(N^2) behavior when we know we've + * (createTempDir has it's own counter for uniqueness, but it tries all numbers in a loop + * until it finds one available. No reason to force that O(N^2) behavior when we know we've * already created N previous directories with the same prefix.) *

*/ private static final AtomicInteger dataDirCount = new AtomicInteger(0); - + /** Call initCore in @BeforeClass to instantiate a solr core in your test class. * deleteCore will be called for you via SolrTestCaseJ4 @AfterClass */ public static void initCore(String config, String schema) throws Exception { @@ -716,7 +715,7 @@ public static void initCore(String config, String schema, String solrHome, Strin coreName=pCoreName; initCore(config,schema,solrHome); } - + static long numOpens; static long numCloses; public static void startTrackingSearchers() { @@ -728,7 +727,7 @@ public static void startTrackingSearchers() { numOpens = numCloses = 0; } } - + /** Causes an exception matching the regex pattern to not be logged. */ public static void ignoreException(String pattern) { if (SolrException.ignorePatterns == null) // usually initialized already but in case not... @@ -801,10 +800,10 @@ public static String getSolrConfigFile() { } /** - * The directory used as the dataDir for the TestHarness unless - * {@link #hdfsDataDir} is non null. + * The directory used as the dataDir for the TestHarness unless + * {@link #hdfsDataDir} is non null. *

- * Will be set to null by {@link #deleteCore} and re-initialized as needed by {@link #createCore}. + * Will be set to null by {@link #deleteCore} and re-initialized as needed by {@link #createCore}. * In the event of a test failure, the contents will be left on disk. *

* @see #createTempDir(String) @@ -813,7 +812,7 @@ public static String getSolrConfigFile() { */ @Deprecated protected static volatile File initCoreDataDir; - + // hack due to File dataDir protected static String hdfsDataDir; @@ -855,10 +854,10 @@ public static void createCore() { assertNotNull(testSolrHome); solrConfig = TestHarness.createConfig(testSolrHome, coreName, getSolrConfigFile()); h = new TestHarness( coreName, hdfsDataDir == null ? initAndGetDataDir().getAbsolutePath() : hdfsDataDir, - solrConfig, - getSchemaFile()); + solrConfig, + getSchemaFile()); lrf = h.getRequestFactory - ("",0,20,CommonParams.VERSION,"2.2"); + ("",0,20,CommonParams.VERSION,"2.2"); } public static CoreContainer createCoreContainer(Path solrHome, String solrXML) { @@ -958,7 +957,7 @@ public static void deleteCore() { // clears the updatelog sysprop at the end of the test run System.clearProperty(UPDATELOG_SYSPROP); } - + solrConfig = null; h = null; lrf = null; @@ -997,11 +996,11 @@ private static void checkUpdateU(String message, String update, boolean shouldSu try { String m = (null == message) ? "" : message + " "; if (shouldSucceed) { - String res = h.validateUpdate(update); - if (res != null) fail(m + "update was not successful: " + res); + String res = h.validateUpdate(update); + if (res != null) fail(m + "update was not successful: " + res); } else { - String res = h.validateErrorUpdate(update); - if (res != null) fail(m + "update succeeded, but should have failed: " + res); + String res = h.validateErrorUpdate(update); + if (res != null) fail(m + "update succeeded, but should have failed: " + res); } } catch (SAXException e) { throw new RuntimeException("Invalid XML", e); @@ -1149,7 +1148,7 @@ public static String assertJQ(SolrQueryRequest req, double delta, String... test // restore the params if (params != null && params != req.getParams()) req.setParams(params); } - } + } /** Makes sure a query throws a SolrException with the listed response code */ @@ -1194,7 +1193,7 @@ public static void assertQEx(String failMessage, String exceptionMessage, SolrQu fail( failMessage ); } catch (SolrException e) { assertEquals( code.code, e.code() ); - assertTrue("Unexpected error message. Expecting \"" + exceptionMessage + + assertTrue("Unexpected error message. Expecting \"" + exceptionMessage + "\" but got \"" + e.getMessage() + "\"", e.getMessage()!= null && e.getMessage().contains(exceptionMessage)); } catch (Exception e2) { throw new RuntimeException("Exception during query", e2); @@ -1286,7 +1285,7 @@ public static String add(XmlDoc doc, String... args) { return r.getBuffer().toString(); } catch (IOException e) { throw new RuntimeException - ("this should never happen with a StringWriter", e); + ("this should never happen with a StringWriter", e); } } @@ -1387,11 +1386,11 @@ public static class XmlDoc { } /** - * Does a low level delete of all docs in the index. + * Does a low level delete of all docs in the index. * * The behavior of this method is slightly different then doing a normal *:* DBQ because it - * takes advantage of internal methods to ensure all index data is wiped, regardless of optimistic - * concurrency version constraints -- making it suitable for tests that create synthetic versions, + * takes advantage of internal methods to ensure all index data is wiped, regardless of optimistic + * concurrency version constraints -- making it suitable for tests that create synthetic versions, * and/or require a completely pristine index w/o any field metdata. 
* * @see #deleteByQueryAndGetVersion @@ -1404,7 +1403,7 @@ public void clearIndex() { } try { deleteByQueryAndGetVersion("*:*", params("_version_", Long.toString(-Long.MAX_VALUE), - DISTRIB_UPDATE_PARAM,DistribPhase.FROMLEADER.toString())); + DISTRIB_UPDATE_PARAM,DistribPhase.FROMLEADER.toString())); } catch (Exception e) { throw new RuntimeException(e); } @@ -1571,7 +1570,7 @@ public static String jsonAdd(SolrInputDocument... docs) { return out.toString(); } - /** Creates a JSON delete command from an id list */ + /** Creates a JSON delete command from an id list */ public static String jsonDelId(Object... ids) { CharArr out = new CharArr(); try { @@ -1661,7 +1660,7 @@ public static Long deleteByQueryAndGetVersion(String q, SolrParams params) throw ///////////////////////////////////////////////////////////////////////////////////// //////////////////////////// random document / index creation /////////////////////// ///////////////////////////////////////////////////////////////////////////////////// - + public abstract static class Vals { @SuppressWarnings({"rawtypes"}) public abstract Comparable get(); @@ -1869,7 +1868,7 @@ public FldType(String fname, Vals vals) { public FldType(String fname, IVals numValues, Vals vals) { this.fname = fname; this.numValues = numValues; - this.vals = vals; + this.vals = vals; } @SuppressWarnings({"rawtypes"}) @@ -1895,7 +1894,7 @@ public Fld createField() { Fld fld = new Fld(); fld.ftype = this; fld.vals = vals; - return fld; + return fld; } } @@ -1933,7 +1932,7 @@ public Map indexDocs(List descriptor, Map createSort(IndexSchema schema, List field if (comparators.size() == 0) { // default sort is by score desc - comparators.add(createComparator("score", false, false, false, false)); + comparators.add(createComparator("score", false, false, false, false)); } return createComparator(comparators); @@ -2024,7 +2023,7 @@ public static Comparator createComparator(final String field, final boolean final int mul = asc ? 1 : -1; if (field.equals("_docid_")) { - return (o1, o2) -> (o1.order - o2.order) * mul; + return (o1, o2) -> (o1.order - o2.order) * mul; } if (field.equals("score")) { @@ -2155,7 +2154,7 @@ public static File getFile(String name) { try { return new File(url.toURI()); } catch (Exception e) { - throw new RuntimeException("Resource was found on classpath, but cannot be resolved to a " + + throw new RuntimeException("Resource was found on classpath, but cannot be resolved to a " + "normal file (maybe it is part of a JAR file): " + name); } } @@ -2165,7 +2164,7 @@ public static File getFile(String name) { } throw new RuntimeException("Cannot find resource in classpath or in file-system (relative to CWD): " + name); } - + public static String TEST_HOME() { return getFile("solr/collection1").getParent(); } @@ -2199,7 +2198,7 @@ public static void assertXmlFile(final File file, String... xpath) throw new RuntimeException("XPath is invalid", e2); } } - + /** * Fails if the number of documents in the given SolrDocumentList differs * from the given number of expected values, or if any of the values in the @@ -2267,7 +2266,7 @@ public static void copyXmlToHome(File dstRoot, String fromFile) throws IOExcepti } File xmlF = new File(SolrTestCaseJ4.TEST_HOME(), fromFile); FileUtils.copyFile(xmlF, new File(dstRoot, "solr.xml")); - + } // Creates a consistent configuration, _including_ solr.xml at dstRoot. Creates collection1/conf and copies // the stock files in there. 
@@ -2359,7 +2358,7 @@ public boolean compareSolrDocumentList(Object expected, Object actual) { if (list1.getMaxScore() == null) { if (list2.getMaxScore() != null) { return false; - } + } } else if (list2.getMaxScore() == null) { return false; } else { @@ -2475,7 +2474,7 @@ public boolean assertSolrInputFieldEquals(Object expected, Object actual) { return true; } - /** + /** * Returns likely most (1/10) of the time, otherwise unlikely */ public static Object skewed(Object likely, Object unlikely) { @@ -2542,27 +2541,27 @@ public static CloudHttp2SolrClient getCloudHttp2SolrClient(MiniSolrCloudCluster * some internal settings. */ public static class CloudSolrClientBuilder extends CloudSolrClient.Builder { - + public CloudSolrClientBuilder(List zkHosts, Optional zkChroot) { super(zkHosts, zkChroot); randomizeCloudSolrClient(); } - + public CloudSolrClientBuilder(ClusterStateProvider stateProvider) { this.stateProvider = stateProvider; randomizeCloudSolrClient(); } - + public CloudSolrClientBuilder(MiniSolrCloudCluster cluster) { if (random().nextBoolean()) { this.zkHosts.add(cluster.getZkServer().getZkAddress()); } else { populateSolrUrls(cluster); } - + randomizeCloudSolrClient(); } - + private void populateSolrUrls(MiniSolrCloudCluster cluster) { if (random().nextBoolean()) { final List solrNodes = cluster.getJettySolrRunners(); @@ -2573,7 +2572,7 @@ private void populateSolrUrls(MiniSolrCloudCluster cluster) { this.solrUrls.add(cluster.getRandomJetty(random()).getBaseUrl().toString()); } } - + private void randomizeCloudSolrClient() { this.directUpdatesToLeadersOnly = random().nextBoolean(); this.shardLeadersOnly = random().nextBoolean(); @@ -2585,7 +2584,7 @@ private void randomizeCloudSolrClient() { * This method may randomize unspecified aspects of the resulting SolrClient. * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost) { return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty()).build(); } @@ -2601,20 +2600,20 @@ public static CloudSolrClient getCloudSolrClient(MiniSolrCloudCluster cluster) { /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost, HttpClient httpClient) { return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty()) .withHttpClient(httpClient) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly) { if (shardLeadersOnly) { return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty()) @@ -2632,9 +2631,9 @@ public static CloudSolrClientBuilder newCloudSolrClient(String zkHost) { /** * This method may randomize unspecified aspects of the resulting SolrClient. 
- * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, int socketTimeoutMillis) { if (shardLeadersOnly) { return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty()) @@ -2647,12 +2646,12 @@ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLea .withSocketTimeout(socketTimeoutMillis) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, int connectionTimeoutMillis, int socketTimeoutMillis) { if (shardLeadersOnly) { return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty()) @@ -2667,14 +2666,14 @@ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLea .withSocketTimeout(socketTimeoutMillis) .build(); } - - - + + + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, HttpClient httpClient) { if (shardLeadersOnly) { return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty()) @@ -2687,12 +2686,12 @@ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLea .sendUpdatesToAllReplicasInShard() .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly - */ + */ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, HttpClient httpClient, int connectionTimeoutMillis, int socketTimeoutMillis) { if (shardLeadersOnly) { @@ -2710,24 +2709,24 @@ public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLea .withSocketTimeout(socketTimeoutMillis) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.Builder} class directly - */ + */ public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String baseSolrUrl, int queueSize, int threadCount) { return new ConcurrentUpdateSolrClient.Builder(baseSolrUrl) .withQueueSize(queueSize) .withThreadCount(threadCount) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. 
- * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.Builder} class directly - */ + */ public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String baseSolrUrl, int queueSize, int threadCount, int connectionTimeoutMillis) { return new ConcurrentUpdateSolrClient.Builder(baseSolrUrl) .withQueueSize(queueSize) @@ -2735,12 +2734,12 @@ public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String ba .withConnectionTimeout(connectionTimeoutMillis) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient.Builder} class directly - */ + */ public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String baseSolrUrl, HttpClient httpClient, int queueSize, int threadCount) { return new ConcurrentUpdateSolrClient.Builder(baseSolrUrl) .withHttpClient(httpClient) @@ -2748,24 +2747,24 @@ public static ConcurrentUpdateSolrClient getConcurrentUpdateSolrClient(String ba .withThreadCount(threadCount) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder} class directly - */ + */ public static LBHttpSolrClient getLBHttpSolrClient(HttpClient client, String... solrUrls) { return new LBHttpSolrClient.Builder() .withHttpClient(client) .withBaseSolrUrls(solrUrls) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder} class directly - */ + */ public static LBHttpSolrClient getLBHttpSolrClient(HttpClient client, int connectionTimeoutMillis, int socketTimeoutMillis, String... solrUrls) { return new LBHttpSolrClient.Builder() @@ -2775,23 +2774,23 @@ public static LBHttpSolrClient getLBHttpSolrClient(HttpClient client, int connec .withSocketTimeout(socketTimeoutMillis) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.LBHttpSolrClient.Builder} class directly - */ + */ public static LBHttpSolrClient getLBHttpSolrClient(String... solrUrls) throws MalformedURLException { return new LBHttpSolrClient.Builder() .withBaseSolrUrls(solrUrls) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. 
- * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient, ResponseParser responseParser, boolean compression) { return new Builder(url) .withHttpClient(httpClient) @@ -2799,35 +2798,35 @@ public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient .allowCompression(compression) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient, ResponseParser responseParser) { return new Builder(url) .withHttpClient(httpClient) .withResponseParser(responseParser) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient) { return new Builder(url) .withHttpClient(httpClient) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient, int connectionTimeoutMillis) { return new Builder(url) .withHttpClient(httpClient) @@ -2837,30 +2836,30 @@ public static HttpSolrClient getHttpSolrClient(String url, HttpClient httpClient /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url) { return new Builder(url) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. - * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url, int connectionTimeoutMillis) { return new Builder(url) .withConnectionTimeout(connectionTimeoutMillis) .build(); } - + /** * This method may randomize unspecified aspects of the resulting SolrClient. 
- * Tests that do not wish to have any randomized behavior should use the + * Tests that do not wish to have any randomized behavior should use the * {@link org.apache.solr.client.solrj.impl.HttpSolrClient.Builder} class directly - */ + */ public static HttpSolrClient getHttpSolrClient(String url, int connectionTimeoutMillis, int socketTimeoutMillis) { return new Builder(url) .withConnectionTimeout(connectionTimeoutMillis) @@ -2868,25 +2867,25 @@ public static HttpSolrClient getHttpSolrClient(String url, int connectionTimeout .build(); } - /** - * Returns a randomly generated Date in the appropriate Solr external (input) format + /** + * Returns a randomly generated Date in the appropriate Solr external (input) format * @see #randomSkewedDate */ public static String randomDate() { return Instant.ofEpochMilli(random().nextLong()).toString(); } - /** - * Returns a Date such that all results from this method always have the same values for - * year+month+day+hour+minute but the seconds are randomized. This can be helpful for - * indexing documents with random date values that are biased for a narrow window + /** + * Returns a Date such that all results from this method always have the same values for + * year+month+day+hour+minute but the seconds are randomized. This can be helpful for + * indexing documents with random date values that are biased for a narrow window * (one day) to test collisions/overlaps * * @see #randomDate */ public static String randomSkewedDate() { return String.format(Locale.ROOT, "2010-10-31T10:31:%02d.000Z", - TestUtil.nextInt(random(), 0, 59)); + TestUtil.nextInt(random(), 0, 59)); } /** @@ -2900,7 +2899,7 @@ public static String randomXmlUsableUnicodeString() { } return result; } - + protected static void waitForWarming(SolrCore core) throws InterruptedException { RefCounted registeredSearcher = core.getRegisteredSearcher(); RefCounted newestSearcher = core.getNewestSearcher(false); @@ -2931,7 +2930,7 @@ protected String getSaferTestName() { } return testName; } - + @BeforeClass public static void assertNonBlockingRandomGeneratorAvailable() throws InterruptedException { final String EGD = "java.security.egd"; @@ -2939,38 +2938,38 @@ public static void assertNonBlockingRandomGeneratorAvailable() throws Interrupte final String ALLOWED = "test.solr.allowed.securerandom"; final String allowedAlg = System.getProperty(ALLOWED); final String actualEGD = System.getProperty(EGD); - + log.info("SecureRandom sanity checks: {}={} & {}={}", ALLOWED, allowedAlg, EGD, actualEGD); if (null != allowedAlg) { // the user has explicitly requested to bypass our assertions and allow a particular alg // the only thing we should do is assert that the algorithm they have whitelisted is actaully used - - + + final String actualAlg = (new SecureRandom()).getAlgorithm(); assertEquals("Algorithm specified using "+ALLOWED+" system property " + - "does not match actual algorithm", allowedAlg, actualAlg); + "does not match actual algorithm", allowedAlg, actualAlg); return; } - // else: no user override, do the checks we want including - + // else: no user override, do the checks we want including + if (null == actualEGD) { System.setProperty(EGD, URANDOM); log.warn("System property {} was not set by test runner, forcibly set to expected: {}", EGD, URANDOM); } else if (! URANDOM.equals(actualEGD) ) { log.warn("System property {}={} .. 
test runner should use expected: {}", EGD, actualEGD, URANDOM); } - + final String algorithm = (new SecureRandom()).getAlgorithm(); - + assertFalse("SecureRandom algorithm '" + algorithm + "' is in use by your JVM, " + - "which is a potentially blocking algorithm on some environments. " + - "Please report the details of this failure (and your JVM vendor/version) to solr-user@lucene.apache.org. " + - "You can try to run your tests with -D"+EGD+"="+URANDOM+" or bypass this check using " + - "-Dtest.solr.allowed.securerandom="+ algorithm +" as a JVM option when running tests.", - // be permissive in our checks and blacklist only algorithms - // that are known to be blocking under some circumstances - algorithm.equals("NativePRNG") || algorithm.equals("NativePRNGBlocking")); + "which is a potentially blocking algorithm on some environments. " + + "Please report the details of this failure (and your JVM vendor/version) to solr-user@lucene.apache.org. " + + "You can try to run your tests with -D"+EGD+"="+URANDOM+" or bypass this check using " + + "-Dtest.solr.allowed.securerandom="+ algorithm +" as a JVM option when running tests.", + // be permissive in our checks and blacklist only algorithms + // that are known to be blocking under some circumstances + algorithm.equals("NativePRNG") || algorithm.equals("NativePRNGBlocking")); } protected static void systemSetPropertySolrTestsMergePolicyFactory(String value) { @@ -2980,7 +2979,7 @@ protected static void systemSetPropertySolrTestsMergePolicyFactory(String value) protected static void systemClearPropertySolrTestsMergePolicyFactory() { System.clearProperty(SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY); } - + @Deprecated // For backwards compatibility only. Please do not use in new tests. protected static void systemSetPropertySolrDisableUrlAllowList(String value) { System.setProperty(AllowListUrlChecker.DISABLE_URL_ALLOW_LIST, value); @@ -2995,9 +2994,9 @@ protected static void systemClearPropertySolrDisableUrlAllowList() { protected static T pickRandom(T... options) { return options[random().nextInt(options.length)]; } - + /** - * The name of a sysprop that can be set by users when running tests to force the types of numerics + * The name of a sysprop that can be set by users when running tests to force the types of numerics * used for test classes that do not have the {@link SuppressPointFields} annotation: *
    *
* If unset, then a random variable will be used to decide the type of numerics.
  • @@ -3007,23 +3006,23 @@ protected static T pickRandom(T... options) { * @see #NUMERIC_POINTS_SYSPROP */ public static final String USE_NUMERIC_POINTS_SYSPROP = "solr.tests.use.numeric.points"; - + /** - * The name of a sysprop that will either true or false indicating if - * numeric points fields are currently in use, depending on the user specified value of - * {@link #USE_NUMERIC_POINTS_SYSPROP} and/or the {@link SuppressPointFields} annotation and/or + * The name of a sysprop that will either true or false indicating if + * numeric points fields are currently in use, depending on the user specified value of + * {@link #USE_NUMERIC_POINTS_SYSPROP} and/or the {@link SuppressPointFields} annotation and/or * randomization. Tests can use Boolean.getBoolean(NUMERIC_POINTS_SYSPROP). * * @see #randomizeNumericTypesProperties */ public static final String NUMERIC_POINTS_SYSPROP = "solr.tests.numeric.points"; - + /** - * The name of a sysprop that will be either true or false indicating if - * docValues should be used on a numeric field. This property string should be used in the - * docValues attribute of (most) numeric fieldTypes in schemas, and by default will be - * randomized by this class in a @BeforeClass. Subclasses that need to force specific - * behavior can use System.setProperty(NUMERIC_DOCVALUES_SYSPROP,"true") + * The name of a sysprop that will be either true or false indicating if + * docValues should be used on a numeric field. This property string should be used in the + * docValues attribute of (most) numeric fieldTypes in schemas, and by default will be + * randomized by this class in a @BeforeClass. Subclasses that need to force specific + * behavior can use System.setProperty(NUMERIC_DOCVALUES_SYSPROP,"true") * to override the default randomization. * * @see #randomizeNumericTypesProperties @@ -3033,7 +3032,7 @@ protected static T pickRandom(T... options) { public static final String UPDATELOG_SYSPROP = "solr.tests.ulog"; /** - * Sets various sys props related to user specified or randomized choices regarding the types + * Sets various sys props related to user specified or randomized choices regarding the types * of numerics that should be used in tests. * * @see #NUMERIC_DOCVALUES_SYSPROP @@ -3046,17 +3045,17 @@ private static void randomizeNumericTypesProperties() { final boolean useDV = random().nextBoolean(); System.setProperty(NUMERIC_DOCVALUES_SYSPROP, ""+useDV); - + // consume a consistent amount of random data even if sysprop/annotation is set final boolean randUsePoints = 0 != random().nextInt(5); // 80% likelihood final String usePointsStr = System.getProperty(USE_NUMERIC_POINTS_SYSPROP); final boolean usePoints = (null == usePointsStr) ? randUsePoints : Boolean.parseBoolean(usePointsStr); - + if (RandomizedContext.current().getTargetClass().isAnnotationPresent(SolrTestCaseJ4.SuppressPointFields.class) || (! 
usePoints)) { log.info("Using TrieFields (NUMERIC_POINTS_SYSPROP=false) w/NUMERIC_DOCVALUES_SYSPROP={}", useDV); - + org.apache.solr.schema.PointField.TEST_HACK_IGNORE_USELESS_TRIEFIELD_ARGS = false; private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Integer.class, "solr.TrieIntField"); private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Float.class, "solr.TrieFloatField"); @@ -3064,7 +3063,7 @@ private static void randomizeNumericTypesProperties() { private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Double.class, "solr.TrieDoubleField"); private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Date.class, "solr.TrieDateField"); private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Enum.class, "solr.EnumField"); - + System.setProperty(NUMERIC_POINTS_SYSPROP, "false"); } else { log.info("Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP={}", useDV); @@ -3076,24 +3075,24 @@ private static void randomizeNumericTypesProperties() { private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Double.class, "solr.DoublePointField"); private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Date.class, "solr.DatePointField"); private_RANDOMIZED_NUMERIC_FIELDTYPES.put(Enum.class, "solr.EnumFieldType"); - + System.setProperty(NUMERIC_POINTS_SYSPROP, "true"); } for (Map.Entry,String> entry : RANDOMIZED_NUMERIC_FIELDTYPES.entrySet()) { System.setProperty("solr.tests." + entry.getKey().getSimpleName() + "FieldType", - entry.getValue()); + entry.getValue()); } } public static DistributedUpdateProcessor createDistributedUpdateProcessor(SolrQueryRequest req, SolrQueryResponse rsp, - UpdateRequestProcessor next) { + UpdateRequestProcessor next) { if(h.getCoreContainer().isZooKeeperAware()) { return new DistributedZkUpdateProcessor(req, rsp, next); } return new DistributedUpdateProcessor(req, rsp, next); } - + /** * Cleans up the randomized sysproperties and variables set by {@link #randomizeNumericTypesProperties} * @@ -3131,7 +3130,7 @@ private static boolean isChildDoc(Object o) { } private static final Map,String> private_RANDOMIZED_NUMERIC_FIELDTYPES = new HashMap<>(); - + /** * A Map of "primitive" java "numeric" types and the string name of the class used in the * corresponding schema fieldType declaration. 
@@ -3142,6 +3141,6 @@ private static boolean isChildDoc(Object o) { * @see #randomizeNumericTypesProperties */ protected static final Map,String> RANDOMIZED_NUMERIC_FIELDTYPES - = Collections.unmodifiableMap(private_RANDOMIZED_NUMERIC_FIELDTYPES); + = Collections.unmodifiableMap(private_RANDOMIZED_NUMERIC_FIELDTYPES); } diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java index a854f911e2e..d7b1c34f19b 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java @@ -48,6 +48,7 @@ import org.apache.solr.core.backup.ShardBackupId; import org.apache.solr.core.backup.ShardBackupMetadata; import org.apache.solr.core.backup.repository.BackupRepository; +import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -95,11 +96,16 @@ public abstract class AbstractIncrementalBackupTest extends SolrCloudTestCase { protected String testSuffix = "test1"; @BeforeClass - public static void createCluster() throws Exception { + public static void beforeAbstractIncrementalBackupTest() throws Exception { docsSeed = random().nextLong(); System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory"); } + @AfterClass + public static void afterAbstractIncrementalBackupTest() throws Exception { + interruptThreadsOnTearDown(); // not closed properly + } + @Before public void setUpTrackingRepo() { TrackingBackupRepository.clear(); @@ -316,8 +322,8 @@ public void testBackupIncremental() throws Exception { .setIncremental(true) .setMaxNumberBackupPoints(3) .setRepositoryName(BACKUP_REPO_NAME); - if (random().nextBoolean()) { - RequestStatusState state = backup.processAndWait(cluster.getSolrClient(), 1000); + if (rarely()) { + RequestStatusState state = backup.processAndWait(cluster.getSolrClient(), 100); if (state != RequestStatusState.FAILED) { fail("This backup should be failed"); } diff --git a/solr/test-framework/src/java/org/apache/solr/util/NotSecurePseudoRandom.java b/solr/test-framework/src/java/org/apache/solr/util/NotSecurePseudoRandom.java new file mode 100644 index 00000000000..c4786050405 --- /dev/null +++ b/solr/test-framework/src/java/org/apache/solr/util/NotSecurePseudoRandom.java @@ -0,0 +1,2 @@ +package org.apache.solr.util;public class NotSecurePseudoRandom { +} diff --git a/solr/test-framework/src/java/org/apache/solr/util/NotSecurePseudoRandomSpi.java b/solr/test-framework/src/java/org/apache/solr/util/NotSecurePseudoRandomSpi.java new file mode 100644 index 00000000000..e69de29bb2d diff --git a/solr/test-framework/src/java/org/apache/solr/util/SolrTestNonSecureRandomProvider.java b/solr/test-framework/src/java/org/apache/solr/util/SolrTestNonSecureRandomProvider.java new file mode 100644 index 00000000000..f9c3210133c --- /dev/null +++ b/solr/test-framework/src/java/org/apache/solr/util/SolrTestNonSecureRandomProvider.java @@ -0,0 +1,25 @@ +//package org.apache.solr.util; +// +//import java.security.Provider; +// +//public class SolrTestNonSecureRandomProvider extends Provider { +// +// public SolrTestNonSecureRandomProvider() { +// super("LinuxPRNG", +// 1.0, +// "A Test only, non secure provider"); +// put("SecureRandom.SHA1PRNG", NotSecurePseudoRandomSpi.class.getName()); +// put("SecureRandom.NativePRNG", 
NotSecurePseudoRandomSpi.class.getName()); +// put("SecureRandom.DRBG", NotSecurePseudoRandomSpi.class.getName()); +// +// +// put("SecureRandom.SHA1PRNG ThreadSafe", "true"); +// put("SecureRandom.NativePRNG ThreadSafe", "true"); +// put("SecureRandom.DRBG ThreadSafe", "true"); +// +// +// put("SecureRandom.SHA1PRNG ImplementedIn", "Software"); +// put("SecureRandom.NativePRNG ImplementedIn", "Software"); +// put("SecureRandom.DRBG ImplementedIn", "Software"); +// } +// } \ No newline at end of file diff --git a/versions.props b/versions.props index 2c71a28b947..37c525956d2 100644 --- a/versions.props +++ b/versions.props @@ -62,6 +62,7 @@ io.opencensus:opencensus-contrib-http-util=0.21.0 io.opentracing:*=0.33.0 io.prometheus:*=0.2.0 io.sgr:s2-geometry-library-java=1.0.0 +it.unimi.dsi:fastutil-core=8.5.6 javax.servlet:javax.servlet-api=3.1.0 junit:junit=4.13.1 net.arnx:jsonic=1.2.7
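Editor's note: the three new files added above under solr/test-framework/src/java/org/apache/solr/util are only stubs in this patch — NotSecurePseudoRandom.java is an empty class, NotSecurePseudoRandomSpi.java has no content yet, and SolrTestNonSecureRandomProvider.java is entirely commented out. For readers wondering what the SPI would look like once filled in, the sketch below shows one way a test-only, never-blocking SecureRandomSpi could be written with the standard java.security mechanism. This is a hypothetical sketch under stated assumptions, not the implementation this patch ships; everything beyond the class name (the fixed seed, the use of java.util.Random) is an assumption.

    package org.apache.solr.util;

    import java.security.SecureRandomSpi;
    import java.util.Random;

    /**
     * Hypothetical sketch only: a deterministic, never-blocking SecureRandomSpi
     * backed by java.util.Random. NOT cryptographically secure; test use only.
     */
    public class NotSecurePseudoRandomSpi extends SecureRandomSpi {

      private final Random rand = new Random(42); // fixed seed: never waits on OS entropy

      @Override
      protected void engineSetSeed(byte[] seed) {
        // intentionally ignored; determinism (and never blocking) is the whole point
      }

      @Override
      protected void engineNextBytes(byte[] bytes) {
        rand.nextBytes(bytes);
      }

      @Override
      protected byte[] engineGenerateSeed(int numBytes) {
        byte[] seed = new byte[numBytes];
        rand.nextBytes(seed);
        return seed;
      }
    }

With an SPI like this in place, a test bootstrap could register the (currently commented-out) provider ahead of the platform providers, e.g. java.security.Security.insertProviderAt(new SolrTestNonSecureRandomProvider(), 1), so that new SecureRandom() resolves to a non-blocking implementation instead of NativePRNG / NativePRNGBlocking — the algorithms that assertNonBlockingRandomGeneratorAvailable() in SolrTestCaseJ4 flags above.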