From 3361bcb3e1605c60fee06396e96a4becc82e1451 Mon Sep 17 00:00:00 2001 From: Paul Irwin Date: Tue, 31 Dec 2024 12:19:27 -0700 Subject: [PATCH] Convert static readonly fields to const, #662 --- .../Analysis/Cjk/CJKBigramFilter.cs | 30 ++++++------ .../Miscellaneous/CapitalizationFilter.cs | 6 +-- .../Segmentation/DefaultICUTokenizerConfig.cs | 5 +- .../Dict/BinaryDictionary.cs | 23 +++++---- .../Dict/CharacterDefinition.cs | 29 +++++------ .../Dict/ConnectionCosts.cs | 5 +- .../Dict/Dictionary.cs | 4 +- .../Dict/TokenInfoDictionary.cs | 2 +- .../JapaneseIterationMarkCharFilter.cs | 10 ++-- .../JapaneseTokenizer.cs | 2 +- .../DoubleMetaphoneFilterFactory.cs | 6 +-- .../Language/Bm/Languages.cs | 2 +- .../Language/RefinedSoundex.cs | 2 +- .../Language/Soundex.cs | 10 ++-- src/Lucene.Net.Analysis.SmartCn/Utility.cs | 10 ++-- .../Pl/PolishAnalyzer.cs | 8 ++-- .../Stempel/StempelFilter.cs | 2 +- .../ByTask/Feeds/DocMaker.cs | 20 ++++---- .../ByTask/Feeds/SpatialDocMaker.cs | 6 +-- .../ByTask/Feeds/TrecContentSource.cs | 20 ++++---- .../ByTask/Feeds/TrecDocParser.cs | 2 +- .../ByTask/Tasks/AddIndexesTask.cs | 2 +- .../ByTask/Tasks/OpenIndexTask.cs | 6 +-- .../ByTask/Tasks/OpenReaderTask.cs | 2 +- .../ByTask/Tasks/ReportTask.cs | 16 +++---- .../Quality/QualityStats.cs | 18 +++---- .../BlockTerms/BlockTermsWriter.cs | 8 ++-- .../DiskDV/DiskDocValuesFormat.cs | 12 ++--- .../Memory/MemoryDocValuesFormat.cs | 14 +++--- .../Sep/SepPostingsWriter.cs | 12 ++--- src/Lucene.Net.Facet/Taxonomy/FacetLabel.cs | 5 +- .../Highlight/Highlighter.cs | 2 +- .../PostingsHighlight/PostingsHighlighter.cs | 2 +- .../VectorHighlight/BaseFragListBuilder.cs | 4 +- .../VectorHighlight/FastVectorHighlighter.cs | 6 +-- .../VectorHighlight/SimpleBoundaryScanner.cs | 2 +- .../Index/Sorter/SortingMergePolicy.cs | 16 +++---- src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs | 28 +++++------ src/Lucene.Net.QueryParser/Ext/Extensions.cs | 2 +- .../Flexible/Core/Nodes/QueryNodeImpl.cs | 8 ++-- .../MultiTermRewriteMethodProcessor.cs | 2 +- .../Processors/OpenRangeQueryNodeProcessor.cs | 2 +- .../Surround/Parser/QueryParser.cs | 20 ++++---- .../Surround/Query/FieldsQuery.cs | 2 +- src/Lucene.Net.Spatial/Query/SpatialArgs.cs | 6 +-- src/Lucene.Net.Suggest/Spell/SpellChecker.cs | 5 +- .../Suggest/Jaspell/JaspellLookup.cs | 17 +++---- .../Analysis/MockTokenizer.cs | 3 +- .../Codecs/Lucene40/Lucene40PostingsWriter.cs | 4 +- .../Codecs/RAMOnly/RAMOnlyPostingsFormat.cs | 2 +- .../Util/DefaultNamespaceTypeWrapper.cs | 10 ++-- .../Analysis/Th/TestThaiAnalyzer.cs | 19 ++++---- .../TestOpenNLPLemmatizerFilterFactory.cs | 48 +++++++++---------- src/Lucene.Net/Codecs/CodecUtil.cs | 8 ++-- .../Lucene3x/Lucene3xSegmentInfoFormat.cs | 14 +++--- .../Lucene40/Lucene40DocValuesFormat.cs | 6 +-- .../Lucene42/Lucene42DocValuesFormat.cs | 8 ++-- .../Lucene45/Lucene45DocValuesConsumer.cs | 4 +- .../Lucene46/Lucene46SegmentInfoFormat.cs | 6 +-- .../Codecs/MultiLevelSkipListReader.cs | 4 +- src/Lucene.Net/Index/BufferedUpdates.cs | 10 ++-- src/Lucene.Net/Index/DirectoryReader.cs | 14 +++--- src/Lucene.Net/Index/DocValuesUpdate.cs | 4 +- .../Index/DocumentsWriterPerThread.cs | 4 +- src/Lucene.Net/Index/IndexFileNames.cs | 16 +++---- src/Lucene.Net/Index/IndexWriter.cs | 12 ++--- src/Lucene.Net/Index/IndexWriterConfig.cs | 22 ++++----- .../Index/LogByteSizeMergePolicy.cs | 18 +++---- src/Lucene.Net/Index/LogDocMergePolicy.cs | 4 +- src/Lucene.Net/Index/LogMergePolicy.cs | 8 ++-- src/Lucene.Net/Index/MergePolicy.cs | 4 +- src/Lucene.Net/Index/SegmentInfo.cs | 4 +- 
src/Lucene.Net/Index/SegmentInfos.cs | 5 +- src/Lucene.Net/Index/SortedSetDocValues.cs | 2 +- src/Lucene.Net/Index/TieredMergePolicy.cs | 2 +- src/Lucene.Net/Store/CompoundFileDirectory.cs | 8 ++-- .../Support/Util/ExceptionExtensions.cs | 2 +- src/Lucene.Net/Util/ArrayUtil.cs | 1 + src/Lucene.Net/Util/BroadWord.cs | 4 +- src/Lucene.Net/Util/ByteBlockPool.cs | 6 +-- src/Lucene.Net/Util/Constants.cs | 4 +- src/Lucene.Net/Util/Fst/FST.cs | 2 +- src/Lucene.Net/Util/IntBlockPool.cs | 14 +++--- 83 files changed, 372 insertions(+), 357 deletions(-) diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs index 3de2c2f884..04240c9dca 100644 --- a/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs +++ b/src/Lucene.Net.Analysis.Common/Analysis/Cjk/CJKBigramFilter.cs @@ -49,7 +49,7 @@ public enum CJKScript /// Forms bigrams of CJK terms that are generated from /// or ICUTokenizer. /// - /// CJK types are set by these tokenizers, but you can also use + /// CJK types are set by these tokenizers, but you can also use /// to explicitly control which /// of the CJK scripts are turned into bigrams. /// @@ -83,8 +83,8 @@ public sealed class CJKBigramFilter : TokenFilter private static readonly string KATAKANA_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.KATAKANA]; private static readonly string HANGUL_TYPE = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.HANGUL]; - // sentinel value for ignoring a script - private static readonly string NO = ""; + // sentinel value for ignoring a script + private const string NO = ""; // these are set to either their type or NO if we want to pass them thru private readonly string doHan; @@ -133,7 +133,7 @@ public CJKBigramFilter(TokenStream @in) /// /// /// Input - /// OR'ed set from , , + /// OR'ed set from , , /// , public CJKBigramFilter(TokenStream @in, CJKScript flags) : this(@in, flags, false) @@ -145,7 +145,7 @@ public CJKBigramFilter(TokenStream @in, CJKScript flags) /// and whether or not unigrams should also be output. /// /// Input - /// OR'ed set from , , + /// OR'ed set from , , /// , /// true if unigrams for the selected writing systems should also be output. /// when this is false, this is only done when there are no adjacent characters to form @@ -166,8 +166,8 @@ public CJKBigramFilter(TokenStream @in, CJKScript flags, bool outputUnigrams) } /* - * much of this complexity revolves around handling the special case of a - * "lone cjk character" where cjktokenizer would output a unigram. this + * much of this complexity revolves around handling the special case of a + * "lone cjk character" where cjktokenizer would output a unigram. this * is also the only time we ever have to captureState. */ public override bool IncrementToken() @@ -186,7 +186,7 @@ public override bool IncrementToken() // when also outputting unigrams, we output the unigram first, // then rewind back to revisit the bigram. // so an input of ABC is A + (rewind)AB + B + (rewind)BC + C - // the logic in hasBufferedUnigram ensures we output the C, + // the logic in hasBufferedUnigram ensures we output the C, // even though it did actually have adjacent CJK characters. if (ngramState) @@ -225,7 +225,7 @@ public override bool IncrementToken() { // we have a buffered unigram, and we peeked ahead to see if we could form - // a bigram, but we can't, because the offsets are unaligned. capture the state + // a bigram, but we can't, because the offsets are unaligned. 
capture the state // of this peeked data to be revisited next time thru the loop, and dump our unigram. loneState = CaptureState(); @@ -246,7 +246,7 @@ public override bool IncrementToken() { // we have a buffered unigram, and we peeked ahead to see if we could form - // a bigram, but we can't, because its not a CJK type. capture the state + // a bigram, but we can't, because its not a CJK type. capture the state // of this peeked data to be revisited next time thru the loop, and dump our unigram. loneState = CaptureState(); @@ -259,7 +259,7 @@ public override bool IncrementToken() else { - // case 3: we have only zero or 1 codepoints buffered, + // case 3: we have only zero or 1 codepoints buffered, // so not enough to form a bigram. But, we also have no // more input. So if we have a buffered codepoint, emit // a unigram, otherwise, its end of stream. @@ -277,7 +277,7 @@ public override bool IncrementToken() private State loneState; // rarely used: only for "lone cjk characters", where we emit unigrams /// - /// looks at next input token, returning false is none is available + /// looks at next input token, returning false is none is available /// private bool DoNext() { @@ -359,7 +359,7 @@ private void Refill() } /// - /// Flushes a bigram token to output from our buffer + /// Flushes a bigram token to output from our buffer /// This is the normal case, e.g. ABC -> AB BC /// private void FlushBigram() @@ -383,7 +383,7 @@ private void FlushBigram() /// /// Flushes a unigram token to output from our buffer. /// This happens when we encounter isolated CJK characters, either the whole - /// CJK string is a single character, or we encounter a CJK character surrounded + /// CJK string is a single character, or we encounter a CJK character surrounded /// by space, punctuation, english, etc, but not beside any other CJK. /// private void FlushUnigram() @@ -435,4 +435,4 @@ public override void Reset() ngramState = false; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilter.cs index 696b615ec7..845abe41b1 100644 --- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilter.cs +++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/CapitalizationFilter.cs @@ -34,8 +34,8 @@ namespace Lucene.Net.Analysis.Miscellaneous /// public sealed class CapitalizationFilter : TokenFilter { - public static readonly int DEFAULT_MAX_WORD_COUNT = int.MaxValue; - public static readonly int DEFAULT_MAX_TOKEN_LENGTH = int.MaxValue; + public const int DEFAULT_MAX_WORD_COUNT = int.MaxValue; + public const int DEFAULT_MAX_TOKEN_LENGTH = int.MaxValue; private readonly bool onlyFirstWord; private readonly CharArraySet keep; @@ -269,4 +269,4 @@ private void ProcessWord(char[] buffer, int offset, int length, int wordCount) //return word.toString(); } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Analysis.ICU/Analysis/Icu/Segmentation/DefaultICUTokenizerConfig.cs b/src/Lucene.Net.Analysis.ICU/Analysis/Icu/Segmentation/DefaultICUTokenizerConfig.cs index 9981c5fff8..8351b05620 100644 --- a/src/Lucene.Net.Analysis.ICU/Analysis/Icu/Segmentation/DefaultICUTokenizerConfig.cs +++ b/src/Lucene.Net.Analysis.ICU/Analysis/Icu/Segmentation/DefaultICUTokenizerConfig.cs @@ -30,8 +30,8 @@ namespace Lucene.Net.Analysis.Icu.Segmentation /// to many languages. 
/// /// - /// Generally tokenizes Unicode text according to UAX#29 - /// (), + /// Generally tokenizes Unicode text according to UAX#29 + /// (), /// but with the following tailorings: /// /// Thai, Lao, Myanmar, Khmer, and CJK text is broken into words with a dictionary. @@ -54,6 +54,7 @@ public class DefaultICUTokenizerConfig : ICUTokenizerConfig /// Token type for words that appear to be numbers public static readonly string WORD_NUMBER = StandardTokenizer.TOKEN_TYPES[StandardTokenizer.NUM]; /// Token type for words that appear to be emoji sequences + // ReSharper disable once ConvertToConstant.Global - matches the fields above to keep it static readonly public static readonly string WORD_EMOJI = ""; //StandardTokenizer.TOKEN_TYPES[StandardTokenizer.EMOJI]; // LUCENENET: 4.8.1 StandardTokenizer doesn't contain EMOJI /// diff --git a/src/Lucene.Net.Analysis.Kuromoji/Dict/BinaryDictionary.cs b/src/Lucene.Net.Analysis.Kuromoji/Dict/BinaryDictionary.cs index c4b6b77eb1..dedf34abfe 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/Dict/BinaryDictionary.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/Dict/BinaryDictionary.cs @@ -38,13 +38,14 @@ namespace Lucene.Net.Analysis.Ja.Dict /// public abstract class BinaryDictionary : IDictionary { - public static readonly string DICT_FILENAME_SUFFIX = "$buffer.dat"; - public static readonly string TARGETMAP_FILENAME_SUFFIX = "$targetMap.dat"; - public static readonly string POSDICT_FILENAME_SUFFIX = "$posDict.dat"; - - public static readonly string DICT_HEADER = "kuromoji_dict"; - public static readonly string TARGETMAP_HEADER = "kuromoji_dict_map"; - public static readonly string POSDICT_HEADER = "kuromoji_dict_pos"; + public const string DICT_FILENAME_SUFFIX = "$buffer.dat"; + public const string TARGETMAP_FILENAME_SUFFIX = "$targetMap.dat"; + public const string POSDICT_FILENAME_SUFFIX = "$posDict.dat"; + + public const string DICT_HEADER = "kuromoji_dict"; + public const string TARGETMAP_HEADER = "kuromoji_dict_map"; + public const string POSDICT_HEADER = "kuromoji_dict_pos"; + // ReSharper disable once ConvertToConstant.Global - VERSION should be a field public static readonly int VERSION = 1; private readonly ByteBuffer buffer; @@ -387,10 +388,12 @@ private string ReadString(int offset, int length, bool kana) } /// flag that the entry has baseform data. otherwise its not inflected (same as surface form) - public static readonly int HAS_BASEFORM = 1; + public const int HAS_BASEFORM = 1; + /// flag that the entry has reading data. otherwise reading is surface form converted to katakana - public static readonly int HAS_READING = 2; + public const int HAS_READING = 2; + /// flag that the entry has pronunciation data. 
otherwise pronunciation is the reading - public static readonly int HAS_PRONUNCIATION = 4; + public const int HAS_PRONUNCIATION = 4; } } diff --git a/src/Lucene.Net.Analysis.Kuromoji/Dict/CharacterDefinition.cs b/src/Lucene.Net.Analysis.Kuromoji/Dict/CharacterDefinition.cs index c9fccc184c..b72e0b7ff5 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/Dict/CharacterDefinition.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/Dict/CharacterDefinition.cs @@ -27,8 +27,9 @@ namespace Lucene.Net.Analysis.Ja.Dict /// public sealed class CharacterDefinition { - public static readonly string FILENAME_SUFFIX = ".dat"; - public static readonly string HEADER = "kuromoji_cd"; + public const string FILENAME_SUFFIX = ".dat"; + public const string HEADER = "kuromoji_cd"; + // ReSharper disable once ConvertToConstant.Global - VERSION should be a field public static readonly int VERSION = 1; public static readonly int CLASS_COUNT = Enum.GetValues(typeof(CharacterClass)).Length; @@ -45,18 +46,18 @@ private enum CharacterClass : byte private readonly bool[] groupMap = new bool[CLASS_COUNT]; // the classes: - public static readonly byte NGRAM = (byte)CharacterClass.NGRAM; - public static readonly byte DEFAULT = (byte)CharacterClass.DEFAULT; - public static readonly byte SPACE = (byte)CharacterClass.SPACE; - public static readonly byte SYMBOL = (byte)CharacterClass.SYMBOL; - public static readonly byte NUMERIC = (byte)CharacterClass.NUMERIC; - public static readonly byte ALPHA = (byte)CharacterClass.ALPHA; - public static readonly byte CYRILLIC = (byte)CharacterClass.CYRILLIC; - public static readonly byte GREEK = (byte)CharacterClass.GREEK; - public static readonly byte HIRAGANA = (byte)CharacterClass.HIRAGANA; - public static readonly byte KATAKANA = (byte)CharacterClass.KATAKANA; - public static readonly byte KANJI = (byte)CharacterClass.KANJI; - public static readonly byte KANJINUMERIC = (byte)CharacterClass.KANJINUMERIC; + public const byte NGRAM = (byte)CharacterClass.NGRAM; + public const byte DEFAULT = (byte)CharacterClass.DEFAULT; + public const byte SPACE = (byte)CharacterClass.SPACE; + public const byte SYMBOL = (byte)CharacterClass.SYMBOL; + public const byte NUMERIC = (byte)CharacterClass.NUMERIC; + public const byte ALPHA = (byte)CharacterClass.ALPHA; + public const byte CYRILLIC = (byte)CharacterClass.CYRILLIC; + public const byte GREEK = (byte)CharacterClass.GREEK; + public const byte HIRAGANA = (byte)CharacterClass.HIRAGANA; + public const byte KATAKANA = (byte)CharacterClass.KATAKANA; + public const byte KANJI = (byte)CharacterClass.KANJI; + public const byte KANJINUMERIC = (byte)CharacterClass.KANJINUMERIC; private CharacterDefinition() { diff --git a/src/Lucene.Net.Analysis.Kuromoji/Dict/ConnectionCosts.cs b/src/Lucene.Net.Analysis.Kuromoji/Dict/ConnectionCosts.cs index 3095f39830..0ce6e90aea 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/Dict/ConnectionCosts.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/Dict/ConnectionCosts.cs @@ -28,8 +28,9 @@ namespace Lucene.Net.Analysis.Ja.Dict /// public sealed class ConnectionCosts { - public static readonly string FILENAME_SUFFIX = ".dat"; - public static readonly string HEADER = "kuromoji_cc"; + public const string FILENAME_SUFFIX = ".dat"; + public const string HEADER = "kuromoji_cc"; + // ReSharper disable once ConvertToConstant.Global - VERSION should be a field public static readonly int VERSION = 1; private readonly short[][] costs; // array is backward IDs first since get is called using the same backward ID consecutively. maybe doesn't matter. 
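A note on a pattern visible in the Kuromoji dictionary files above: the commit deliberately leaves each VERSION field as public static readonly int VERSION = 1; with a "ReSharper disable once ConvertToConstant.Global - VERSION should be a field" comment, while the header and filename-suffix strings become const. The patch does not state the reason, but the usual one in C# is binary compatibility: a const value is inlined into every assembly that references it at compile time, whereas a static readonly field is read at run time, so a later version bump is picked up without recompiling consumers. The sketch below illustrates the difference; DictionaryFormat and FormatInfo are hypothetical names, not types from this patch.

    using System;

    // Hypothetical illustration of const inlining vs. static readonly lookup.
    public static class DictionaryFormat
    {
        // Safe as const: part of the file format, never expected to change.
        public const string HEADER = "kuromoji_dict";

        // Kept as a field: if this were const, the literal 1 would be baked into
        // every compiled caller, and a version bump would go unnoticed until
        // those callers were recompiled.
        public static readonly int VERSION = 1;
    }

    public static class FormatInfo
    {
        public static void Print()
        {
            // HEADER is compiled into this assembly as the literal "kuromoji_dict";
            // VERSION is fetched from DictionaryFormat at run time.
            Console.WriteLine($"{DictionaryFormat.HEADER} v{DictionaryFormat.VERSION}");
        }
    }

The same rule explains fields the commit leaves untouched elsewhere: for example, NEW_LINE = Environment.NewLine in TrecContentSource.cs below cannot become const at all, because const requires a compile-time constant expression.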
diff --git a/src/Lucene.Net.Analysis.Kuromoji/Dict/Dictionary.cs b/src/Lucene.Net.Analysis.Kuromoji/Dict/Dictionary.cs index 1b8b808560..2c579f2efe 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/Dict/Dictionary.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/Dict/Dictionary.cs @@ -98,9 +98,9 @@ public interface IDictionary // 'getAdditionalData' if other dictionaries like unidic have additional data } - // LUCENENT TODO: Make this whole thing into an abstact class?? + // LUCENENET TODO: Make this whole thing into an abstract class?? public static class Dictionary // LUCENENET specific: CA1052 Static holder types should be Static or NotInheritable { - public static readonly string INTERNAL_SEPARATOR = "\u0000"; + public const string INTERNAL_SEPARATOR = "\0"; } } diff --git a/src/Lucene.Net.Analysis.Kuromoji/Dict/TokenInfoDictionary.cs b/src/Lucene.Net.Analysis.Kuromoji/Dict/TokenInfoDictionary.cs index c5229dc1f4..2e6cfcbd6f 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/Dict/TokenInfoDictionary.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/Dict/TokenInfoDictionary.cs @@ -29,7 +29,7 @@ namespace Lucene.Net.Analysis.Ja.Dict /// public sealed class TokenInfoDictionary : BinaryDictionary { - public static readonly string FST_FILENAME_SUFFIX = "$fst.dat"; + public const string FST_FILENAME_SUFFIX = "$fst.dat"; private readonly TokenInfoFST fst; diff --git a/src/Lucene.Net.Analysis.Kuromoji/JapaneseIterationMarkCharFilter.cs b/src/Lucene.Net.Analysis.Kuromoji/JapaneseIterationMarkCharFilter.cs index 58a60b8fee..060996231a 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/JapaneseIterationMarkCharFilter.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/JapaneseIterationMarkCharFilter.cs @@ -42,10 +42,10 @@ namespace Lucene.Net.Analysis.Ja public class JapaneseIterationMarkCharFilter : CharFilter { /// Normalize kanji iteration marks by default - public static readonly bool NORMALIZE_KANJI_DEFAULT = true; + public const bool NORMALIZE_KANJI_DEFAULT = true; /// Normalize kana iteration marks by default - public static readonly bool NORMALIZE_KANA_DEFAULT = true; + public const bool NORMALIZE_KANA_DEFAULT = true; private const char KANJI_ITERATION_MARK = '\u3005'; // 々 @@ -167,17 +167,17 @@ public JapaneseIterationMarkCharFilter(TextReader input, bool normalizeKanji, bo /// Reads a specified maximum number of characters from the current reader and writes the data to a buffer, beginning at the specified index. /// /// - /// When this method returns, contains the specified character array with the values between index and (index + count - 1) + /// When this method returns, contains the specified character array with the values between index and (index + count - 1) /// replaced by the characters read from the current source. /// /// The position in buffer at which to begin writing. /// /// - /// The maximum number of characters to read. If the end of the reader is reached before the specified number of characters is + /// The maximum number of characters to read. If the end of the reader is reached before the specified number of characters is /// read into the buffer, the method returns. /// /// - /// The number of characters that have been read. The number will be less than or equal to count, depending on whether the data is + /// The number of characters that have been read. The number will be less than or equal to count, depending on whether the data is /// available within the reader. This method returns 0 (zero) if it is called when no more characters are left to read. 
/// public override int Read(char[] buffer, int offset, int length) diff --git a/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs b/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs index edf76facd7..5d7f2b234d 100644 --- a/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs +++ b/src/Lucene.Net.Analysis.Kuromoji/JapaneseTokenizer.cs @@ -64,7 +64,7 @@ public sealed class JapaneseTokenizer : Tokenizer /// /// Default tokenization mode. Currently this is SEARCH. /// - public static readonly JapaneseTokenizerMode DEFAULT_MODE = JapaneseTokenizerMode.SEARCH; + public const JapaneseTokenizerMode DEFAULT_MODE = JapaneseTokenizerMode.SEARCH; // LUCENENET specific: de-nested Type and renamed JapaneseTokenizerType diff --git a/src/Lucene.Net.Analysis.Phonetic/DoubleMetaphoneFilterFactory.cs b/src/Lucene.Net.Analysis.Phonetic/DoubleMetaphoneFilterFactory.cs index 8f4d014554..0ec710ee01 100644 --- a/src/Lucene.Net.Analysis.Phonetic/DoubleMetaphoneFilterFactory.cs +++ b/src/Lucene.Net.Analysis.Phonetic/DoubleMetaphoneFilterFactory.cs @@ -36,11 +36,11 @@ namespace Lucene.Net.Analysis.Phonetic public class DoubleMetaphoneFilterFactory : TokenFilterFactory { /// parameter name: true if encoded tokens should be added as synonyms - public static readonly string INJECT = "inject"; + public const string INJECT = "inject"; /// parameter name: restricts the length of the phonetic code - public static readonly string MAX_CODE_LENGTH = "maxCodeLength"; + public const string MAX_CODE_LENGTH = "maxCodeLength"; /// default maxCodeLength if not specified - public static readonly int DEFAULT_MAX_CODE_LENGTH = 4; + public const int DEFAULT_MAX_CODE_LENGTH = 4; private readonly bool inject; private readonly int maxCodeLength; diff --git a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Languages.cs b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Languages.cs index 0631200ae3..8ed01933f6 100644 --- a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Languages.cs +++ b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Languages.cs @@ -66,7 +66,7 @@ public class Languages // exposes org/apache/commons/codec/language/bm/%s_languages.txt for %s in NameType.* as a list of supported // languages, and a second part that provides instance methods for accessing this set of supported languages.
- public static readonly string ANY = "any"; + public const string ANY = "any"; private static readonly IDictionary LANGUAGES = LoadLanguages(); diff --git a/src/Lucene.Net.Analysis.Phonetic/Language/RefinedSoundex.cs b/src/Lucene.Net.Analysis.Phonetic/Language/RefinedSoundex.cs index 4bf0a0aadf..09874f15b6 100644 --- a/src/Lucene.Net.Analysis.Phonetic/Language/RefinedSoundex.cs +++ b/src/Lucene.Net.Analysis.Phonetic/Language/RefinedSoundex.cs @@ -34,7 +34,7 @@ public class RefinedSoundex : IStringEncoder /// /// since 1.4 /// - public static readonly string US_ENGLISH_MAPPING_STRING = "01360240043788015936020505"; + public const string US_ENGLISH_MAPPING_STRING = "01360240043788015936020505"; /// /// RefinedSoundex is *refined* for a number of reasons one being that the diff --git a/src/Lucene.Net.Analysis.Phonetic/Language/Soundex.cs b/src/Lucene.Net.Analysis.Phonetic/Language/Soundex.cs index 92a68b5f26..289300e9ef 100644 --- a/src/Lucene.Net.Analysis.Phonetic/Language/Soundex.cs +++ b/src/Lucene.Net.Analysis.Phonetic/Language/Soundex.cs @@ -42,7 +42,7 @@ public class Soundex : IStringEncoder /// /// since 1.11 /// - public static readonly char SILENT_MARKER = '-'; + public const char SILENT_MARKER = '-'; /// /// This is a default mapping of the 26 letters used in US English. A value of 0 for a letter position @@ -57,13 +57,13 @@ public class Soundex : IStringEncoder /// /// // ABCDEFGHIJKLMNOPQRSTUVWXYZ - public static readonly string US_ENGLISH_MAPPING_STRING = "01230120022455012623010202"; + public const string US_ENGLISH_MAPPING_STRING = "01230120022455012623010202"; /// /// This is a default mapping of the 26 letters used in US English. A value of 0 for a letter position /// means do not encode. /// - /// + /// private static readonly char[] US_ENGLISH_MAPPING = US_ENGLISH_MAPPING_STRING.ToCharArray(); /// @@ -97,7 +97,7 @@ public class Soundex : IStringEncoder /// Such letters are ignored (after the first) and do not /// act as separators when dropping duplicate codes. /// - /// The codes for consonants are otherwise the same as for + /// The codes for consonants are otherwise the same as for /// and . /// /// since 1.11 @@ -129,7 +129,7 @@ public class Soundex : IStringEncoder /// /// Creates an instance using . /// - /// + /// /// public Soundex() { diff --git a/src/Lucene.Net.Analysis.SmartCn/Utility.cs b/src/Lucene.Net.Analysis.SmartCn/Utility.cs index 6ab9c843de..90320c7c14 100644 --- a/src/Lucene.Net.Analysis.SmartCn/Utility.cs +++ b/src/Lucene.Net.Analysis.SmartCn/Utility.cs @@ -41,12 +41,12 @@ public static class Utility // LUCENENET specific: CA1052 Static holder types sh /// /// Space-like characters that need to be skipped: such as space, tab, newline, carriage return. /// - public static readonly string SPACES = " \u3000\t\r\n"; // LUCENENET specific - made the U+3000 character explicitly visible: https://sonarcloud.io/project/issues?resolved=false&rules=csharpsquid%3AS2479&id=nikcio_lucenenet + public const string SPACES = " \u3000\t\r\n"; // LUCENENET specific - made the U+3000 character explicitly visible: https://sonarcloud.io/project/issues?resolved=false&rules=csharpsquid%3AS2479&id=nikcio_lucenenet /// /// Maximum bigram frequency (used in the smoothing function). /// - public static readonly int MAX_FREQUENCE = 2079997 + 80000; + public const int MAX_FREQUENCE = 2079997 + 80000; /// /// Compare two arrays starting at the specified offsets. 
@@ -55,7 +55,7 @@ public static class Utility // LUCENENET specific: CA1052 Static holder types sh /// start offset into /// right array /// start offset into - /// 0 if the arrays are equal,1 if > + /// 0 if the arrays are equal,1 if > /// , -1 if < public static int CompareArray(char[] larray, int lstartIndex, char[] rarray, int rstartIndex) @@ -127,7 +127,7 @@ public static int CompareArray(char[] larray, int lstartIndex, char[] rarray, /// offset into /// long array (word) /// offset into - /// 0 if is a prefix of , + /// 0 if is a prefix of , /// otherwise act as . public static int CompareArrayByPrefix(char[] shortArray, int shortIndex, char[] longArray, int longIndex) @@ -163,7 +163,7 @@ public static int CompareArrayByPrefix(char[] shortArray, int shortIndex, } /// - /// Return the internal constant of a given character. + /// Return the internal constant of a given character. /// /// input character /// Constant from describing the character type. diff --git a/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs b/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs index ac3dfaf778..bde9d3c3f9 100644 --- a/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs +++ b/src/Lucene.Net.Analysis.Stempel/Pl/PolishAnalyzer.cs @@ -40,12 +40,12 @@ public sealed class PolishAnalyzer : StopwordAnalyzerBase /// /// File containing default Polish stopwords. /// - public readonly static string DEFAULT_STOPWORD_FILE = "stopwords.txt"; + public const string DEFAULT_STOPWORD_FILE = "stopwords.txt"; /// /// File containing default Polish stemmer table. /// - public readonly static string DEFAULT_STEMMER_FILE = "stemmer_20000.tbl"; + public const string DEFAULT_STEMMER_FILE = "stemmer_20000.tbl"; /// /// Returns an unmodifiable instance of the default stop words set. @@ -59,7 +59,7 @@ public sealed class PolishAnalyzer : StopwordAnalyzerBase public static Trie DefaultTable => DefaultsHolder.DEFAULT_TABLE; /// - /// Atomically loads the in a lazy fashion once the outer class + /// Atomically loads the in a lazy fashion once the outer class /// accesses the static final set the first time.; /// private static class DefaultsHolder @@ -141,7 +141,7 @@ public PolishAnalyzer(LuceneVersion matchVersion, CharArraySet stopwords, CharAr /// /// /// A built from an - /// filtered with , , , + /// filtered with , , , /// if a stem excusion set is provided and . /// protected internal override TokenStreamComponents CreateComponents(string fieldName, diff --git a/src/Lucene.Net.Analysis.Stempel/Stempel/StempelFilter.cs b/src/Lucene.Net.Analysis.Stempel/Stempel/StempelFilter.cs index 8e3ec4dbc0..dfb0ea2421 100644 --- a/src/Lucene.Net.Analysis.Stempel/Stempel/StempelFilter.cs +++ b/src/Lucene.Net.Analysis.Stempel/Stempel/StempelFilter.cs @@ -40,7 +40,7 @@ public sealed class StempelFilter : TokenFilter /// Minimum length of input words to be processed. Shorter words are returned /// unchanged. /// - public static readonly int DEFAULT_MIN_LENGTH = 3; + public const int DEFAULT_MIN_LENGTH = 3; /// /// Create filter using the supplied stemming table. diff --git a/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs b/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs index 2a10caefad..603eda169d 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Feeds/DocMaker.cs @@ -41,7 +41,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Feeds /// doc.body.tokenizedspecifies whether the body field should be tokenized (default = doc.tokenized). 
/// doc.tokenized.normsspecifies whether norms should be stored in the index or not. (default false). /// doc.body.tokenized.norms - /// specifies whether norms should be stored in the index for the body field. + /// specifies whether norms should be stored in the index for the body field. /// This can be set to true, while doc.tokenized.norms is set to false, to allow norms storing just /// for the body field. (default true). /// @@ -180,14 +180,14 @@ internal Field GetNumericField(string name, NumericType type) private readonly DisposableThreadLocal leftovr = new DisposableThreadLocal(); private DisposableThreadLocal docState = new DisposableThreadLocal(); - public static readonly string BODY_FIELD = "body"; - public static readonly string TITLE_FIELD = "doctitle"; - public static readonly string DATE_FIELD = "docdate"; - public static readonly string DATE_MSEC_FIELD = "docdatenum"; - public static readonly string TIME_SEC_FIELD = "doctimesecnum"; - public static readonly string ID_FIELD = "docid"; - public static readonly string BYTES_FIELD = "bytes"; - public static readonly string NAME_FIELD = "docname"; + public const string BODY_FIELD = "body"; + public const string TITLE_FIELD = "doctitle"; + public const string DATE_FIELD = "docdate"; + public const string DATE_MSEC_FIELD = "docdatenum"; + public const string TIME_SEC_FIELD = "doctimesecnum"; + public const string ID_FIELD = "docid"; + public const string BYTES_FIELD = "bytes"; + public const string NAME_FIELD = "docname"; protected Config m_config; @@ -275,7 +275,7 @@ private Document CreateDocument(DocData docData, int size, int cnt) if (date is null) { // just set to right now - date = DateTime.Now; + date = DateTime.Now; } Field dateField = ds.GetNumericField(DATE_MSEC_FIELD, NumericType.INT64); diff --git a/src/Lucene.Net.Benchmark/ByTask/Feeds/SpatialDocMaker.cs b/src/Lucene.Net.Benchmark/ByTask/Feeds/SpatialDocMaker.cs index c4eea771b0..307c3105d6 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Feeds/SpatialDocMaker.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Feeds/SpatialDocMaker.cs @@ -41,7 +41,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Feeds /// public class SpatialDocMaker : DocMaker { - public static readonly string SPATIAL_FIELD = "spatial"; + public const string SPATIAL_FIELD = "spatial"; //cache spatialStrategy by round number private static readonly IDictionary spatialStrategyCache = new Dictionary(); // LUCENENET: marked readonly @@ -126,12 +126,12 @@ public bool ContainsKey(string key) void ICollection>.Add(KeyValuePair item) => throw UnsupportedOperationException.Create(); void ICollection>.Clear() => throw UnsupportedOperationException.Create(); bool ICollection>.Contains(KeyValuePair item) => throw UnsupportedOperationException.Create(); - + void ICollection>.CopyTo(KeyValuePair[] array, int arrayIndex) => throw UnsupportedOperationException.Create(); IEnumerator> IEnumerable>.GetEnumerator() => throw UnsupportedOperationException.Create(); bool IDictionary.Remove(string key) => throw UnsupportedOperationException.Create(); bool ICollection>.Remove(KeyValuePair item) => throw UnsupportedOperationException.Create(); - + IEnumerator IEnumerable.GetEnumerator() => throw UnsupportedOperationException.Create(); #endregion IDictionary members diff --git a/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecContentSource.cs b/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecContentSource.cs index 6cb69b5130..4a6b8db1a8 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecContentSource.cs +++ 
b/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecContentSource.cs @@ -38,7 +38,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Feeds /// /// work.dirspecifies the working directory. Required if "docs.dir" /// denotes a relative path (default=work). - /// docs.dirspecifies the directory where the TREC files reside. + /// docs.dirspecifies the directory where the TREC files reside. /// Can be set to a relative path if "work.dir" is also specified /// (default=trec). /// @@ -56,10 +56,10 @@ public class TrecContentSource : ContentSource { // LUCENENET specific - DateFormatInfo not used - public static readonly string DOCNO = ""; - public static readonly string TERMINATING_DOCNO = ""; - public static readonly string DOC = ""; - public static readonly string TERMINATING_DOC = ""; + public const string DOCNO = ""; + public const string TERMINATING_DOCNO = ""; + public const string DOC = ""; + public const string TERMINATING_DOC = ""; /// separator between lines in the buffer public static readonly string NEW_LINE = Environment.NewLine; @@ -161,7 +161,7 @@ private void Read(StringBuilder buf, string lineStart, internal virtual void OpenNextFile() { DoClose(); - //currPathType = null; + //currPathType = null; while (true) { if (nextFile >= inputFiles.Count) @@ -181,7 +181,7 @@ internal virtual void OpenNextFile() } try { - Stream inputStream = StreamUtils.GetInputStream(f); // support either gzip, bzip2, or regular text file, by extension + Stream inputStream = StreamUtils.GetInputStream(f); // support either gzip, bzip2, or regular text file, by extension reader = new StreamReader(inputStream, m_encoding); currPathType = TrecDocParser.PathType(f); return; @@ -275,7 +275,7 @@ public override DocData GetNextDocData(DocData docData) docBuf.Length = 0; Read(docBuf, DOC, false, false); - // save parsedFile for passing trecDataParser after the sync block, in + // save parsedFile for passing trecDataParser after the sync block, in // case another thread will open another file in between. parsedPathType = currPathType; @@ -302,7 +302,7 @@ public override DocData GetNextDocData(DocData docData) // count char length of text to be parsed (may be larger than the resulted plain doc body text). AddBytes(docBuf.Length); - // This code segment relies on HtmlParser being thread safe. When we get + // This code segment relies on HtmlParser being thread safe. When we get // here, everything else is already private to that thread, so we're safe. 
docData = trecDocParser.Parse(docData, name, this, docBuf, parsedPathType); AddItem(); @@ -367,7 +367,7 @@ public override void SetConfig(Config config) { m_encoding = Encoding.GetEncoding("iso-8859-1"); //StandardCharsets.ISO_8859_1.name(); } - // iteration exclusion in doc name + // iteration exclusion in doc name excludeDocnameIteration = config.Get("content.source.excludeIteration", false); } } diff --git a/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecDocParser.cs b/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecDocParser.cs index 1b1015f28e..bd84fbec72 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecDocParser.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Feeds/TrecDocParser.cs @@ -34,7 +34,7 @@ public abstract class TrecDocParser public enum ParsePathType { GOV2, FBIS, FT, FR94, LATIMES, UNKNOWN } /// trec parser type used for unknown extensions - public static readonly ParsePathType DEFAULT_PATH_TYPE = ParsePathType.GOV2; + public const ParsePathType DEFAULT_PATH_TYPE = ParsePathType.GOV2; internal static readonly IDictionary pathType2parser = new Dictionary() // LUCENENET: Avoid static constructors (see https://github.com/apache/lucenenet/pull/224#issuecomment-469284006) { diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/AddIndexesTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/AddIndexesTask.cs index 45a52958ee..18f6f371b0 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Tasks/AddIndexesTask.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/AddIndexesTask.cs @@ -34,7 +34,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Tasks /// public class AddIndexesTask : PerfTask { - public static readonly string ADDINDEXES_INPUT_DIR = "addindexes.input.dir"; + public const string ADDINDEXES_INPUT_DIR = "addindexes.input.dir"; public AddIndexesTask(PerfRunData runData) : base(runData) diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenIndexTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenIndexTask.cs index 22158f3d2a..ebe7bdc804 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenIndexTask.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenIndexTask.cs @@ -41,9 +41,9 @@ namespace Lucene.Net.Benchmarks.ByTask.Tasks /// public class OpenIndexTask : PerfTask { - public static readonly int DEFAULT_MAX_BUFFERED = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS; - public static readonly int DEFAULT_MERGE_PFACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR; - public static readonly double DEFAULT_RAM_FLUSH_MB = (int)IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB; + public const int DEFAULT_MAX_BUFFERED = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS; + public const int DEFAULT_MERGE_PFACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR; + public const double DEFAULT_RAM_FLUSH_MB = (int)IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB; private string commitUserData; public OpenIndexTask(PerfRunData runData) diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenReaderTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenReaderTask.cs index a7967e7684..29366c4ae5 100644 --- a/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenReaderTask.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/OpenReaderTask.cs @@ -32,7 +32,7 @@ namespace Lucene.Net.Benchmarks.ByTask.Tasks /// public class OpenReaderTask : PerfTask { - public static readonly string USER_DATA = "userData"; + public const string USER_DATA = "userData"; private string commitUserData = null; public OpenReaderTask(PerfRunData runData) diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/ReportTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/ReportTask.cs index 30b8004155..3fb7b51169 100644 --- 
a/src/Lucene.Net.Benchmark/ByTask/Tasks/ReportTask.cs +++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/ReportTask.cs @@ -49,14 +49,14 @@ protected ReportTask(PerfRunData runData) // LUCENENET: CA1012: Abstract types s /// /// Get a textual summary of the benchmark results, average from all test runs. /// - protected static readonly string OP = "Operation "; - protected static readonly string ROUND = " round"; - protected static readonly string RUNCNT = " runCnt"; - protected static readonly string RECCNT = " recsPerRun"; - protected static readonly string RECSEC = " rec/s"; - protected static readonly string ELAPSED = " elapsedSec"; - protected static readonly string USEDMEM = " avgUsedMem"; - protected static readonly string TOTMEM = " avgTotalMem"; + protected const string OP = "Operation "; + protected const string ROUND = " round"; + protected const string RUNCNT = " runCnt"; + protected const string RECCNT = " recsPerRun"; + protected const string RECSEC = " rec/s"; + protected const string ELAPSED = " elapsedSec"; + protected const string USEDMEM = " avgUsedMem"; + protected const string TOTMEM = " avgTotalMem"; protected static readonly string[] COLS = { RUNCNT, RECCNT, diff --git a/src/Lucene.Net.Benchmark/Quality/QualityStats.cs b/src/Lucene.Net.Benchmark/Quality/QualityStats.cs index 155b8601bf..2826d5389d 100644 --- a/src/Lucene.Net.Benchmark/Quality/QualityStats.cs +++ b/src/Lucene.Net.Benchmark/Quality/QualityStats.cs @@ -31,7 +31,7 @@ namespace Lucene.Net.Benchmarks.Quality public class QualityStats { /// Number of points for which precision is computed. - public static readonly int MAX_POINTS = 20; + public const int MAX_POINTS = 20; private double maxGoodPoints; private double recall; @@ -66,7 +66,7 @@ internal RecallPoint(int rank, double recall) private readonly IList recallPoints; // LUCENENET: marked readonly /// - /// Construct a QualityStats object with anticipated maximal number of relevant hits. + /// Construct a QualityStats object with anticipated maximal number of relevant hits. /// /// maximal possible relevant hits. /// @@ -75,7 +75,7 @@ public QualityStats(double maxGoodPoints, long searchTime) this.maxGoodPoints = maxGoodPoints; this.searchTime = searchTime; this.recallPoints = new JCG.List(); - pAt = new double[MAX_POINTS + 1]; // pAt[0] unused. + pAt = new double[MAX_POINTS + 1]; // pAt[0] unused. } /// @@ -95,7 +95,7 @@ public virtual void AddResult(int n, bool isRelevant, long docNameExtractTime) numGoodPoints += 1; recallPoints.Add(new RecallPoint(n, numGoodPoints)); if (recallPoints.Count == 1 && n <= 5) - { // first point, but only within 5 top scores. + { // first point, but only within 5 top scores. mrr = 1.0 / n; } } @@ -209,7 +209,7 @@ private static string FracFormat(string frac) // LUCENENET: CA1822: Mark members } /// - /// Create a object that is the average of the input objects. + /// Create a object that is the average of the input objects. /// /// array of input stats to be averaged. /// an average over the input stats. @@ -255,7 +255,7 @@ public static QualityStats Average(QualityStats[] stats) { avg.pAt[j] /= m; } - avg.pReleventSum /= m; // this is actually avgp now + avg.pReleventSum /= m; // this is actually avgp now avg.pReleventSum *= avg.maxGoodPoints; // so that getAvgP() would be correct return avg; @@ -294,11 +294,11 @@ public virtual RecallPoint[] GetRecallPoints() /// Returns the Mean reciprocal rank over the queries or RR for a single query. 
/// /// - /// Reciprocal rank is defined as 1/r where r is the - /// rank of the first correct result, or 0 if there are no correct + /// Reciprocal rank is defined as 1/r where r is the + /// rank of the first correct result, or 0 if there are no correct /// results within the top 5 results. /// - /// This follows the definition in + /// This follows the definition in /// /// Question Answering - CNLP at the TREC-10 Question Answering Track. /// diff --git a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs index 30c9fb66ec..62a4e852a9 100644 --- a/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs +++ b/src/Lucene.Net.Codecs/BlockTerms/BlockTermsWriter.cs @@ -31,7 +31,7 @@ namespace Lucene.Net.Codecs.BlockTerms // But we could decouple the two, ie allow several blocks in between two indexed terms /// - /// Writes terms dict, block-encoding (column stride) each term's metadata + /// Writes terms dict, block-encoding (column stride) each term's metadata /// for each set of terms between two index terms. /// /// @lucene.experimental @@ -48,7 +48,7 @@ public class BlockTermsWriter : FieldsConsumer public readonly static int VERSION_CURRENT = VERSION_CHECKSUM; /// Extension of terms file - public readonly static string TERMS_EXTENSION = "tib"; + public const string TERMS_EXTENSION = "tib"; #pragma warning disable CA2213 // Disposable fields should be disposed protected IndexOutput m_output; @@ -377,7 +377,7 @@ private void FlushBlock() bytesWriter.WriteTo(outerInstance.m_output); bytesWriter.Reset(); - // 4th pass: write the metadata + // 4th pass: write the metadata long[] longs = new long[longsSize]; bool absolute = true; for (int termCount = 0; termCount < pendingCount; termCount++) @@ -401,4 +401,4 @@ private void FlushBlock() } } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs index ddd5163ea7..349f46d2ed 100644 --- a/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs +++ b/src/Lucene.Net.Codecs/DiskDV/DiskDocValuesFormat.cs @@ -31,7 +31,7 @@ namespace Lucene.Net.Codecs.DiskDV [DocValuesFormatName("Disk")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name public sealed class DiskDocValuesFormat : DocValuesFormat { - public DiskDocValuesFormat() + public DiskDocValuesFormat() : base() { } @@ -59,9 +59,9 @@ public override DocValuesProducer FieldsProducer(SegmentReadState state) return new DiskDocValuesProducer(state, DATA_CODEC, DATA_EXTENSION, META_CODEC, META_EXTENSION); } - public static readonly string DATA_CODEC = "DiskDocValuesData"; - public static readonly string DATA_EXTENSION = "dvdd"; - public static readonly string META_CODEC = "DiskDocValuesMetadata"; - public static readonly string META_EXTENSION = "dvdm"; + public const string DATA_CODEC = "DiskDocValuesData"; + public const string DATA_EXTENSION = "dvdd"; + public const string META_CODEC = "DiskDocValuesMetadata"; + public const string META_EXTENSION = "dvdm"; } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs index 4df853ce9e..b79e7d69ea 100644 --- a/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs +++ b/src/Lucene.Net.Codecs/Memory/MemoryDocValuesFormat.cs @@ -26,28 +26,28 @@ namespace Lucene.Net.Codecs.Memory public class MemoryDocValuesFormat : 
DocValuesFormat { /// Maximum length for each binary doc values field. - public static readonly int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2; + public const int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2; internal readonly float acceptableOverheadRatio; /// - /// Calls MemoryDocValuesFormat(PackedInts.DEFAULT) + /// Calls MemoryDocValuesFormat(PackedInts.DEFAULT) /// () /// - public MemoryDocValuesFormat() + public MemoryDocValuesFormat() : this(PackedInt32s.DEFAULT) { } /// /// Creates a new with the specified - /// for . + /// for . /// /// @lucene.experimental /// - /// Compression parameter for numerics. + /// Compression parameter for numerics. /// Currently this is only used when the number of unique values is small. - public MemoryDocValuesFormat(float acceptableOverheadRatio) + public MemoryDocValuesFormat(float acceptableOverheadRatio) : base() { this.acceptableOverheadRatio = acceptableOverheadRatio; @@ -69,4 +69,4 @@ public override DocValuesProducer FieldsProducer(SegmentReadState state) internal const string METADATA_CODEC = "MemoryDocValuesMetadata"; internal const string METADATA_EXTENSION = "mdvm"; } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs b/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs index e88326c933..1eef629435 100644 --- a/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs +++ b/src/Lucene.Net.Codecs/Sep/SepPostingsWriter.cs @@ -28,7 +28,7 @@ namespace Lucene.Net.Codecs.Sep /// Writes frq to .frq, docs to .doc, pos to .pos, payloads /// to .pyl, skip data to .skp /// - /// @lucene.experimental + /// @lucene.experimental /// public sealed class SepPostingsWriter : PostingsWriterBase { @@ -66,7 +66,7 @@ public sealed class SepPostingsWriter : PostingsWriterBase /// used to accelerate . Larger values result in /// smaller indexes, greater acceleration, but fewer accelerable cases, while /// smaller values result in bigger indexes, less acceleration and more - /// accelerable cases. More detailed experiments would be useful here. + /// accelerable cases. More detailed experiments would be useful here. /// private readonly int skipInterval; private const int DEFAULT_SKIP_INTERVAL = 16; @@ -77,10 +77,10 @@ public sealed class SepPostingsWriter : PostingsWriterBase private readonly int skipMinimum; /// - /// Expert: The maximum number of skip levels. Smaller values result in + /// Expert: The maximum number of skip levels. Smaller values result in /// slightly smaller indexes, but slower skipping in big posting lists. /// - private readonly int maxSkipLevels = 10; + private const int maxSkipLevels = 10; private readonly int totalNumDocs; @@ -231,7 +231,7 @@ private SepTermState SetEmptyState() /// /// Adds a new doc in this term. If this returns null - /// then we just skip consuming positions/payloads. + /// then we just skip consuming positions/payloads. 
/// public override void StartDoc(int docID, int termDocFreq) { @@ -420,4 +420,4 @@ protected override void Dispose(bool disposing) } } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Facet/Taxonomy/FacetLabel.cs b/src/Lucene.Net.Facet/Taxonomy/FacetLabel.cs index 2f6fce09e6..df88dfaef6 100644 --- a/src/Lucene.Net.Facet/Taxonomy/FacetLabel.cs +++ b/src/Lucene.Net.Facet/Taxonomy/FacetLabel.cs @@ -36,7 +36,8 @@ namespace Lucene.Net.Facet.Taxonomy /// public class FacetLabel : IComparable { - private static readonly int BYTE_BLOCK_SIZE = Lucene.Net.Util.ByteBlockPool.BYTE_BLOCK_SIZE; + private const int BYTE_BLOCK_SIZE = Lucene.Net.Util.ByteBlockPool.BYTE_BLOCK_SIZE; + /* * copied from DocumentWriterPerThread -- if a FacetLabel is resolved to a * drill-down term which is encoded to a larger term than that length, it is @@ -46,7 +47,7 @@ public class FacetLabel : IComparable /// /// The maximum number of characters a can have. /// - public static readonly int MAX_CATEGORY_PATH_LENGTH = (BYTE_BLOCK_SIZE - 2) / 4; + public const int MAX_CATEGORY_PATH_LENGTH = (BYTE_BLOCK_SIZE - 2) / 4; /// /// The components of this . Note that this array may be diff --git a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs index fae80ddc63..4e50bf4a75 100644 --- a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs +++ b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs @@ -33,7 +33,7 @@ namespace Lucene.Net.Search.Highlight /// public class Highlighter { - public static readonly int DEFAULT_MAX_CHARS_TO_ANALYZE = 50 * 1024; + public const int DEFAULT_MAX_CHARS_TO_ANALYZE = 50 * 1024; private int _maxDocCharsToAnalyze = DEFAULT_MAX_CHARS_TO_ANALYZE; private readonly IFormatter _formatter; // LUCENENET: marked readonly diff --git a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs index da517f697c..03d0f25b14 100644 --- a/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs +++ b/src/Lucene.Net.Highlighter/PostingsHighlight/PostingsHighlighter.cs @@ -98,7 +98,7 @@ namespace Lucene.Net.Search.PostingsHighlight /// Default maximum content size to process. 
Typically snippets /// closer to the beginning of the document better summarize its content /// - public static readonly int DEFAULT_MAX_LENGTH = 10000; + public const int DEFAULT_MAX_LENGTH = 10000; private readonly int maxLength; diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragListBuilder.cs b/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragListBuilder.cs index b9651c3d3d..27ae62a294 100644 --- a/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragListBuilder.cs +++ b/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragListBuilder.cs @@ -28,8 +28,8 @@ namespace Lucene.Net.Search.VectorHighlight /// public abstract class BaseFragListBuilder : IFragListBuilder { - public static readonly int MARGIN_DEFAULT = 6; - public static readonly int MIN_FRAG_CHAR_SIZE_FACTOR = 3; + public const int MARGIN_DEFAULT = 6; + public const int MIN_FRAG_CHAR_SIZE_FACTOR = 3; internal readonly int margin; internal readonly int minFragCharSize; diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/FastVectorHighlighter.cs b/src/Lucene.Net.Highlighter/VectorHighlight/FastVectorHighlighter.cs index e5da34c9e0..864d87c30a 100644 --- a/src/Lucene.Net.Highlighter/VectorHighlight/FastVectorHighlighter.cs +++ b/src/Lucene.Net.Highlighter/VectorHighlight/FastVectorHighlighter.cs @@ -28,8 +28,8 @@ namespace Lucene.Net.Search.VectorHighlight /// public class FastVectorHighlighter { - public static readonly bool DEFAULT_PHRASE_HIGHLIGHT = true; - public static readonly bool DEFAULT_FIELD_MATCH = true; + public const bool DEFAULT_PHRASE_HIGHLIGHT = true; + public const bool DEFAULT_FIELD_MATCH = true; private readonly bool phraseHighlight; private readonly bool fieldMatch; private readonly IFragListBuilder fragListBuilder; @@ -77,7 +77,7 @@ public FastVectorHighlighter(bool phraseHighlight, bool fieldMatch, /// the created object public virtual FieldQuery GetFieldQuery(Query query) { - // TODO: should we deprecate this? + // TODO: should we deprecate this? // because if there is no reader, then we cannot rewrite MTQ. try { diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/SimpleBoundaryScanner.cs b/src/Lucene.Net.Highlighter/VectorHighlight/SimpleBoundaryScanner.cs index 5a13446aa9..e9362fd0f9 100644 --- a/src/Lucene.Net.Highlighter/VectorHighlight/SimpleBoundaryScanner.cs +++ b/src/Lucene.Net.Highlighter/VectorHighlight/SimpleBoundaryScanner.cs @@ -27,7 +27,7 @@ namespace Lucene.Net.Search.VectorHighlight /// public class SimpleBoundaryScanner : IBoundaryScanner { - public static readonly int DEFAULT_MAX_SCAN = 20; + public const int DEFAULT_MAX_SCAN = 20; public static readonly char[] DEFAULT_BOUNDARY_CHARS = { '.', ',', '!', '?', ' ', '\t', '\n' }; protected int m_maxScan; diff --git a/src/Lucene.Net.Misc/Index/Sorter/SortingMergePolicy.cs b/src/Lucene.Net.Misc/Index/Sorter/SortingMergePolicy.cs index 1948bab31c..30ed799f9e 100644 --- a/src/Lucene.Net.Misc/Index/Sorter/SortingMergePolicy.cs +++ b/src/Lucene.Net.Misc/Index/Sorter/SortingMergePolicy.cs @@ -33,15 +33,15 @@ namespace Lucene.Net.Index.Sorter /// will be sorted while segments resulting from a flush will be in the order /// in which documents have been added. /// NOTE: Never use this policy if you rely on - /// IndexWriter.AddDocuments + /// IndexWriter.AddDocuments /// to have sequentially-assigned doc IDs, this policy will scatter doc IDs. /// - /// NOTE: This policy should only be used with idempotent s - /// so that the order of segments is predictable. 
For example, using - /// in reverse (which is not idempotent) will make - /// the order of documents in a segment depend on the number of times the segment + /// NOTE: This policy should only be used with idempotent s + /// so that the order of segments is predictable. For example, using + /// in reverse (which is not idempotent) will make + /// the order of documents in a segment depend on the number of times the segment /// has been merged. - /// @lucene.experimental + /// @lucene.experimental /// /// public sealed class SortingMergePolicy : MergePolicy @@ -51,7 +51,7 @@ public sealed class SortingMergePolicy : MergePolicy /// Put in the diagnostics to denote that /// this segment is sorted. /// - public static readonly string SORTER_ID_PROP = "sorter"; + public const string SORTER_ID_PROP = "sorter"; internal class SortingOneMerge : OneMerge { @@ -274,4 +274,4 @@ public override string ToString() return "SortingMergePolicy(" + @in + ", sorter=" + sorter + ")"; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs index b4e5528f68..8c4cf4a40d 100644 --- a/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs +++ b/src/Lucene.Net.Queries/Mlt/MoreLikeThis.cs @@ -77,11 +77,11 @@ namespace Lucene.Net.Queries.Mlt /// /// IndexReader ir = ... /// IndexSearcher is = ... - /// + /// /// MoreLikeThis mlt = new MoreLikeThis(ir); /// TextReader target = ... // orig source of doc you want to find similarities to /// Query query = mlt.Like(target); - /// + /// /// Hits hits = is.Search(query); /// // now the usual iteration thru 'hits' - the only thing to watch for is to make sure /// //you ignore the doc if it matches your 'target' document, as it should be similar to itself @@ -132,32 +132,32 @@ public sealed class MoreLikeThis /// Default maximum number of tokens to parse in each example doc field that is not stored with TermVector support. /// /// - public static readonly int DEFAULT_MAX_NUM_TOKENS_PARSED = 5000; + public const int DEFAULT_MAX_NUM_TOKENS_PARSED = 5000; /// /// Ignore terms with less than this frequency in the source doc. /// /// - public static readonly int DEFAULT_MIN_TERM_FREQ = 2; + public const int DEFAULT_MIN_TERM_FREQ = 2; /// /// Ignore words which do not occur in at least this many docs. /// /// - public static readonly int DEFAULT_MIN_DOC_FREQ = 5; + public const int DEFAULT_MIN_DOC_FREQ = 5; /// /// Ignore words which occur in more than this many docs. /// /// /// - public static readonly int DEFAULT_MAX_DOC_FREQ = int.MaxValue; + public const int DEFAULT_MAX_DOC_FREQ = int.MaxValue; /// /// Boost terms in query based on score. /// /// - public static readonly bool DEFAULT_BOOST = false; + public const bool DEFAULT_BOOST = false; /// /// Default field names. Null is used to specify that the field names should be looked @@ -169,13 +169,13 @@ public sealed class MoreLikeThis /// Ignore words less than this length or if 0 then this has no effect. /// /// - public static readonly int DEFAULT_MIN_WORD_LENGTH = 0; + public const int DEFAULT_MIN_WORD_LENGTH = 0; /// /// Ignore words greater than this length or if 0 then this has no effect. /// /// - public static readonly int DEFAULT_MAX_WORD_LENGTH = 0; + public const int DEFAULT_MAX_WORD_LENGTH = 0; /// /// Default set of stopwords. 
@@ -189,10 +189,10 @@ public sealed class MoreLikeThis /// /// /// - public static readonly int DEFAULT_MAX_QUERY_TERMS = 25; + public const int DEFAULT_MAX_QUERY_TERMS = 25; // LUCENENET NOTE: The following fields were made into auto-implemented properties: - // analyzer, minTermFreq, minDocFreq, maxDocFreq, boost, + // analyzer, minTermFreq, minDocFreq, maxDocFreq, boost, // fieldNames, maxNumTokensParsed, minWordLen, maxWordLen, // maxQueryTerms, similarity @@ -250,7 +250,7 @@ public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim) /// /// Gets or Sets an analyzer that will be used to parse source doc with. The default analyzer - /// is not set. An analyzer is not required for generating a query with the + /// is not set. An analyzer is not required for generating a query with the /// method, all other 'like' methods require an analyzer. /// public Analyzer Analyzer { get; set; } @@ -299,7 +299,7 @@ public void SetMaxDocFreqPct(int maxPercentage) /// /// Gets or Sets the field names that will be used when generating the 'More Like This' query. - /// The default field names that will be used is . + /// The default field names that will be used is . /// Set this to null for the field names to be determined at runtime from the /// provided in the constructor. /// @@ -790,4 +790,4 @@ internal ScoreTerm(string word, string topField, float score, float idf, int doc /// public int Tf { get; private set; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.QueryParser/Ext/Extensions.cs b/src/Lucene.Net.QueryParser/Ext/Extensions.cs index bfca17fbc9..4292f19aca 100644 --- a/src/Lucene.Net.QueryParser/Ext/Extensions.cs +++ b/src/Lucene.Net.QueryParser/Ext/Extensions.cs @@ -47,7 +47,7 @@ public class Extensions /// /// The default extension field delimiter character. 
This constant is set to ':' /// - public static readonly char DEFAULT_EXTENSION_FIELD_DELIMITER = ':'; + public const char DEFAULT_EXTENSION_FIELD_DELIMITER = ':'; /// /// Creates a new instance with the diff --git a/src/Lucene.Net.QueryParser/Flexible/Core/Nodes/QueryNodeImpl.cs b/src/Lucene.Net.QueryParser/Flexible/Core/Nodes/QueryNodeImpl.cs index a722132f6a..def7b6775e 100644 --- a/src/Lucene.Net.QueryParser/Flexible/Core/Nodes/QueryNodeImpl.cs +++ b/src/Lucene.Net.QueryParser/Flexible/Core/Nodes/QueryNodeImpl.cs @@ -35,7 +35,7 @@ public abstract class QueryNode : IQueryNode // LUCENENET specific: Not implemen /// index default field /// // TODO remove PLAINTEXT_FIELD_NAME replacing it with configuration APIs - public static readonly string PLAINTEXT_FIELD_NAME = "_plain"; + public const string PLAINTEXT_FIELD_NAME = "_plain"; private bool isLeaf = true; @@ -212,9 +212,9 @@ private void SetParent(IQueryNode parent) /// /// field name /// true if fld is the default field - // TODO: remove this method, it's commonly used by + // TODO: remove this method, it's commonly used by // - // to figure out what is the default field, however, + // to figure out what is the default field, however, // // should receive the default field value directly by parameter protected virtual bool IsDefaultField(string fld) @@ -230,7 +230,7 @@ protected virtual bool IsDefaultField(string fld) /// /// Every implementation of this class should return pseudo xml like this: - /// + /// /// For FieldQueryNode: <field start='1' end='2' field='subject' text='foo'/> /// /// diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/MultiTermRewriteMethodProcessor.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/MultiTermRewriteMethodProcessor.cs index 8e5a521476..4fe9fc7d8c 100644 --- a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/MultiTermRewriteMethodProcessor.cs +++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/MultiTermRewriteMethodProcessor.cs @@ -33,7 +33,7 @@ namespace Lucene.Net.QueryParsers.Flexible.Standard.Processors /// public class MultiTermRewriteMethodProcessor : QueryNodeProcessor { - public static readonly string TAG_ID = "MultiTermRewriteMethodConfiguration"; + public const string TAG_ID = "MultiTermRewriteMethodConfiguration"; protected override IQueryNode PostProcessNode(IQueryNode node) { diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/OpenRangeQueryNodeProcessor.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/OpenRangeQueryNodeProcessor.cs index dff3d3dce8..74fe006de9 100644 --- a/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/OpenRangeQueryNodeProcessor.cs +++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Processors/OpenRangeQueryNodeProcessor.cs @@ -30,7 +30,7 @@ namespace Lucene.Net.QueryParsers.Flexible.Standard.Processors /// public class OpenRangeQueryNodeProcessor : QueryNodeProcessor { - public readonly static string OPEN_RANGE_TOKEN = "*"; + public const string OPEN_RANGE_TOKEN = "*"; public OpenRangeQueryNodeProcessor() { } diff --git a/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs b/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs index 35c2a45ef2..d4a90e06c9 100644 --- a/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs +++ b/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs @@ -61,18 +61,18 @@ namespace Lucene.Net.QueryParsers.Surround.Parser [SuppressMessage("Style", "IDE0028:Collection initialization can be simplified", Justification = "This class is based 
on generated code")] public class QueryParser { - internal readonly int minimumPrefixLength = 3; - internal readonly int minimumCharsInTrunc = 3; - internal readonly string truncationErrorMessage = "Too unrestrictive truncation: "; - internal readonly string boostErrorMessage = "Cannot handle boost value: "; + internal const int minimumPrefixLength = 3; + internal const int minimumCharsInTrunc = 3; + internal const string truncationErrorMessage = "Too unrestrictive truncation: "; + internal const string boostErrorMessage = "Cannot handle boost value: "; /* CHECKME: These should be the same as for the tokenizer. How? */ - internal readonly char truncator = '*'; - internal readonly char anyChar = '?'; - internal readonly char quote = '"'; - internal readonly char fieldOperator = ':'; - internal readonly char comma = ','; /* prefix list separator */ - internal readonly char carat = '^'; /* weight operator */ + internal const char truncator = '*'; + internal const char anyChar = '?'; + // internal const char quote = '"'; /* LUCENENET: not used */ + internal const char fieldOperator = ':'; + // internal const char comma = ','; /* LUCENENET: not used; prefix list separator */ + // internal const char carat = '^'; /* LUCENENET: not used; weight operator */ public static SrndQuery Parse(string query) { diff --git a/src/Lucene.Net.QueryParser/Surround/Query/FieldsQuery.cs b/src/Lucene.Net.QueryParser/Surround/Query/FieldsQuery.cs index e3b6378b05..59f700c312 100644 --- a/src/Lucene.Net.QueryParser/Surround/Query/FieldsQuery.cs +++ b/src/Lucene.Net.QueryParser/Surround/Query/FieldsQuery.cs @@ -29,7 +29,7 @@ public class FieldsQuery : SrndQuery /* mostly untested */ private readonly SrndQuery q; // LUCENENET: marked readonly private readonly IList fieldNames; // LUCENENET: marked readonly private readonly char fieldOp; - private readonly string orOperatorName = "OR"; /* for expanded queries, not normally visible */ + private const string orOperatorName = "OR"; /* for expanded queries, not normally visible */ public FieldsQuery(SrndQuery q, IList fieldNames, char fieldOp) { diff --git a/src/Lucene.Net.Spatial/Query/SpatialArgs.cs b/src/Lucene.Net.Spatial/Query/SpatialArgs.cs index 2a2c05ca38..600f7afa05 100644 --- a/src/Lucene.Net.Spatial/Query/SpatialArgs.cs +++ b/src/Lucene.Net.Spatial/Query/SpatialArgs.cs @@ -24,12 +24,12 @@ namespace Lucene.Net.Spatial.Queries /// /// Principally holds the query and the . /// It's used as an argument to some methods on . - /// + /// /// @lucene.experimental /// public class SpatialArgs { - public static readonly double DEFAULT_DISTERRPCT = 0.025d; + public const double DEFAULT_DISTERRPCT = 0.025d; private SpatialOperation operation; private IShape shape; @@ -81,7 +81,7 @@ public static double CalcDistanceFromErrPct(IShape shape, double distErrPct, Spa /// /// Gets the error distance that specifies how precise the query shape is. This - /// looks at , , and + /// looks at , , and /// . /// /// diff --git a/src/Lucene.Net.Suggest/Spell/SpellChecker.cs b/src/Lucene.Net.Suggest/Spell/SpellChecker.cs index d23a02f0a1..b086dbe6a8 100644 --- a/src/Lucene.Net.Suggest/Spell/SpellChecker.cs +++ b/src/Lucene.Net.Suggest/Spell/SpellChecker.cs @@ -66,12 +66,13 @@ public class SpellChecker : IDisposable // don't modify the directory directly - see SwapSearcher() // TODO: why is this package private? 
internal Directory spellIndex; + /// /// Boost value for start and end grams /// - private readonly float bStart = 2.0f; // LUCENENET: marked readonly + private const float bStart = 2.0f; // LUCENENET: marked const - private readonly float bEnd = 1.0f; // LUCENENET: marked readonly + private const float bEnd = 1.0f; // LUCENENET: marked const // don't use this searcher directly - see SwapSearcher() private IndexSearcher searcher; diff --git a/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellLookup.cs b/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellLookup.cs index 03f74c577c..4fd66a16b4 100644 --- a/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellLookup.cs +++ b/src/Lucene.Net.Suggest/Suggest/Jaspell/JaspellLookup.cs @@ -25,15 +25,15 @@ namespace Lucene.Net.Search.Suggest.Jaspell */ /// - /// Suggest implementation based on + /// Suggest implementation based on /// JaSpell. /// /// public class JaspellLookup : Lookup { private JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie(); - private readonly bool usePrefix = true; - private readonly int editDistance = 2; + private const bool usePrefix = true; + private const int editDistance = 2; /// /// Number of entries the lookup was built with @@ -126,10 +126,11 @@ public override IList DoLookup(string key, IEnumerable c { list = trie.MatchPrefix(key, count); } - else - { - list = trie.MatchAlmost(key, count); - } + // LUCENENET NOTE: commented out because usePrefix is always true, unreachable code + // else + // { + // list = trie.MatchAlmost(key, count); + // } if (list is null || list.Count == 0) { return res; @@ -258,4 +259,4 @@ public override long GetSizeInBytes() public override long Count => count; } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs b/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs index 3b72dbbd95..ce1b33689a 100644 --- a/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs +++ b/src/Lucene.Net.TestFramework/Analysis/MockTokenizer.cs @@ -69,6 +69,7 @@ public class MockTokenizer : Tokenizer private readonly CharacterRunAutomaton runAutomaton; private readonly bool lowerCase; private readonly int maxTokenLength; + // ReSharper disable once ConvertToConstant.Global - changes to 255 in later versions of Lucene public static readonly int DEFAULT_MAX_TOKEN_LENGTH = int.MaxValue; private int state; @@ -352,4 +353,4 @@ public virtual bool EnableChecks set => enableChecks = value; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net.TestFramework/Codecs/Lucene40/Lucene40PostingsWriter.cs b/src/Lucene.Net.TestFramework/Codecs/Lucene40/Lucene40PostingsWriter.cs index 68b10d9777..28d1c82c16 100644 --- a/src/Lucene.Net.TestFramework/Codecs/Lucene40/Lucene40PostingsWriter.cs +++ b/src/Lucene.Net.TestFramework/Codecs/Lucene40/Lucene40PostingsWriter.cs @@ -55,7 +55,7 @@ public sealed class Lucene40PostingsWriter : PostingsWriterBase /// Expert: The maximum number of skip levels. Smaller values result in /// slightly smaller indexes, but slower skipping in big posting lists. 
/// - internal readonly int maxSkipLevels = 10; + internal const int maxSkipLevels = 10; internal readonly int totalNumDocs; @@ -369,4 +369,4 @@ protected override void Dispose(bool disposing) } } #pragma warning restore 612, 618 -} \ No newline at end of file +} diff --git a/src/Lucene.Net.TestFramework/Codecs/RAMOnly/RAMOnlyPostingsFormat.cs b/src/Lucene.Net.TestFramework/Codecs/RAMOnly/RAMOnlyPostingsFormat.cs index 397edb726a..b76bdac48d 100644 --- a/src/Lucene.Net.TestFramework/Codecs/RAMOnly/RAMOnlyPostingsFormat.cs +++ b/src/Lucene.Net.TestFramework/Codecs/RAMOnly/RAMOnlyPostingsFormat.cs @@ -571,7 +571,7 @@ public override long GetCost() private readonly AtomicInt32 nextID = new AtomicInt32(); - private readonly string RAM_ONLY_NAME = "RAMOnly"; + private const string RAM_ONLY_NAME = "RAMOnly"; private const int VERSION_START = 0; private const int VERSION_LATEST = VERSION_START; diff --git a/src/Lucene.Net.TestFramework/Support/Util/DefaultNamespaceTypeWrapper.cs b/src/Lucene.Net.TestFramework/Support/Util/DefaultNamespaceTypeWrapper.cs index 4a3de06d18..ab24309ddc 100644 --- a/src/Lucene.Net.TestFramework/Support/Util/DefaultNamespaceTypeWrapper.cs +++ b/src/Lucene.Net.TestFramework/Support/Util/DefaultNamespaceTypeWrapper.cs @@ -9,17 +9,17 @@ namespace Lucene.Net.Util #region Copyright (c) Charlie Poole, Rob Prouse and Contributors. MIT License. // Copyright (c) 2021 Charlie Poole, Rob Prouse - // + // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: - // + // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. - // + // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -95,7 +95,7 @@ public Assembly Assembly /// public string Namespace { - get { return null; } // LUCENENET: Force this type to be the + get { return null; } // LUCENENET: Force this type to be the } /// @@ -289,6 +289,6 @@ public IMethodInfo[] GetMethodsWithAttribute(bool inherit) where T : class // From NUnit's Reflect class - internal static readonly BindingFlags AllMembers = BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance | BindingFlags.Static; + internal const BindingFlags AllMembers = BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance | BindingFlags.Static; } } diff --git a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs index 53c4b28ae5..95e6dd333e 100644 --- a/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs +++ b/src/Lucene.Net.Tests.Analysis.Common/Analysis/Th/TestThaiAnalyzer.cs @@ -35,7 +35,7 @@ namespace Lucene.Net.Analysis.Th /// /// Test case for ThaiAnalyzer, modified from TestFrenchAnalyzer - /// + /// /// public class TestThaiAnalyzer : BaseTokenStreamTestCase @@ -74,7 +74,7 @@ public virtual void TestBackwardsStopWords() /// /// Thai numeric tokens were typed as instead of . 
- /// @deprecated (3.1) testing backwards behavior + /// @deprecated (3.1) testing backwards behavior [Test] [Obsolete("(3.1) testing backwards behavior")] public virtual void TestBuggyTokenType30() @@ -82,7 +82,7 @@ public virtual void TestBuggyTokenType30() AssertAnalyzesTo(new ThaiAnalyzer(LuceneVersion.LUCENE_30), "การที่ได้ต้องแสดงว่างานดี ๑๒๓", new string[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี", "๑๒๓" }, new string[] { "", "", "", "", "", "", "", "", "" }); } - /// @deprecated (3.1) testing backwards behavior + /// @deprecated (3.1) testing backwards behavior [Test] [Obsolete("(3.1) testing backwards behavior")] public virtual void TestAnalyzer30() @@ -100,9 +100,10 @@ public virtual void TestAnalyzer30() } // Elision character - private static readonly char THAI_PAIYANNOI = (char)0x0E2F; + private const char THAI_PAIYANNOI = (char)0x0E2F; + // Repeat character - private static readonly char THAI_MAIYAMOK = (char)0x0E46; + private const char THAI_MAIYAMOK = (char)0x0E46; [Test] [LuceneNetSpecific] @@ -166,7 +167,7 @@ public virtual void TestReusableTokenStream() AssertAnalyzesTo(analyzer, "บริษัทชื่อ XY&Z - คุยกับ xyz@demo.com", new string[] { "บริษัท", "ชื่อ", "xy", "z", "คุย", "กับ", "xyz", "demo.com" }); } - /// @deprecated (3.1) for version back compat + /// @deprecated (3.1) for version back compat [Test] [Obsolete("(3.1) for version back compat")] public virtual void TestReusableTokenStream30() @@ -364,7 +365,7 @@ public virtual void TestRandomStrings() /// /// blast some random large strings through the analyzer - /// + /// [Test] [AwaitsFix(BugUrl = "https://github.com/apache/lucenenet/issues/269")] // LUCENENET TODO: this test occasionally fails public virtual void TestRandomHugeStrings() @@ -383,7 +384,7 @@ public virtual void TestAttributeReuse() // just consume TokenStream ts = analyzer.GetTokenStream("dummy", "ภาษาไทย"); AssertTokenStreamContents(ts, new string[] { "ภาษา", "ไทย" }); - // this consumer adds flagsAtt, which this analyzer does not use. + // this consumer adds flagsAtt, which this analyzer does not use. ts = analyzer.GetTokenStream("dummy", "ภาษาไทย"); ts.AddAttribute(); AssertTokenStreamContents(ts, new string[] { "ภาษา", "ไทย" }); @@ -406,4 +407,4 @@ public virtual void TestNumeralBreaking() } } } -#endif \ No newline at end of file +#endif diff --git a/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPLemmatizerFilterFactory.cs b/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPLemmatizerFilterFactory.cs index 309f197749..deb10aaf0e 100644 --- a/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPLemmatizerFilterFactory.cs +++ b/src/Lucene.Net.Tests.Analysis.OpenNLP/TestOpenNLPLemmatizerFilterFactory.cs @@ -26,48 +26,48 @@ namespace Lucene.Net.Analysis.OpenNlp public class TestOpenNLPLemmatizerFilterFactory : BaseTokenStreamTestCase { - private const String SENTENCE = "They sent him running in the evening."; - private static readonly String[] SENTENCE_dict_punc = { "they", "send", "he", "run", "in", "the", "evening", "." }; - private static readonly String[] SENTENCE_maxent_punc = { "they", "send", "he", "runn", "in", "the", "evening", "." }; - private static readonly String[] SENTENCE_posTags = { "NNP", "VBD", "PRP", "VBG", "IN", "DT", "NN", "." }; - private static readonly String SENTENCES = "They sent him running in the evening. 
He did not come back."; - private static readonly String[] SENTENCES_dict_punc + private const string SENTENCE = "They sent him running in the evening."; + private static readonly string[] SENTENCE_dict_punc = { "they", "send", "he", "run", "in", "the", "evening", "." }; + private static readonly string[] SENTENCE_maxent_punc = { "they", "send", "he", "runn", "in", "the", "evening", "." }; + private static readonly string[] SENTENCE_posTags = { "NNP", "VBD", "PRP", "VBG", "IN", "DT", "NN", "." }; + private const string SENTENCES = "They sent him running in the evening. He did not come back."; + private static readonly string[] SENTENCES_dict_punc = { "they", "send", "he", "run", "in", "the", "evening", ".", "he", "do", "not", "come", "back", "." }; - private static readonly String[] SENTENCES_maxent_punc + private static readonly string[] SENTENCES_maxent_punc = { "they", "send", "he", "runn", "in", "the", "evening", ".", "he", "do", "not", "come", "back", "." }; - private static readonly String[] SENTENCES_posTags + private static readonly string[] SENTENCES_posTags = { "NNP", "VBD", "PRP", "VBG", "IN", "DT", "NN", ".", "PRP", "VBD", "RB", "VB", "RB", "." }; - private static readonly String SENTENCE_both = "Konstantin Kalashnitsov constantly caliphed."; - private static readonly String[] SENTENCE_both_punc + private const string SENTENCE_both = "Konstantin Kalashnitsov constantly caliphed."; + private static readonly string[] SENTENCE_both_punc = { "konstantin", "kalashnitsov", "constantly", "caliph", "." }; - private static readonly String[] SENTENCE_both_posTags + private static readonly string[] SENTENCE_both_posTags = { "IN", "JJ", "NN", "VBN", "." }; - private const String SENTENCES_both = "Konstantin Kalashnitsov constantly caliphed. Coreena could care, completely."; - private static readonly String[] SENTENCES_both_punc + private const string SENTENCES_both = "Konstantin Kalashnitsov constantly caliphed. Coreena could care, completely."; + private static readonly string[] SENTENCES_both_punc = { "konstantin", "kalashnitsov", "constantly", "caliph", ".", "coreena", "could", "care", ",", "completely", "." }; - private static readonly String[] SENTENCES_both_posTags + private static readonly string[] SENTENCES_both_posTags = { "IN", "JJ", "NN", "VBN", ".", "NNP", "VBN", "NN", ",", "NN", "." }; - private static readonly String[] SENTENCES_dict_keep_orig_punc + private static readonly string[] SENTENCES_dict_keep_orig_punc = { "They", "they", "sent", "send", "him", "he", "running", "run", "in", "the", "evening", ".", "He", "he", "did", "do", "not", "come", "back", "." }; - private static readonly String[] SENTENCES_max_ent_keep_orig_punc + private static readonly string[] SENTENCES_max_ent_keep_orig_punc = { "They", "they", "sent", "send", "him", "he", "running", "runn", "in", "the", "evening", ".", "He", "he", "did", "do", "not", "come", "back", "." }; - private static readonly String[] SENTENCES_keep_orig_posTags + private static readonly string[] SENTENCES_keep_orig_posTags = { "NNP", "NNP", "VBD", "VBD", "PRP", "PRP", "VBG", "VBG", "IN", "DT", "NN", ".", "PRP", "PRP", "VBD", "VBD", "RB", "VB", "RB", "." }; - private static readonly String[] SENTENCES_both_keep_orig_punc + private static readonly string[] SENTENCES_both_keep_orig_punc = { "Konstantin", "konstantin", "Kalashnitsov", "kalashnitsov", "constantly", "caliphed", "caliph", ".", "Coreena", "coreena", "could", "care", ",", "completely", "." 
}; - private static readonly String[] SENTENCES_both_keep_orig_posTags + private static readonly string[] SENTENCES_both_keep_orig_posTags = { "IN", "IN", "JJ", "JJ", "NN", "VBN", "VBN", ".", "NNP", "NNP", "VBN", "NN", ",", "NN", "." }; - private const String tokenizerModelFile = "en-test-tokenizer.bin"; - private const String sentenceModelFile = "en-test-sent.bin"; - private const String posTaggerModelFile = "en-test-pos-maxent.bin"; - private const String lemmatizerModelFile = "en-test-lemmatizer.bin"; - private const String lemmatizerDictFile = "en-test-lemmas.dict"; + private const string tokenizerModelFile = "en-test-tokenizer.bin"; + private const string sentenceModelFile = "en-test-sent.bin"; + private const string posTaggerModelFile = "en-test-pos-maxent.bin"; + private const string lemmatizerModelFile = "en-test-lemmatizer.bin"; + private const string lemmatizerDictFile = "en-test-lemmas.dict"; [Test] public void Test1SentenceDictionaryOnly() diff --git a/src/Lucene.Net/Codecs/CodecUtil.cs b/src/Lucene.Net/Codecs/CodecUtil.cs index 3750b9ef2b..9316bdabdf 100644 --- a/src/Lucene.Net/Codecs/CodecUtil.cs +++ b/src/Lucene.Net/Codecs/CodecUtil.cs @@ -37,12 +37,12 @@ public static class CodecUtil // LUCENENET specific - marked static because all /// /// Constant to identify the start of a codec header. /// - public static readonly int CODEC_MAGIC = 0x3fd76c17; + public const int CODEC_MAGIC = 0x3fd76c17; /// /// Constant to identify the start of a codec footer. /// - public static readonly int FOOTER_MAGIC = ~CODEC_MAGIC; + public const int FOOTER_MAGIC = ~CODEC_MAGIC; /// /// Writes a codec header, which records both a string to @@ -133,7 +133,7 @@ public static int CheckHeader(DataInput @in, string codec, int minVersion, int m } /// - /// Like + /// Like /// except this /// version assumes the first has already been read /// and validated from the input. @@ -275,4 +275,4 @@ public static long ChecksumEntireFile(IndexInput input) return CheckFooter(@in); } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs index 0b578a1b71..9a4fc6e032 100644 --- a/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs +++ b/src/Lucene.Net/Codecs/Lucene3x/Lucene3xSegmentInfoFormat.cs @@ -37,24 +37,24 @@ public class Lucene3xSegmentInfoFormat : SegmentInfoFormat /// This format adds optional per-segment String /// diagnostics storage, and switches userData to Map. /// - public static readonly int FORMAT_DIAGNOSTICS = -9; + public const int FORMAT_DIAGNOSTICS = -9; /// /// Each segment records whether it has term vectors. - public static readonly int FORMAT_HAS_VECTORS = -10; + public const int FORMAT_HAS_VECTORS = -10; /// /// Each segment records the Lucene version that created it. - public static readonly int FORMAT_3_1 = -11; + public const int FORMAT_3_1 = -11; /// /// Extension used for saving each SegmentInfo, once a 3.x /// index is first committed to with 4.0. 
/// - public static readonly string UPGRADED_SI_EXTENSION = "si"; + public const string UPGRADED_SI_EXTENSION = "si"; - public static readonly string UPGRADED_SI_CODEC_NAME = "Lucene3xSegmentInfo"; - public static readonly int UPGRADED_SI_VERSION_START = 0; + public const string UPGRADED_SI_CODEC_NAME = "Lucene3xSegmentInfo"; + public const int UPGRADED_SI_VERSION_START = 0; public static readonly int UPGRADED_SI_VERSION_CURRENT = UPGRADED_SI_VERSION_START; public override SegmentInfoReader SegmentInfoReader => reader; @@ -94,4 +94,4 @@ public static bool GetDocStoreIsCompoundFile(SegmentInfo si) return v is null ? false : Convert.ToBoolean(v, CultureInfo.InvariantCulture); } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs b/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs index 5a537522c2..a5fe677092 100644 --- a/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs +++ b/src/Lucene.Net/Codecs/Lucene40/Lucene40DocValuesFormat.cs @@ -116,7 +116,7 @@ namespace Lucene.Net.Codecs.Lucene40 /// Limitations: /// /// Binary doc values can be at most in length. - /// + /// /// [Obsolete("Only for reading old 4.0 and 4.1 segments")] [DocValuesFormatName("Lucene40")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name @@ -126,7 +126,7 @@ public class Lucene40DocValuesFormat : DocValuesFormat { /// /// Maximum length for each binary doc values field. - public static readonly int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2; + public const int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2; /// /// Sole constructor. @@ -209,4 +209,4 @@ public override DocValuesProducer FieldsProducer(SegmentReadState state) internal const int BYTES_VAR_SORTED_VERSION_START = 0; internal const int BYTES_VAR_SORTED_VERSION_CURRENT = BYTES_VAR_SORTED_VERSION_START; } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs b/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs index eafdbf5c3d..99a5291c5d 100644 --- a/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs +++ b/src/Lucene.Net/Codecs/Lucene42/Lucene42DocValuesFormat.cs @@ -117,7 +117,7 @@ namespace Lucene.Net.Codecs.Lucene42 /// Limitations: /// /// Binary doc values can be at most in length. - /// + /// /// [Obsolete("Only for reading old 4.2 segments")] [DocValuesFormatName("Lucene42")] // LUCENENET specific - using DocValuesFormatName attribute to ensure the default name passed from subclasses is the same as this class name @@ -125,7 +125,7 @@ public class Lucene42DocValuesFormat : DocValuesFormat { /// /// Maximum length for each binary doc values field. - public static readonly int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2; + public const int MAX_BINARY_FIELD_LENGTH = (1 << 15) - 2; protected readonly float m_acceptableOverheadRatio; @@ -139,7 +139,7 @@ public Lucene42DocValuesFormat() /// /// Creates a new with the specified - /// for . + /// for . 
/// /// @lucene.experimental /// @@ -167,4 +167,4 @@ public override DocValuesProducer FieldsProducer(SegmentReadState state) internal const string METADATA_CODEC = "Lucene42DocValuesMetadata"; internal const string METADATA_EXTENSION = "dvm"; } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs index 2351e4aa25..4f138959a2 100644 --- a/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs +++ b/src/Lucene.Net/Codecs/Lucene45/Lucene45DocValuesConsumer.cs @@ -73,13 +73,13 @@ public class Lucene45DocValuesConsumer : DocValuesConsumer // LUCENENET specific /// Standard storage for sorted set values with 1 level of indirection: /// docId -> address -> ord. /// - public static readonly int SORTED_SET_WITH_ADDRESSES = 0; + public const int SORTED_SET_WITH_ADDRESSES = 0; /// /// Single-valued sorted set values, encoded as sorted values, so no level /// of indirection: docId -> ord. /// - public static readonly int SORTED_SET_SINGLE_VALUED_SORTED = 1; + public const int SORTED_SET_SINGLE_VALUED_SORTED = 1; #pragma warning disable CA2213 // Disposable fields should be disposed internal IndexOutput data, meta; diff --git a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs index ce8a5965c5..8580d0b091 100644 --- a/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs +++ b/src/Lucene.Net/Codecs/Lucene46/Lucene46SegmentInfoFormat.cs @@ -54,7 +54,7 @@ namespace Lucene.Net.Codecs.Lucene46 /// Files is a list of files referred to by this segment. /// /// - /// @lucene.experimental + /// @lucene.experimental /// /// public class Lucene46SegmentInfoFormat : SegmentInfoFormat @@ -74,11 +74,11 @@ public Lucene46SegmentInfoFormat() /// /// File extension used to store . - public readonly static string SI_EXTENSION = "si"; + public const string SI_EXTENSION = "si"; internal const string CODEC_NAME = "Lucene46SegmentInfo"; internal const int VERSION_START = 0; internal const int VERSION_CHECKSUM = 1; internal const int VERSION_CURRENT = VERSION_CHECKSUM; } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs b/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs index 53ad6f2e71..3035d4d589 100644 --- a/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs +++ b/src/Lucene.Net/Codecs/MultiLevelSkipListReader.cs @@ -54,7 +54,7 @@ public abstract class MultiLevelSkipListReader : IDisposable // the skipInterval. The top level can not contain more than // skipLevel entries, the second top level can not contain more // than skipLevel^2 entries and so forth. - private readonly int numberOfLevelsToBuffer = 1; // LUCENENET: marked readonly + private const int numberOfLevelsToBuffer = 1; // LUCENENET: marked const private int docCount; private bool haveSkipped; @@ -402,4 +402,4 @@ private void EnsureOpen() // LUCENENET: Guard against disposed IndexInput } } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/BufferedUpdates.cs b/src/Lucene.Net/Index/BufferedUpdates.cs index 114588876d..59923a7424 100644 --- a/src/Lucene.Net/Index/BufferedUpdates.cs +++ b/src/Lucene.Net/Index/BufferedUpdates.cs @@ -144,7 +144,7 @@ load factor (say 2 * POINTER). 
Entry is object w/ /// /// NOTE: This was MAX_INT in Lucene /// - internal static readonly int MAX_INT32 = int.MaxValue; // LUCENENET specific - Made internal rather than public, since this class is intended to be internal but couldn't be because it is exposed through a public API + internal const int MAX_INT32 = int.MaxValue; // LUCENENET specific - Made internal rather than public, since this class is intended to be internal but couldn't be because it is exposed through a public API internal readonly AtomicInt64 bytesUsed; @@ -163,9 +163,9 @@ public override string ToString() { if (VERBOSE_DELETES) { - return "gen=" + gen + " numTerms=" + numTermDeletes + ", terms=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", terms) - + ", queries=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", queries) + ", docIDs=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", docIDs) - + ", numericUpdates=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", numericUpdates) + return "gen=" + gen + " numTerms=" + numTermDeletes + ", terms=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", terms) + + ", queries=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", queries) + ", docIDs=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", docIDs) + + ", numericUpdates=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", numericUpdates) + ", binaryUpdates=" + string.Format(J2N.Text.StringFormatter.InvariantCulture, "{0}", binaryUpdates) + ", bytesUsed=" + bytesUsed; } else @@ -327,4 +327,4 @@ internal virtual bool Any() return terms.Count > 0 || docIDs.Count > 0 || queries.Count > 0 || numericUpdates.Count > 0 || binaryUpdates.Count > 0; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/DirectoryReader.cs b/src/Lucene.Net/Index/DirectoryReader.cs index a391d1be27..ff76a52bac 100644 --- a/src/Lucene.Net/Index/DirectoryReader.cs +++ b/src/Lucene.Net/Index/DirectoryReader.cs @@ -51,7 +51,7 @@ public abstract class DirectoryReader : BaseCompositeReader { /// /// Default termInfosIndexDivisor. - public static readonly int DEFAULT_TERMS_INDEX_DIVISOR = 1; + public const int DEFAULT_TERMS_INDEX_DIVISOR = 1; /// /// The index directory. @@ -93,7 +93,7 @@ public abstract class DirectoryReader : BaseCompositeReader /// /// Open a near real time from the . /// - /// @lucene.experimental + /// @lucene.experimental /// /// The to open from /// If true, all buffered deletes will @@ -231,7 +231,7 @@ public static DirectoryReader OpenIfChanged(DirectoryReader oldReader, IndexComm /// outstanding readers may continue to be used. However, /// if you attempt to reopen any of those readers, you'll /// hit an . - /// + /// /// @lucene.experimental /// /// that covers entire index plus all @@ -258,14 +258,14 @@ public static DirectoryReader OpenIfChanged(DirectoryReader oldReader, IndexWrit /// /// Returns all commit points that exist in the . - /// Normally, because the default is + /// Normally, because the default is /// , there would be only /// one commit point. But if you're using a custom /// then there could be many commits. /// Once you have a given commit, you can open a reader on /// it by calling /// There must be at least one commit in - /// the , else this method throws + /// the , else this method throws /// . Note that if a commit is in /// progress while this method is running, that commit /// may or may not be returned. 
@@ -440,7 +440,7 @@ protected DirectoryReader(Directory directory, AtomicReader[] segmentReaders) /// directory. /// /// If instead this reader is a near real-time reader - /// (ie, obtained by a call to + /// (ie, obtained by a call to /// , or by calling an overload of /// on a near real-time reader), then this method checks if /// either a new commit has occurred, or any new @@ -462,4 +462,4 @@ protected DirectoryReader(Directory directory, AtomicReader[] segmentReaders) /// public abstract IndexCommit IndexCommit { get; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/DocValuesUpdate.cs b/src/Lucene.Net/Index/DocValuesUpdate.cs index 60b57f9466..874e0f9988 100644 --- a/src/Lucene.Net/Index/DocValuesUpdate.cs +++ b/src/Lucene.Net/Index/DocValuesUpdate.cs @@ -106,7 +106,7 @@ public override string ToString() /// An in-place update to a numeric field internal sealed class NumericDocValuesUpdate : DocValuesUpdate { - internal static readonly long MISSING = 0; + internal const long MISSING = 0; internal readonly long value; @@ -126,4 +126,4 @@ public override string ToString() return "term=" + term + ",field=" + field + ",value=" + value; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/DocumentsWriterPerThread.cs b/src/Lucene.Net/Index/DocumentsWriterPerThread.cs index e71af22fe9..6e6f9c8ab0 100644 --- a/src/Lucene.Net/Index/DocumentsWriterPerThread.cs +++ b/src/Lucene.Net/Index/DocumentsWriterPerThread.cs @@ -682,13 +682,13 @@ internal virtual void SealFlushedSegment(FlushedSegment flushedSegment) /// Initial chunks size of the shared byte[] blocks used to /// store postings data /// - internal static readonly int BYTE_BLOCK_NOT_MASK = ~ByteBlockPool.BYTE_BLOCK_MASK; + internal const int BYTE_BLOCK_NOT_MASK = ~ByteBlockPool.BYTE_BLOCK_MASK; /// /// if you increase this, you must fix field cache impl for /// getTerms/getTermsIndex requires <= 32768 /// - internal static readonly int MAX_TERM_LENGTH_UTF8 = ByteBlockPool.BYTE_BLOCK_SIZE - 2; + internal const int MAX_TERM_LENGTH_UTF8 = ByteBlockPool.BYTE_BLOCK_SIZE - 2; /// /// NOTE: This was IntBlockAllocator in Lucene diff --git a/src/Lucene.Net/Index/IndexFileNames.cs b/src/Lucene.Net/Index/IndexFileNames.cs index bdc72959d8..cac5280d23 100644 --- a/src/Lucene.Net/Index/IndexFileNames.cs +++ b/src/Lucene.Net/Index/IndexFileNames.cs @@ -28,9 +28,9 @@ namespace Lucene.Net.Index /// /// This class contains useful constants representing filenames and extensions /// used by lucene, as well as convenience methods for querying whether a file - /// name matches an extension (), + /// name matches an extension (), /// as well as generating file names from a segment name, - /// generation and extension + /// generation and extension /// (, /// ). /// @@ -51,23 +51,23 @@ private IndexFileNames() /// /// Name of the index segment file - public static readonly string SEGMENTS = "segments"; + public const string SEGMENTS = "segments"; /// /// Extension of gen file - public static readonly string GEN_EXTENSION = "gen"; + public const string GEN_EXTENSION = "gen"; /// /// Name of the generation reference file name - public static readonly string SEGMENTS_GEN = "segments." + GEN_EXTENSION; + public const string SEGMENTS_GEN = "segments." 
+ GEN_EXTENSION; /// /// Extension of compound file - public static readonly string COMPOUND_FILE_EXTENSION = "cfs"; + public const string COMPOUND_FILE_EXTENSION = "cfs"; /// /// Extension of compound file entries - public static readonly string COMPOUND_FILE_ENTRIES_EXTENSION = "cfe"; + public const string COMPOUND_FILE_ENTRIES_EXTENSION = "cfe"; /// /// This array contains all filename extensions used by @@ -253,4 +253,4 @@ public static string GetExtension(string filename) /// public static readonly Regex CODEC_FILE_PATTERN = new Regex("_[a-z0-9]+(_.*)?\\..*", RegexOptions.Compiled); } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/IndexWriter.cs b/src/Lucene.Net/Index/IndexWriter.cs index f98cdd709f..2c9b94d4ef 100644 --- a/src/Lucene.Net/Index/IndexWriter.cs +++ b/src/Lucene.Net/Index/IndexWriter.cs @@ -195,23 +195,23 @@ public class IndexWriter : IDisposable, ITwoPhaseCommit /// /// Name of the write lock in the index. /// - public static readonly string WRITE_LOCK_NAME = "write.lock"; + public const string WRITE_LOCK_NAME = "write.lock"; /// /// Key for the source of a segment in the . - public static readonly string SOURCE = "source"; + public const string SOURCE = "source"; /// /// Source of a segment which results from a merge of other segments. - public static readonly string SOURCE_MERGE = "merge"; + public const string SOURCE_MERGE = "merge"; /// /// Source of a segment which results from a flush. - public static readonly string SOURCE_FLUSH = "flush"; + public const string SOURCE_FLUSH = "flush"; /// /// Source of a segment which results from a call to . - public static readonly string SOURCE_ADDINDEXES_READERS = "AddIndexes(params IndexReader[] readers)"; + public const string SOURCE_ADDINDEXES_READERS = "AddIndexes(params IndexReader[] readers)"; /// /// Absolute hard maximum length for a term, in bytes once @@ -221,7 +221,7 @@ public class IndexWriter : IDisposable, ITwoPhaseCommit /// and a message is printed to , if set (see /// ). /// - public static readonly int MAX_TERM_LENGTH = DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8; + public const int MAX_TERM_LENGTH = DocumentsWriterPerThread.MAX_TERM_LENGTH_UTF8; private volatile bool hitOOM; diff --git a/src/Lucene.Net/Index/IndexWriterConfig.cs b/src/Lucene.Net/Index/IndexWriterConfig.cs index 6025b670b6..fd1fcc0adf 100644 --- a/src/Lucene.Net/Index/IndexWriterConfig.cs +++ b/src/Lucene.Net/Index/IndexWriterConfig.cs @@ -69,25 +69,25 @@ public sealed class IndexWriterConfig : LiveIndexWriterConfig // LUCENENET speci /// /// Default value is 32. Change using setter. - public static readonly int DEFAULT_TERM_INDEX_INTERVAL = 32; // TODO: this should be private to the codec, not settable here + public const int DEFAULT_TERM_INDEX_INTERVAL = 32; // TODO: this should be private to the codec, not settable here /// /// Denotes a flush trigger is disabled. - public static readonly int DISABLE_AUTO_FLUSH = -1; + public const int DISABLE_AUTO_FLUSH = -1; /// /// Disabled by default (because IndexWriter flushes by RAM usage by default). - public static readonly int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH; + public const int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH; /// /// Disabled by default (because IndexWriter flushes by RAM usage by default). 
- public static readonly int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH; + public const int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH; /// /// Default value is 16 MB (which means flush when buffered docs consume /// approximately 16 MB RAM). /// - public static readonly double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0; + public const double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0; /// /// Default value for the write lock timeout (1,000 ms). @@ -97,15 +97,15 @@ public sealed class IndexWriterConfig : LiveIndexWriterConfig // LUCENENET speci /// /// Default setting for . - public static readonly bool DEFAULT_READER_POOLING = false; + public const bool DEFAULT_READER_POOLING = false; /// /// Default value is 1. Change using setter. - public static readonly int DEFAULT_READER_TERMS_INDEX_DIVISOR = DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR; + public const int DEFAULT_READER_TERMS_INDEX_DIVISOR = DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR; /// /// Default value is 1945. Change using setter. - public static readonly int DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB = 1945; + public const int DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB = 1945; /// /// The maximum number of simultaneous threads that may be @@ -113,21 +113,21 @@ public sealed class IndexWriterConfig : LiveIndexWriterConfig // LUCENENET speci /// than this many threads arrive they will wait for /// others to finish. Default value is 8. /// - public static readonly int DEFAULT_MAX_THREAD_STATES = 8; + public const int DEFAULT_MAX_THREAD_STATES = 8; /// /// Default value for compound file system for newly written segments /// (set to true). For batch indexing with very large /// ram buffers use false /// - public static readonly bool DEFAULT_USE_COMPOUND_FILE_SYSTEM = true; + public const bool DEFAULT_USE_COMPOUND_FILE_SYSTEM = true; /// /// Default value for calling before /// merging segments (set to false). You can set this /// to true for additional safety. /// - public static readonly bool DEFAULT_CHECK_INTEGRITY_AT_MERGE = false; + public const bool DEFAULT_CHECK_INTEGRITY_AT_MERGE = false; /// /// Gets or sets the default (for any instance) maximum time to wait for a write lock diff --git a/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs b/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs index 719b0fee73..6d082efe4e 100644 --- a/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs +++ b/src/Lucene.Net/Index/LogByteSizeMergePolicy.cs @@ -25,19 +25,19 @@ public class LogByteSizeMergePolicy : LogMergePolicy { /// Default minimum segment size. /// - public static readonly double DEFAULT_MIN_MERGE_MB = 1.6; + public const double DEFAULT_MIN_MERGE_MB = 1.6; /// - /// Default maximum segment size. A segment of this size - /// or larger will never be merged. + /// Default maximum segment size. A segment of this size + /// or larger will never be merged. /// - public static readonly double DEFAULT_MAX_MERGE_MB = 2048; + public const double DEFAULT_MAX_MERGE_MB = 2048; /// - /// Default maximum segment size. A segment of this size + /// Default maximum segment size. A segment of this size /// or larger will never be merged during . 
/// - public static readonly double DEFAULT_MAX_MERGE_MB_FOR_FORCED_MERGE = long.MaxValue; + public const double DEFAULT_MAX_MERGE_MB_FOR_FORCED_MERGE = long.MaxValue; /// /// Sole constructor, setting all settings to their @@ -47,8 +47,8 @@ public LogByteSizeMergePolicy() { m_minMergeSize = (long)(DEFAULT_MIN_MERGE_MB * 1024 * 1024); m_maxMergeSize = (long)(DEFAULT_MAX_MERGE_MB * 1024 * 1024); - - // .Net port, original line is inappropriate, overflows in .NET + + // .Net port, original line is inappropriate, overflows in .NET // and the property gets set to a negative value. // In Java, however, such statements result in long.MaxValue @@ -131,4 +131,4 @@ public virtual double MinMergeMB } } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/LogDocMergePolicy.cs b/src/Lucene.Net/Index/LogDocMergePolicy.cs index b81386d203..a11fc62905 100644 --- a/src/Lucene.Net/Index/LogDocMergePolicy.cs +++ b/src/Lucene.Net/Index/LogDocMergePolicy.cs @@ -27,7 +27,7 @@ public class LogDocMergePolicy : LogMergePolicy { /// Default minimum segment size. /// - public static readonly int DEFAULT_MIN_MERGE_DOCS = 1000; + public const int DEFAULT_MIN_MERGE_DOCS = 1000; /// /// Sole constructor, setting all settings to their @@ -65,4 +65,4 @@ public virtual int MinMergeDocs set => m_minMergeSize = value; } } -} \ No newline at end of file +} diff --git a/src/Lucene.Net/Index/LogMergePolicy.cs b/src/Lucene.Net/Index/LogMergePolicy.cs index 3946e0f48f..f8984d278b 100644 --- a/src/Lucene.Net/Index/LogMergePolicy.cs +++ b/src/Lucene.Net/Index/LogMergePolicy.cs @@ -52,25 +52,25 @@ public abstract class LogMergePolicy : MergePolicy /// log size, minus LEVEL_LOG_SPAN, and finding all /// segments falling within that range. /// - public static readonly double LEVEL_LOG_SPAN = 0.75; + public const double LEVEL_LOG_SPAN = 0.75; /// /// Default merge factor, which is how many segments are /// merged at a time /// - public static readonly int DEFAULT_MERGE_FACTOR = 10; + public const int DEFAULT_MERGE_FACTOR = 10; /// /// Default maximum segment size. A segment of this size /// or larger will never be merged. /// - public static readonly int DEFAULT_MAX_MERGE_DOCS = int.MaxValue; + public const int DEFAULT_MAX_MERGE_DOCS = int.MaxValue; /// /// Default noCFSRatio. If a merge's size is >= 10% of /// the index, then we disable compound file for it. /// - public new static readonly double DEFAULT_NO_CFS_RATIO = 0.1; + public new const double DEFAULT_NO_CFS_RATIO = 0.1; /// /// How many segments to merge at a time. diff --git a/src/Lucene.Net/Index/MergePolicy.cs b/src/Lucene.Net/Index/MergePolicy.cs index c697bbad9f..bffd0f5ebf 100644 --- a/src/Lucene.Net/Index/MergePolicy.cs +++ b/src/Lucene.Net/Index/MergePolicy.cs @@ -595,12 +595,12 @@ protected MergeAbortedException(SerializationInfo info, StreamingContext context /// Default ratio for compound file system usage. Set to 1.0, always use /// compound file system. /// - protected static readonly double DEFAULT_NO_CFS_RATIO = 1.0; + protected const double DEFAULT_NO_CFS_RATIO = 1.0; /// /// Default max segment size in order to use compound file system. Set to . /// - protected static readonly long DEFAULT_MAX_CFS_SEGMENT_SIZE = long.MaxValue; + protected const long DEFAULT_MAX_CFS_SEGMENT_SIZE = long.MaxValue; /// /// that contains this instance. 
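The distinction these conversions turn on: a C# const is a compile-time constant whose literal value is copied into every assembly that references it, while a static readonly field is loaded from the declaring assembly at run time. A minimal sketch of the difference (the Defaults and Consumer names are illustrative, not from this patch):

    public static class Defaults
    {
        public const int MergeFactor = 10;            // literal baked into referencing assemblies at compile time
        public static readonly int RamBufferMb = 16;  // read from this field at run time
    }

    public static class Consumer
    {
        public static void Show()
        {
            // If Defaults ships in a separate DLL whose values later change, a consumer
            // compiled against the old DLL still sees 10 here (the inlined literal)...
            System.Console.WriteLine(Defaults.MergeFactor);
            // ...but picks up the new value here, because this compiles to a field load.
            System.Console.WriteLine(Defaults.RamBufferMb);
        }
    }

This is why values that are fixed for all time become const, while values expected to change between versions (FORMAT_SEGMENTS_GEN_CURRENT, MAX_ARRAY_LENGTH, LUCENE_VERSION below) gain a ReSharper suppression and stay static readonly.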
diff --git a/src/Lucene.Net/Index/SegmentInfo.cs b/src/Lucene.Net/Index/SegmentInfo.cs index c643694076..da247610c5 100644 --- a/src/Lucene.Net/Index/SegmentInfo.cs +++ b/src/Lucene.Net/Index/SegmentInfo.cs @@ -42,13 +42,13 @@ public sealed class SegmentInfo /// Used by some member fields to mean not present (e.g., /// norms, deletions). /// - public static readonly int NO = -1; // e.g. no norms; no deletes; + public const int NO = -1; // e.g. no norms; no deletes; /// /// Used by some member fields to mean present (e.g., /// norms, deletions). /// - public static readonly int YES = 1; // e.g. have norms; have deletes; + public const int YES = 1; // e.g. have norms; have deletes; /// /// Unique segment name in the directory. diff --git a/src/Lucene.Net/Index/SegmentInfos.cs b/src/Lucene.Net/Index/SegmentInfos.cs index a6167f3672..346ce6f5c3 100644 --- a/src/Lucene.Net/Index/SegmentInfos.cs +++ b/src/Lucene.Net/Index/SegmentInfos.cs @@ -120,11 +120,11 @@ public sealed class SegmentInfos : IEnumerable // LUCENENET s { /// /// The file format version for the segments_N codec header, up to 4.5. - public static readonly int VERSION_40 = 0; + public const int VERSION_40 = 0; /// /// The file format version for the segments_N codec header, since 4.6+. - public static readonly int VERSION_46 = 1; + public const int VERSION_46 = 1; /// /// The file format version for the segments_N codec header, since 4.8+ @@ -139,6 +139,7 @@ public sealed class SegmentInfos : IEnumerable // LUCENENET s /// /// Current format of segments.gen + // ReSharper disable once ConvertToConstant.Global - should always be evaluated, not inlined public static readonly int FORMAT_SEGMENTS_GEN_CURRENT = FORMAT_SEGMENTS_GEN_CHECKSUM; /// diff --git a/src/Lucene.Net/Index/SortedSetDocValues.cs b/src/Lucene.Net/Index/SortedSetDocValues.cs index 0c775f2eb2..8e2264ca3b 100644 --- a/src/Lucene.Net/Index/SortedSetDocValues.cs +++ b/src/Lucene.Net/Index/SortedSetDocValues.cs @@ -41,7 +41,7 @@ protected SortedSetDocValues() /// When returned by it means there are no more /// ordinals for the document. /// - public static readonly long NO_MORE_ORDS = -1; + public const long NO_MORE_ORDS = -1; /// /// Returns the next ordinal for the current document (previously diff --git a/src/Lucene.Net/Index/TieredMergePolicy.cs b/src/Lucene.Net/Index/TieredMergePolicy.cs index a989ab18d2..4462610566 100644 --- a/src/Lucene.Net/Index/TieredMergePolicy.cs +++ b/src/Lucene.Net/Index/TieredMergePolicy.cs @@ -77,7 +77,7 @@ public class TieredMergePolicy : MergePolicy /// the index, then we disable compound file for it. 
/// /// - public new static readonly double DEFAULT_NO_CFS_RATIO = 0.1; + public new const double DEFAULT_NO_CFS_RATIO = 0.1; private int maxMergeAtOnce = 10; private long maxMergedSegmentBytes = 5 * 1024 * 1024 * 1024L; diff --git a/src/Lucene.Net/Store/CompoundFileDirectory.cs b/src/Lucene.Net/Store/CompoundFileDirectory.cs index 562eb8eaf4..eb8985444b 100644 --- a/src/Lucene.Net/Store/CompoundFileDirectory.cs +++ b/src/Lucene.Net/Store/CompoundFileDirectory.cs @@ -131,10 +131,10 @@ public CompoundFileDirectory(Directory directory, string fileName, IOContext con } // LUCENENET NOTE: These MUST be sbyte because they can be negative - private static readonly sbyte CODEC_MAGIC_BYTE1 = (sbyte)(CodecUtil.CODEC_MAGIC >>> 24); - private static readonly sbyte CODEC_MAGIC_BYTE2 = (sbyte)(CodecUtil.CODEC_MAGIC >>> 16); - private static readonly sbyte CODEC_MAGIC_BYTE3 = (sbyte)(CodecUtil.CODEC_MAGIC >>> 8); - private static readonly sbyte CODEC_MAGIC_BYTE4 = (sbyte)CodecUtil.CODEC_MAGIC; + private const sbyte CODEC_MAGIC_BYTE1 = (sbyte)(CodecUtil.CODEC_MAGIC >>> 24); + private const sbyte CODEC_MAGIC_BYTE2 = unchecked((sbyte)(CodecUtil.CODEC_MAGIC >>> 16)); + private const sbyte CODEC_MAGIC_BYTE3 = unchecked((sbyte)(CodecUtil.CODEC_MAGIC >>> 8)); + private const sbyte CODEC_MAGIC_BYTE4 = unchecked((sbyte)CodecUtil.CODEC_MAGIC); /// /// Helper method that reads CFS entries from an input stream diff --git a/src/Lucene.Net/Support/Util/ExceptionExtensions.cs b/src/Lucene.Net/Support/Util/ExceptionExtensions.cs index 8528498cad..ea0bf98e15 100644 --- a/src/Lucene.Net/Support/Util/ExceptionExtensions.cs +++ b/src/Lucene.Net/Support/Util/ExceptionExtensions.cs @@ -29,7 +29,7 @@ namespace Lucene.Net.Util /// public static class ExceptionExtensions { - public static readonly string SUPPRESSED_EXCEPTIONS_KEY = "Lucene_SuppressedExceptions"; + public const string SUPPRESSED_EXCEPTIONS_KEY = "Lucene_SuppressedExceptions"; public static Exception[] GetSuppressed(this Exception e) { diff --git a/src/Lucene.Net/Util/ArrayUtil.cs b/src/Lucene.Net/Util/ArrayUtil.cs index 0c7759bde7..a0c7c39cd5 100644 --- a/src/Lucene.Net/Util/ArrayUtil.cs +++ b/src/Lucene.Net/Util/ArrayUtil.cs @@ -38,6 +38,7 @@ public static class ArrayUtil // LUCENENET specific - made static /// one JVM but failed later at search time with a /// different JVM. /// + // ReSharper disable once ConvertToConstant.Global - this changes to a computed value in later versions of Lucene public static readonly int MAX_ARRAY_LENGTH = int.MaxValue - 256; /* diff --git a/src/Lucene.Net/Util/BroadWord.cs b/src/Lucene.Net/Util/BroadWord.cs index 68401c32d4..db5a5bfe30 100644 --- a/src/Lucene.Net/Util/BroadWord.cs +++ b/src/Lucene.Net/Util/BroadWord.cs @@ -144,9 +144,9 @@ public static long SmallerUpto15_16(long x, long y) /// These contain the high bit of each group of k bits. /// The suffix _L indicates the implementation. /// - public static readonly long H8_L = L8_L << 7; + public const long H8_L = L8_L << 7; - public static readonly long H16_L = L16_L << 15; + public const long H16_L = L16_L << 15; /// /// Naive implementation of , using repetitively. 
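The unchecked(...) wrappers added to the CODEC_MAGIC_BYTE constants above deserve a note: a const initializer is a constant expression, and C# evaluates constant expressions in a checked context by default, so a narrowing cast that changes the value becomes a compile-time error (CS0221) instead of the silent truncation the old static readonly fields performed at run time. A standalone sketch (MagicBytes is an illustrative name; the value mirrors CODEC_MAGIC):

    public static class MagicBytes
    {
        private const int Magic = 0x3fd76c17;

        // 0x3f fits in sbyte, so no unchecked is needed:
        private const sbyte Byte1 = (sbyte)(Magic >>> 24);

        // 0x3fd7 does not fit in sbyte; without unchecked this line is error CS0221:
        // private const sbyte Bad = (sbyte)(Magic >>> 16);
        private const sbyte Byte2 = unchecked((sbyte)(Magic >>> 16)); // truncates to -41 (0xd7)
    }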
diff --git a/src/Lucene.Net/Util/ByteBlockPool.cs b/src/Lucene.Net/Util/ByteBlockPool.cs index a2df7defb5..37877f9d91 100644 --- a/src/Lucene.Net/Util/ByteBlockPool.cs +++ b/src/Lucene.Net/Util/ByteBlockPool.cs @@ -45,9 +45,9 @@ namespace Lucene.Net.Util /// public sealed class ByteBlockPool { - public static readonly int BYTE_BLOCK_SHIFT = 15; - public static readonly int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT; - public static readonly int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1; + public const int BYTE_BLOCK_SHIFT = 15; + public const int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT; + public const int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1; /// /// Abstract class for allocating and freeing byte diff --git a/src/Lucene.Net/Util/Constants.cs b/src/Lucene.Net/Util/Constants.cs index cd30a3e487..e236b47d84 100644 --- a/src/Lucene.Net/Util/Constants.cs +++ b/src/Lucene.Net/Util/Constants.cs @@ -44,6 +44,7 @@ public static class Constants // LUCENENET specific - made static because all me /// /// NOTE: This was JAVA_VENDOR in Lucene /// + // ReSharper disable once ConvertToConstant.Global - matches other fields in this file, and might be computed in the future public static readonly string RUNTIME_VENDOR = "Microsoft"; // AppSettings.Get("java.vendor", ""); //public static readonly string JVM_VENDOR = GetEnvironmentVariable("java.vm.vendor", ""); //public static readonly string JVM_VERSION = GetEnvironmentVariable("java.vm.version", ""); @@ -116,7 +117,7 @@ private static bool LoadRuntimeIs64Bit() // LUCENENET: Avoid static constructors { // LUCENENET NOTE: In Java, the check is for sun.misc.Unsafe.addressSize, // which is the pointer size of the current environment. We don't need to - // fallback to the OS bitness in .NET because this property is reliable and + // fallback to the OS bitness in .NET because this property is reliable and // doesn't throw exceptions. if (IntPtr.Size == 8) return true;// 64 bit machine @@ -150,6 +151,7 @@ private static string Ident(string s) /// /// This is the Lucene version for display purposes. 
/// + // ReSharper disable once ConvertToConstant.Global - should always be evaluated, not inlined public static readonly string LUCENE_VERSION = "4.8.0"; /// diff --git a/src/Lucene.Net/Util/Fst/FST.cs b/src/Lucene.Net/Util/Fst/FST.cs index d3b85e8734..a885d790a5 100644 --- a/src/Lucene.Net/Util/Fst/FST.cs +++ b/src/Lucene.Net/Util/Fst/FST.cs @@ -2175,7 +2175,7 @@ public FST() /// /// If arc has this label then that arc is final/accepted - public static readonly int END_LABEL = -1; + public const int END_LABEL = -1; /// /// returns true if the node at this address has any diff --git a/src/Lucene.Net/Util/IntBlockPool.cs b/src/Lucene.Net/Util/IntBlockPool.cs index 9410f444c4..c9aba490a8 100644 --- a/src/Lucene.Net/Util/IntBlockPool.cs +++ b/src/Lucene.Net/Util/IntBlockPool.cs @@ -35,17 +35,17 @@ public sealed class Int32BlockPool /// /// NOTE: This was INT_BLOCK_SHIFT in Lucene /// - public static readonly int INT32_BLOCK_SHIFT = 13; + public const int INT32_BLOCK_SHIFT = 13; /// /// NOTE: This was INT_BLOCK_SIZE in Lucene /// - public static readonly int INT32_BLOCK_SIZE = 1 << INT32_BLOCK_SHIFT; + public const int INT32_BLOCK_SIZE = 1 << INT32_BLOCK_SHIFT; /// /// NOTE: This was INT_BLOCK_MASK in Lucene /// - public static readonly int INT32_BLOCK_MASK = INT32_BLOCK_SIZE - 1; + public const int INT32_BLOCK_MASK = INT32_BLOCK_SIZE - 1; /// /// Abstract class for allocating and freeing @@ -129,7 +129,7 @@ public int[] Buffer private int[] buffer; /// - /// Current head offset. + /// Current head offset. /// /// NOTE: This was intOffset in Lucene /// @@ -315,7 +315,7 @@ private int AllocSlice(int[] slice, int sliceOffset) /// /// @lucene.internal /// - /// + /// public class SliceWriter { private int offset; @@ -439,7 +439,7 @@ public bool IsEndOfSlice } /// - /// Reads the next from the current slice and returns it. + /// Reads the next from the current slice and returns it. /// /// NOTE: This was readInt() in Lucene /// @@ -486,4 +486,4 @@ private void NextSlice() } } } -} \ No newline at end of file +}
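A final point the block-pool conversions above rely on: a const initializer may reference other consts, because the compiler folds the entire expression at compile time, whereas a static readonly field is not a constant expression and may never appear in one. A small sketch (BlockMath is an illustrative name; the values mirror the ByteBlockPool constants):

    public static class BlockMath
    {
        public const int Shift = 15;
        public const int Size = 1 << Shift; // folded to 32768 at compile time
        public const int Mask = Size - 1;   // folded to 32767

        public static readonly int RuntimeShift = 15;
        // public const int Bad = 1 << RuntimeShift; // error CS0133: expression must be constant
    }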