Enabling spotless, disabling checkstyle check on plugins #1488

Merged · 5 commits · Nov 2, 2021
Changes from all commits
5 changes: 4 additions & 1 deletion buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -27,11 +27,14 @@
<suppress files="libs" checks="." />
<!-- Excludes checkstyle run on modules module -->
<suppress files="modules" checks="." />
<!-- Excludes checkstyle run on plugins module -->
<suppress files="plugins" checks="." />
<!-- Excludes checkstyle run on below qa module -->
<suppress files="qa[/\\]die-with-dignity" checks="." />
<!-- Excludes checkstyle run on test module -->
<suppress files="test" checks="." />
<!-- Excludes checkstyle run on rest-api-spec module -->
<suppress files="rest-api-spec" checks="." />

<!--
Truly temporary suppressions suppression of snippets included in
documentation that are so wide that they scroll.
24 changes: 1 addition & 23 deletions gradle/formatting.gradle
@@ -56,29 +56,7 @@ import org.opensearch.gradle.BuildPlugin
*/

// Do not add new sub-projects here!
-def projectPathsToExclude = [
-    ':plugins:analysis-icu',
-    ':plugins:analysis-kuromoji',
-    ':plugins:analysis-nori',
-    ':plugins:analysis-phonetic',
-    ':plugins:analysis-smartcn',
-    ':plugins:analysis-stempel',
-    ':plugins:analysis-ukrainian',
-    ':plugins:discovery-azure-classic',
-    ':plugins:discovery-ec2',
-    ':plugins:discovery-gce',
-    ':plugins:ingest-attachment',
-    ':plugins:mapper-annotated-text',
-    ':plugins:mapper-murmur3',
-    ':plugins:mapper-size',
-    ':plugins:repository-azure',
-    ':plugins:repository-gcs',
-    ':plugins:repository-hdfs',
-    ':plugins:repository-s3',
-    ':plugins:store-smb',
-    ':plugins:transport-nio',
-    ':qa:die-with-dignity'
-]
+def projectPathsToExclude = []
Member: Love it!

Member: Finally! 😁

subprojects {
plugins.withType(BuildPlugin).whenPluginAdded {

Large diffs are not rendered by default.

@@ -81,42 +81,40 @@
*/
@Deprecated
public final class ICUCollationKeyFilter extends TokenFilter {
    private Collator collator = null;
    private RawCollationKey reusableKey = new RawCollationKey();
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

    /**
     *
     * @param input Source token stream
     * @param collator CollationKey generator
     */
    public ICUCollationKeyFilter(TokenStream input, Collator collator) {
        super(input);
        // clone the collator: see http://userguide.icu-project.org/collation/architecture
        try {
            this.collator = (Collator) collator.clone();
        } catch (CloneNotSupportedException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public boolean incrementToken() throws IOException {
        if (input.incrementToken()) {
            char[] termBuffer = termAtt.buffer();
            String termText = new String(termBuffer, 0, termAtt.length());
            collator.getRawCollationKey(termText, reusableKey);
            int encodedLength = IndexableBinaryStringTools.getEncodedLength(reusableKey.bytes, 0, reusableKey.size);
            if (encodedLength > termBuffer.length) {
                termAtt.resizeBuffer(encodedLength);
            }
            termAtt.setLength(encodedLength);
            IndexableBinaryStringTools.encode(reusableKey.bytes, 0, reusableKey.size, termAtt.buffer(), 0, encodedLength);
            return true;
        } else {
            return false;
        }
    }
}
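For readers following the diff: this filter replaces each incoming token with an index-safe encoding of its collation key, so plain byte-order comparisons sort like the collator. A minimal driver sketch, assuming Lucene's KeywordTokenizer and ICU4J are on the classpath; the locale and sample text are illustrative, and the filter's import path is assumed:

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.collation.ICUCollationKeyFilter; // package assumed

import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

public class CollationKeyDemo {
    public static void main(String[] args) throws IOException {
        // A locale-aware collator; the filter clones it internally (see the constructor above).
        Collator collator = Collator.getInstance(new ULocale("de"));
        KeywordTokenizer tokenizer = new KeywordTokenizer();
        tokenizer.setReader(new StringReader("Müller"));
        TokenStream stream = new ICUCollationKeyFilter(tokenizer, collator);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            // The emitted term is the binary collation key, encoded so that
            // lexicographic byte order matches the collator's sort order.
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}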
@@ -53,11 +53,15 @@ public IcuAnalyzerProvider(IndexSettings indexSettings, Environment environment,
String method = settings.get("method", "nfkc_cf");
String mode = settings.get("mode", "compose");
if (!"compose".equals(mode) && !"decompose".equals(mode)) {
throw new IllegalArgumentException("Unknown mode [" + mode + "] in analyzer [" + name +
"], expected one of [compose, decompose]");
throw new IllegalArgumentException(
"Unknown mode [" + mode + "] in analyzer [" + name + "], expected one of [compose, decompose]"
);
}
Normalizer2 normalizer = Normalizer2.getInstance(
null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE);
null,
method,
"compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE
);
this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(indexSettings, normalizer, settings);
}

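To make the method/mode pair concrete, here is a standalone ICU4J sketch of what the two modes select (not code from this PR; the sample strings are made up):

import com.ibm.icu.text.Normalizer2;

public class NormalizerModeDemo {
    public static void main(String[] args) {
        // "nfkc_cf" + COMPOSE: compatibility decomposition, canonical
        // composition, and case folding in one pass (the analyzer's default).
        Normalizer2 nfkcCf = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE);
        System.out.println(nfkcCf.normalize("Ⅷ Café")); // "viii café"

        // "nfc" + DECOMPOSE is NFD: é becomes e followed by U+0301.
        Normalizer2 nfd = Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE);
        System.out.println(nfd.normalize("é").length()); // 2
    }
}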
@@ -40,7 +40,6 @@
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;


/**
* Uses the {@link org.apache.lucene.analysis.icu.ICUFoldingFilter}.
* Applies foldings from UTR#30 Character Foldings.
@@ -57,7 +56,10 @@
public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {
/** Store here the same Normalizer used by the lucene ICUFoldingFilter */
private static final Normalizer2 ICU_FOLDING_NORMALIZER = Normalizer2.getInstance(
    ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"),
    "utr30",
    Normalizer2.Mode.COMPOSE
);

private final Normalizer2 normalizer;

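The utr30.nrm resource drives UTR#30 folding: accent removal, case folding, width folding and friends. A sketch of the underlying Lucene filter in isolation, assuming lucene-analysis-icu is on the classpath (the token text is illustrative):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.icu.ICUFoldingFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class FoldingDemo {
    public static void main(String[] args) throws IOException {
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("Résumé STRASSE"));
        // Applies the same utr30 foldings the factory above configures.
        TokenStream stream = new ICUFoldingFilter(tokenizer);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString()); // "resume", then "strasse"
        }
        stream.close();
    }
}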
@@ -32,7 +32,6 @@

package org.opensearch.index.analysis;


import com.ibm.icu.text.Normalizer2;

import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter;
@@ -42,7 +41,6 @@

import java.io.Reader;


/**
* Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize character.
* <p>The {@code name} can be used to provide the type of normalization to perform.</p>
@@ -61,7 +59,10 @@ public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment e
mode = "compose";
}
Normalizer2 normalizer = Normalizer2.getInstance(
    null,
    method,
    "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE
);
this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(indexSettings, normalizer, settings);
}

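Unlike the token filter above, the char filter normalizes the raw character stream before the tokenizer runs, keeping offsets consistent for highlighting. A minimal standalone sketch (the half-width katakana input is illustrative):

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter;

import com.ibm.icu.text.Normalizer2;

public class CharFilterDemo {
    public static void main(String[] args) throws IOException {
        Normalizer2 normalizer = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE);
        // Half-width katakana is folded to full width before any tokenizer sees it.
        Reader reader = new ICUNormalizer2CharFilter(new StringReader("ｵﾍﾟﾝｻｰﾁ"), normalizer);
        StringBuilder out = new StringBuilder();
        int c;
        while ((c = reader.read()) != -1) {
            out.append((char) c);
        }
        System.out.println(out); // "オペンサーチ"
    }
}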
@@ -42,16 +42,14 @@
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;


/**
* Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens.
* <p>The {@code name} can be used to provide the type of normalization to perform.</p>
* <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
*/
public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IcuNormalizerTokenFilterFactory.class);

private final Normalizer2 normalizer;

@@ -67,14 +65,14 @@ public TokenStream create(TokenStream tokenStream) {
return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer);
}

static Normalizer2 wrapWithUnicodeSetFilter(final IndexSettings indexSettings, final Normalizer2 normalizer, final Settings settings) {
String unicodeSetFilter = settings.get("unicodeSetFilter");
if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
if (unicodeSetFilter != null) {
deprecationLogger.deprecate("icu_normalizer_unicode_set_filter",
"[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]");
deprecationLogger.deprecate(
"icu_normalizer_unicode_set_filter",
"[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]"
);
} else {
unicodeSetFilter = settings.get("unicode_set_filter");
}
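The tail of this method is collapsed in the diff, but the effect of a unicode set filter in ICU terms is presumably a FilteredNormalizer2: characters outside the set pass through unnormalized. A sketch of that ICU4J building block under that assumption (the set and input are made up):

import com.ibm.icu.text.FilteredNormalizer2;
import com.ibm.icu.text.Normalizer2;
import com.ibm.icu.text.UnicodeSet;

public class UnicodeSetFilterDemo {
    public static void main(String[] args) {
        Normalizer2 base = Normalizer2.getNFKCCasefoldInstance();
        // Only characters inside the set are normalized; Swedish å/ä/ö survive untouched.
        UnicodeSet set = new UnicodeSet("[^åäöÅÄÖ]").freeze();
        Normalizer2 filtered = new FilteredNormalizer2(base, set);
        System.out.println(filtered.normalize("Ångström FILE")); // "Ångström file"
    }
}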
@@ -68,7 +68,7 @@ public IcuTokenizerFactory(IndexSettings indexSettings, Environment environment,
public Tokenizer create() {
if (config == null) {
return new ICUTokenizer();
-        }else{
+        } else {
return new ICUTokenizer(config);
}
}
@@ -117,14 +117,11 @@ public RuleBasedBreakIterator getBreakIterator(int script) {
}
}

-    //parse a single RBBi rule file
+    // parse a single RBBi rule file
    private BreakIterator parseRules(String filename, Environment env) throws IOException {

        final Path path = env.configFile().resolve(filename);
        String rules = Files.readAllLines(path).stream().filter((v) -> v.startsWith("#") == false).collect(Collectors.joining("\n"));

        return new RuleBasedBreakIterator(rules.toString());
    }
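For context on what this factory builds: with no rule files configured, create() returns a default ICUTokenizer, which segments per UAX #29 and uses dictionaries for scripts written without spaces. A standalone sketch, assuming lucene-analysis-icu is on the classpath (the sample text is illustrative):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.icu.segmentation.ICUTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class IcuTokenizerDemo {
    public static void main(String[] args) throws IOException {
        ICUTokenizer tokenizer = new ICUTokenizer(); // default config, as in create() above
        tokenizer.setReader(new StringReader("สวัสดีครับ OpenSearch"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            // Thai is split at dictionary word boundaries even without spaces.
            System.out.println(term.toString());
        }
        tokenizer.close();
    }
}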