Backporting #1488 spotless check on plugins (#1489)
Signed-off-by: Himanshu Setia <setiah@amazon.com>
setiah authored Nov 2, 2021
1 parent 9182939 commit c6e79ef
Showing 198 changed files with 4,867 additions and 3,339 deletions.
buildSrc/src/main/resources/checkstyle_suppressions.xml (5 additions, 1 deletion)
@@ -27,11 +27,15 @@
<suppress files="libs" checks="." />
<!-- Excludes checkstyle run on modules module -->
<suppress files="modules" checks="." />
<!-- Excludes checkstyle run on plugins module -->
<suppress files="plugins" checks="." />
<!-- Excludes checkstyle run on below qa module -->
<suppress files="qa[/\\]die-with-dignity" checks="." />
<!-- Excludes checkstyle run on rest-api-spec module -->
<suppress files="rest-api-spec" checks="." />
<!-- Excludes checkstyle run on test module -->
<suppress files="test" checks="." />

<!--
Truly temporary suppressions suppression of snippets included in
documentation that are so wide that they scroll.
gradle/formatting.gradle (1 addition, 23 deletions)
@@ -56,29 +56,7 @@ import org.opensearch.gradle.BuildPlugin
*/

// Do not add new sub-projects here!
-def projectPathsToExclude = [
-  ':plugins:analysis-icu',
-  ':plugins:analysis-kuromoji',
-  ':plugins:analysis-nori',
-  ':plugins:analysis-phonetic',
-  ':plugins:analysis-smartcn',
-  ':plugins:analysis-stempel',
-  ':plugins:analysis-ukrainian',
-  ':plugins:discovery-azure-classic',
-  ':plugins:discovery-ec2',
-  ':plugins:discovery-gce',
-  ':plugins:ingest-attachment',
-  ':plugins:mapper-annotated-text',
-  ':plugins:mapper-murmur3',
-  ':plugins:mapper-size',
-  ':plugins:repository-azure',
-  ':plugins:repository-gcs',
-  ':plugins:repository-hdfs',
-  ':plugins:repository-s3',
-  ':plugins:store-smb',
-  ':plugins:transport-nio',
-  ':qa:die-with-dignity'
-]
+def projectPathsToExclude = []

subprojects {
plugins.withType(BuildPlugin).whenPluginAdded {

Large diffs are not rendered by default.

ICUCollationKeyFilter.java (plugins/analysis-icu)
@@ -81,42 +81,40 @@
 */
@Deprecated
public final class ICUCollationKeyFilter extends TokenFilter {
-  private Collator collator = null;
-  private RawCollationKey reusableKey = new RawCollationKey();
-  private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private Collator collator = null;
+    private RawCollationKey reusableKey = new RawCollationKey();
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

-  /**
-   *
-   * @param input Source token stream
-   * @param collator CollationKey generator
-   */
-  public ICUCollationKeyFilter(TokenStream input, Collator collator) {
-    super(input);
-    // clone the collator: see http://userguide.icu-project.org/collation/architecture
-    try {
-      this.collator = (Collator) collator.clone();
-    } catch (CloneNotSupportedException e) {
-      throw new RuntimeException(e);
-    }
-  }
+    /**
+     *
+     * @param input Source token stream
+     * @param collator CollationKey generator
+     */
+    public ICUCollationKeyFilter(TokenStream input, Collator collator) {
+        super(input);
+        // clone the collator: see http://userguide.icu-project.org/collation/architecture
+        try {
+            this.collator = (Collator) collator.clone();
+        } catch (CloneNotSupportedException e) {
+            throw new RuntimeException(e);
+        }
+    }

-  @Override
-  public boolean incrementToken() throws IOException {
-    if (input.incrementToken()) {
-      char[] termBuffer = termAtt.buffer();
-      String termText = new String(termBuffer, 0, termAtt.length());
-      collator.getRawCollationKey(termText, reusableKey);
-      int encodedLength = IndexableBinaryStringTools.getEncodedLength(
-          reusableKey.bytes, 0, reusableKey.size);
-      if (encodedLength > termBuffer.length) {
-        termAtt.resizeBuffer(encodedLength);
-      }
-      termAtt.setLength(encodedLength);
-      IndexableBinaryStringTools.encode(reusableKey.bytes, 0, reusableKey.size,
-          termAtt.buffer(), 0, encodedLength);
-      return true;
-    } else {
-      return false;
-    }
-  }
+    @Override
+    public boolean incrementToken() throws IOException {
+        if (input.incrementToken()) {
+            char[] termBuffer = termAtt.buffer();
+            String termText = new String(termBuffer, 0, termAtt.length());
+            collator.getRawCollationKey(termText, reusableKey);
+            int encodedLength = IndexableBinaryStringTools.getEncodedLength(reusableKey.bytes, 0, reusableKey.size);
+            if (encodedLength > termBuffer.length) {
+                termAtt.resizeBuffer(encodedLength);
+            }
+            termAtt.setLength(encodedLength);
+            IndexableBinaryStringTools.encode(reusableKey.bytes, 0, reusableKey.size, termAtt.buffer(), 0, encodedLength);
+            return true;
+        } else {
+            return false;
+        }
+    }
}
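
The reformatted filter above is consumed like any Lucene TokenStream. A minimal usage sketch, assuming ICU4J and the Lucene ICU analysis module are on the classpath (the locale and input are illustrative, not taken from this commit):

import com.ibm.icu.text.Collator;
import com.ibm.icu.util.ULocale;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.StringReader;

public class CollationKeyFilterDemo {
    public static void main(String[] args) throws Exception {
        Collator collator = Collator.getInstance(new ULocale("de"));
        KeywordTokenizer tokenizer = new KeywordTokenizer();
        tokenizer.setReader(new StringReader("Müller"));
        // ICUCollationKeyFilter is the (deprecated) class shown in the diff above.
        TokenStream stream = new ICUCollationKeyFilter(tokenizer, collator);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            // After incrementToken(), the term holds an encoded collation key, not the original text.
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}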
IcuAnalyzerProvider.java (plugins/analysis-icu)
@@ -53,11 +53,15 @@ public IcuAnalyzerProvider(IndexSettings indexSettings, Environment environment,
        String method = settings.get("method", "nfkc_cf");
        String mode = settings.get("mode", "compose");
        if (!"compose".equals(mode) && !"decompose".equals(mode)) {
-            throw new IllegalArgumentException("Unknown mode [" + mode + "] in analyzer [" + name +
-                "], expected one of [compose, decompose]");
+            throw new IllegalArgumentException(
+                "Unknown mode [" + mode + "] in analyzer [" + name + "], expected one of [compose, decompose]"
+            );
        }
        Normalizer2 normalizer = Normalizer2.getInstance(
-            null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE);
+            null,
+            method,
+            "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE
+        );
        this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(indexSettings, normalizer, settings);
    }
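
The wrapped Normalizer2.getInstance(...) call above is plain ICU4J; the mode argument is the only thing the analyzer's "mode" setting changes. A minimal sketch of the two modes, with an illustrative input (U+212B, the angstrom sign):

import com.ibm.icu.text.Normalizer2;

public class NormalizerModeDemo {
    public static void main(String[] args) {
        Normalizer2 compose = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE);
        Normalizer2 decompose = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.DECOMPOSE);
        System.out.println(compose.normalize("\u212B"));   // "å" (single composed code point)
        System.out.println(decompose.normalize("\u212B")); // "a" followed by U+030A (combining ring)
    }
}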
IcuFoldingTokenFilterFactory.java (plugins/analysis-icu)
@@ -40,7 +40,6 @@
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;

-
/**
 * Uses the {@link org.apache.lucene.analysis.icu.ICUFoldingFilter}.
 * Applies foldings from UTR#30 Character Foldings.
@@ -57,7 +56,10 @@
public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {
    /** Store here the same Normalizer used by the lucene ICUFoldingFilter */
    private static final Normalizer2 ICU_FOLDING_NORMALIZER = Normalizer2.getInstance(
-        ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"), "utr30", Normalizer2.Mode.COMPOSE);
+        ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"),
+        "utr30",
+        Normalizer2.Mode.COMPOSE
+    );

    private final Normalizer2 normalizer;
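
ICU_FOLDING_NORMALIZER above loads the same utr30 data that Lucene's ICUFoldingFilter uses, so the filter can be exercised directly. A minimal sketch, assuming the Lucene ICU analysis module (tokenizer choice and inputs are illustrative):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.icu.ICUFoldingFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.StringReader;

public class FoldingDemo {
    public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("Résumé ÅNGSTRÖM"));
        TokenStream stream = new ICUFoldingFilter(tokenizer); // applies UTR#30 foldings
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString()); // "resume", then "angstrom"
        }
        stream.end();
        stream.close();
    }
}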
IcuNormalizerCharFilterFactory.java (plugins/analysis-icu)
@@ -32,7 +32,6 @@

package org.opensearch.index.analysis;

-
import com.ibm.icu.text.Normalizer2;

import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter;
@@ -42,7 +41,6 @@

import java.io.Reader;

-
/**
 * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize character.
 * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
@@ -61,7 +59,10 @@ public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment e
            mode = "compose";
        }
        Normalizer2 normalizer = Normalizer2.getInstance(
-            null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE);
+            null,
+            method,
+            "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE
+        );
        this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(indexSettings, normalizer, settings);
    }
IcuNormalizerTokenFilterFactory.java (plugins/analysis-icu)
@@ -42,16 +42,14 @@
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;

-
/**
 * Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens.
 * <p>The {@code name} can be used to provide the type of normalization to perform.</p>
 * <p>The {@code unicodeSetFilter} attribute can be used to provide the UniCodeSet for filtering.</p>
 */
public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

-    private static final DeprecationLogger deprecationLogger =
-        DeprecationLogger.getLogger(IcuNormalizerTokenFilterFactory.class);
+    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IcuNormalizerTokenFilterFactory.class);

    private final Normalizer2 normalizer;

@@ -67,14 +65,14 @@ public TokenStream create(TokenStream tokenStream) {
        return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer);
    }

-    static Normalizer2 wrapWithUnicodeSetFilter(final IndexSettings indexSettings,
-                                                final Normalizer2 normalizer,
-                                                final Settings settings) {
+    static Normalizer2 wrapWithUnicodeSetFilter(final IndexSettings indexSettings, final Normalizer2 normalizer, final Settings settings) {
        String unicodeSetFilter = settings.get("unicodeSetFilter");
        if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) {
            if (unicodeSetFilter != null) {
-                deprecationLogger.deprecate("icu_normalizer_unicode_set_filter",
-                    "[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]");
+                deprecationLogger.deprecate(
+                    "icu_normalizer_unicode_set_filter",
+                    "[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]"
+                );
            } else {
                unicodeSetFilter = settings.get("unicode_set_filter");
            }
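
In ICU4J terms, the unicode-set filtering this method configures presumably amounts to a FilteredNormalizer2: characters inside the filter set are normalized, everything else passes through untouched. A minimal sketch of that idea (the set pattern and input are illustrative, not the plugin's defaults):

import com.ibm.icu.text.FilteredNormalizer2;
import com.ibm.icu.text.Normalizer2;
import com.ibm.icu.text.UnicodeSet;

public class UnicodeSetFilterDemo {
    public static void main(String[] args) {
        Normalizer2 base = Normalizer2.getNFKCCasefoldInstance();
        // Normalize everything except Swedish å/ä/ö, which should survive as typed.
        UnicodeSet filter = new UnicodeSet("[^åäöÅÄÖ]").freeze();
        Normalizer2 filtered = new FilteredNormalizer2(base, filter);
        System.out.println(base.normalize("Ångström"));     // "ångström" — Å is case-folded
        System.out.println(filtered.normalize("Ångström")); // "Ångström" — Å is outside the set, left alone
    }
}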
IcuTokenizerFactory.java (plugins/analysis-icu)
@@ -68,7 +68,7 @@ public IcuTokenizerFactory(IndexSettings indexSettings, Environment environment,
    public Tokenizer create() {
        if (config == null) {
            return new ICUTokenizer();
-        }else{
+        } else {
            return new ICUTokenizer(config);
        }
    }
@@ -117,14 +117,11 @@ public RuleBasedBreakIterator getBreakIterator(int script) {
        }
    }

-    //parse a single RBBi rule file
+    // parse a single RBBi rule file
    private BreakIterator parseRules(String filename, Environment env) throws IOException {

        final Path path = env.configFile().resolve(filename);
-        String rules = Files.readAllLines(path)
-            .stream()
-            .filter((v) -> v.startsWith("#") == false)
-            .collect(Collectors.joining("\n"));
+        String rules = Files.readAllLines(path).stream().filter((v) -> v.startsWith("#") == false).collect(Collectors.joining("\n"));

        return new RuleBasedBreakIterator(rules.toString());
    }
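
parseRules above strips "#" comment lines and hands the joined text to ICU's RBBI rule compiler. A minimal sketch of the same shape, using an in-memory rule list instead of a config file (the one-line rule set is illustrative and may need tuning for real use):

import com.ibm.icu.text.BreakIterator;
import com.ibm.icu.text.RuleBasedBreakIterator;

import java.util.List;
import java.util.stream.Collectors;

public class RbbiRulesDemo {
    public static void main(String[] args) {
        List<String> lines = List.of("# custom word-break rules", "[\\p{L}]+ {200};");
        // Same shape as parseRules: drop comment lines, join with newlines, compile.
        String rules = lines.stream().filter(v -> v.startsWith("#") == false).collect(Collectors.joining("\n"));
        RuleBasedBreakIterator iterator = new RuleBasedBreakIterator(rules);
        iterator.setText("spotless applied");
        for (int boundary = iterator.first(); boundary != BreakIterator.DONE; boundary = iterator.next()) {
            System.out.println("boundary at offset " + boundary);
        }
    }
}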