--- "Custom normalizer with illegal filter in request": # Tests analyze api with normalizer. This is in the analysis-common module # because there are no filters that support multiTermAware - skip: version: " - 5.99.99" reason: normalizer support in 6.0.0 - do: catch: bad_request indices.analyze: body: text: ABc explain: true filter: [word_delimiter] - match: { status: 400 } - match: { error.type: illegal_argument_exception } - match: { error.reason: "Custom normalizer may not use filter [word_delimiter]" } --- "htmlStrip_deprecated": - skip: version: " - 6.2.99" reason: deprecated in 6.3 features: "warnings" - do: indices.create: index: test_deprecated_htmlstrip body: settings: index: analysis: analyzer: my_htmlStripWithCharfilter: tokenizer: keyword char_filter: ["htmlStrip"] mappings: type: properties: name: type: text analyzer: my_htmlStripWithCharfilter - do: warnings: - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' index: index: test_deprecated_htmlstrip type: type id: 1 body: { "name": "foo bar" } - do: warnings: - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' index: index: test_deprecated_htmlstrip type: type id: 2 body: { "name": "foo baz" } - do: warnings: - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.' indices.analyze: index: test_deprecated_htmlstrip body: analyzer: "my_htmlStripWithCharfilter" text: "foo" - length: { tokens: 1 } - match: { tokens.0.token: "\nfoo\n" } --- "Synonym filter with tokenizer": - skip: version: " - 5.99.99" reason: to support synonym same analysis chain were added in 6.0.0 - do: indices.create: index: test_synonym body: settings: index: analysis: tokenizer: trigram: type: nGram min_gram: 3 max_gram: 3 filter: synonym: type: synonym synonyms: ["kimchy => shay"] - do: indices.analyze: index: test_synonym body: tokenizer: trigram filter: [synonym] text: kimchy - length: { tokens: 2 } - match: { tokens.0.token: sha } - match: { tokens.1.token: hay } --- "Custom normalizer in request": - skip: version: " - 5.99.99" reason: normalizer support in 6.0.0 - do: indices.analyze: body: text: ABc explain: true filter: ["lowercase"] - length: { detail.tokenizer.tokens: 1 } - length: { detail.tokenfilters.0.tokens: 1 } - match: { detail.tokenizer.name: keyword_for_normalizer } - match: { detail.tokenizer.tokens.0.token: ABc } - match: { detail.tokenfilters.0.name: lowercase } - match: { detail.tokenfilters.0.tokens.0.token: abc }