"Fossies" - the Fresh Open Source Software Archive

Member "elasticsearch-6.8.23/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml" (29 Dec 2021, 3825 Bytes) of package /linux/www/elasticsearch-6.8.23-src.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Ansible YAML source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively you can here view or download the uninterpreted source code file.

---
"Custom normalizer with illegal filter in request":
    # Tests the analyze API with a normalizer. This test is in the analysis-common
    # module because there are no filters that support multiTermAware.
    - skip:
        version: " - 5.99.99"
        reason:  normalizer support in 6.0.0
    - do:
        catch: bad_request
        indices.analyze:
          body:
            text: ABc
            explain: true
            filter: [word_delimiter]

    - match: { status: 400 }
    - match: { error.type: illegal_argument_exception }
    - match: { error.reason: "Custom normalizer may not use filter [word_delimiter]" }

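On the wire this is the standard _analyze API with no tokenizer or analyzer, which the server treats as an ad-hoc custom normalizer; a sketch of the equivalent request against a 6.x cluster:

    POST /_analyze
    {
      "text": "ABc",
      "explain": true,
      "filter": ["word_delimiter"]
    }

Because word_delimiter is not a multi-term-aware filter, the server rejects it with the 400 illegal_argument_exception asserted above.
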
---
"htmlStrip_deprecated":
    - skip:
        version: " - 6.2.99"
        reason: deprecated in 6.3
        features: "warnings"

    - do:
        indices.create:
          index: test_deprecated_htmlstrip
          body:
            settings:
              index:
                analysis:
                  analyzer:
                    my_htmlStripWithCharfilter:
                      tokenizer: keyword
                      char_filter: ["htmlStrip"]
            mappings:
              type:
                properties:
                  name:
                    type: text
                    analyzer: my_htmlStripWithCharfilter

    - do:
        warnings:
          - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.'
        index:
          index:   test_deprecated_htmlstrip
          type:    type
          id:      1
          body:    { "name": "foo bar" }

    - do:
        warnings:
          - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.'
        index:
          index:   test_deprecated_htmlstrip
          type:    type
          id:      2
          body:    { "name": "foo baz" }

    - do:
        warnings:
          - 'The [htmpStrip] char filter name is deprecated and will be removed in a future version. Please change the filter name to [html_strip] instead.'
        indices.analyze:
          index: test_deprecated_htmlstrip
          body:
            analyzer: "my_htmlStripWithCharfilter"
            text: "<html>foo</html>"
    - length: { tokens: 1 }
    - match:  { tokens.0.token: "\nfoo\n" }

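The final step corresponds to an index-scoped _analyze call; a sketch of the equivalent request, using the index created above:

    POST /test_deprecated_htmlstrip/_analyze
    {
      "analyzer": "my_htmlStripWithCharfilter",
      "text": "<html>foo</html>"
    }

The keyword tokenizer keeps the input as a single token, and the htmlStrip char filter replaces the tags, leaving "\nfoo\n" as asserted. Note that each warnings block must match the deprecation header emitted by the server verbatim, including its upstream [htmpStrip] spelling.
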
---
"Synonym filter with tokenizer":
    - skip:
        version: " - 5.99.99"
        reason: support for synonyms using the same analysis chain was added in 6.0.0
    - do:
        indices.create:
          index: test_synonym
          body:
            settings:
              index:
                analysis:
                  tokenizer:
                    trigram:
                      type: nGram
                      min_gram: 3
                      max_gram: 3
                  filter:
                    synonym:
                      type: synonym
                      synonyms: ["kimchy => shay"]

    - do:
        indices.analyze:
          index: test_synonym
          body:
            tokenizer: trigram
            filter: [synonym]
            text: kimchy
    - length: { tokens: 2 }
    - match:  { tokens.0.token: sha }
    - match:  { tokens.1.token: hay }

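Since 6.0 the synonym filter parses its rules with the rest of the same analysis chain, so both sides of "kimchy => shay" are split by the trigram tokenizer: the input's trigrams match the left-hand side of the rule and are replaced by the trigrams of "shay", namely sha and hay. A sketch of the equivalent request:

    POST /test_synonym/_analyze
    {
      "tokenizer": "trigram",
      "filter": ["synonym"],
      "text": "kimchy"
    }
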
---
"Custom normalizer in request":
    - skip:
        version: " - 5.99.99"
        reason:  normalizer support in 6.0.0
    - do:
        indices.analyze:
          body:
            text: ABc
            explain: true
            filter: ["lowercase"]

    - length: { detail.tokenizer.tokens: 1 }
    - length: { detail.tokenfilters.0.tokens: 1 }
    - match:  { detail.tokenizer.name: keyword_for_normalizer }
    - match:  { detail.tokenizer.tokens.0.token: ABc }
    - match:  { detail.tokenfilters.0.name: lowercase }
    - match:  { detail.tokenfilters.0.tokens.0.token: abc }
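
Without a tokenizer or analyzer in the request, the analyze API again builds an ad-hoc normalizer around its internal keyword_for_normalizer tokenizer, which is why the explain output names it. A sketch of the equivalent request:

    POST /_analyze
    {
      "text": "ABc",
      "explain": true,
      "filter": ["lowercase"]
    }

The single keyword token "ABc" passes through lowercase to become "abc". Unlike word_delimiter in the first test, lowercase is multi-term aware, so it is legal in a normalizer.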