path: root/Bugzilla/Elastic/Indexer.pm
author     Dylan William Hardison <dylan@hardison.net>  2017-03-14 00:23:22 +0100
committer  Dylan William Hardison <dylan@hardison.net>  2017-03-14 16:13:30 +0100
commit     ac85576a8799ec12036bfb8bb93ac48f96830f1f (patch)
tree       769db0131ede6f433594da5286b552a694cd6d4e /Bugzilla/Elastic/Indexer.pm
parent     c56b9339af827fea16217832d93b266a27294acf (diff)
download   bugzilla-ac85576a8799ec12036bfb8bb93ac48f96830f1f.tar.gz
           bugzilla-ac85576a8799ec12036bfb8bb93ac48f96830f1f.tar.xz
Revert "Bug 1307485 - Add code to run a subset of buglist.cgi search queries against the ES backend"
Diffstat (limited to 'Bugzilla/Elastic/Indexer.pm')
-rw-r--r--  Bugzilla/Elastic/Indexer.pm  29
1 file changed, 17 insertions, 12 deletions
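
For context, the diff below restores the analyzer and tokenizer settings that the reverted change had replaced. The following is a minimal, self-contained sketch of how analysis settings of this shape are sent to Elasticsearch from Perl, assuming the Search::Elasticsearch CPAN client: the client construction and the index name bugzilla_example are illustrative assumptions, while the whiteboard_tokens analyzer and its whiteboard_tokens_pattern tokenizer are taken verbatim from the diff below.

#!/usr/bin/perl
use strict;
use warnings;
use Search::Elasticsearch;

# Illustrative client; Indexer.pm derives its client and index name
# from Bugzilla's configuration rather than hard-coding them.
my $es = Search::Elasticsearch->new(nodes => ['127.0.0.1:9200']);

$es->indices->create(
    index => 'bugzilla_example',    # hypothetical index name
    body  => {
        settings => {
            number_of_shards => 1,
            analysis         => {
                analyzer => {
                    # Splits status-whiteboard text on brackets and
                    # separators, then drops English stop words.
                    whiteboard_tokens => {
                        type      => 'custom',
                        tokenizer => 'whiteboard_tokens_pattern',
                        filter    => ['stop'],
                    },
                },
                tokenizer => {
                    whiteboard_tokens_pattern => {
                        type    => 'pattern',
                        pattern => '\\s*([,;]*\\[|\\][\\s\\[]*|[;,])\\s*',
                    },
                },
            },
        },
    },
);

Because the pattern is a single-quoted Perl string, each '\\' reaches Elasticsearch as a single backslash, so the tokenizer splits whiteboard text on square brackets, commas and semicolons.
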
diff --git a/Bugzilla/Elastic/Indexer.pm b/Bugzilla/Elastic/Indexer.pm
index dd71a7198..82f946af9 100644
--- a/Bugzilla/Elastic/Indexer.pm
+++ b/Bugzilla/Elastic/Indexer.pm
@@ -23,7 +23,7 @@ has 'mtime' => (
 has 'shadow_dbh' => ( is => 'lazy' );
 
 has 'debug_sql' => (
-    is => 'ro',
+    is => 'ro',
     default => 0,
 );
 
@@ -40,24 +40,24 @@ sub create_index {
         index => $self->index_name,
         body => {
             settings => {
-                number_of_shards => 2,
+                number_of_shards => 1,
                 analysis => {
-                    filter => {
-                        asciifolding_original => {
-                            type => "asciifolding",
-                            preserve_original => \1,
-                        },
-                    },
                     analyzer => {
                         folding => {
+                            type => 'standard',
                             tokenizer => 'standard',
-                            filter => ['standard', 'lowercase', 'asciifolding_original'],
+                            filter => [ 'lowercase', 'asciifolding' ]
                         },
                         bz_text_analyzer => {
                             type => 'standard',
                             filter => ['lowercase', 'stop'],
                             max_token_length => '20'
                         },
+                        bz_substring_analyzer => {
+                            type => 'custom',
+                            filter => ['lowercase'],
+                            tokenizer => 'bz_ngram_tokenizer',
+                        },
                         bz_equals_analyzer => {
                             type => 'custom',
                             filter => ['lowercase'],
@@ -71,20 +71,25 @@ sub create_index {
                         whiteboard_shingle_words => {
                             type => 'custom',
                             tokenizer => 'whiteboard_words_pattern',
-                            filter => ['stop', 'shingle', 'lowercase']
+                            filter => ['stop', 'shingle']
                         },
                         whiteboard_tokens => {
                             type => 'custom',
                             tokenizer => 'whiteboard_tokens_pattern',
-                            filter => ['stop', 'lowercase']
+                            filter => ['stop']
                         },
                         whiteboard_shingle_tokens => {
                             type => 'custom',
                             tokenizer => 'whiteboard_tokens_pattern',
-                            filter => ['stop', 'shingle', 'lowercase']
+                            filter => ['stop', 'shingle']
                         }
                     },
                     tokenizer => {
+                        bz_ngram_tokenizer => {
+                            type => 'nGram',
+                            min_ngram => 2,
+                            max_ngram => 25,
+                        },
                         whiteboard_tokens_pattern => {
                             type => 'pattern',
                             pattern => '\\s*([,;]*\\[|\\][\\s\\[]*|[;,])\\s*'