Demo address: https://github.com/UserFengFeng/Lucene-Maven.git
Direct downloads (Luke, IKAnalyzer7.2.0.jar):
Link: https://pan.baidu.com/s/1vaifZeSG5Uj5HmSYU89GXQ
Extraction code: dbnm
For background on Lucene itself, a quick search will turn up plenty of introductions; it is not covered in detail here.
Full-text search first extracts the terms from the target documents and organizes them into an index (much like a book's table of contents); searching the index then leads to the target documents. This process of building an index first and then searching against it is called full-text search (Full-text Search).
Two related concepts here are the forward index and the inverted index: a forward index maps each document to the terms it contains, while an inverted index maps each term to the documents containing it. The inverted index is what makes full-text search fast.
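To make the inverted index concrete, here is a toy sketch in plain Java (not Lucene): each term maps to the set of documents containing it, so a search is a map lookup rather than a scan over every document.

import java.util.*;

public class ToyInvertedIndex {
    // term -> ids of the documents that contain the term
    private final Map<String, Set<Integer>> index = new HashMap<>();

    public void add(int docId, String text) {
        // Naive tokenization on non-word characters; a real analyzer also
        // handles stemming, stopwords, CJK segmentation, etc.
        for (String term : text.toLowerCase().split("\\W+")) {
            index.computeIfAbsent(term, k -> new HashSet<>()).add(docId);
        }
    }

    // Searching is a single map lookup
    public Set<Integer> search(String term) {
        return index.getOrDefault(term.toLowerCase(), Collections.emptySet());
    }
}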
Lucene is an open-source full-text search engine toolkit under Apache. It provides a complete query engine and indexing engine, plus parts of a text-analysis engine. Lucene's goal is to give developers a simple, easy-to-use toolkit for adding full-text search to a target system.
Lucene is not a search engine: it is a full-text search toolkit written in Java (with ports to other languages) that exposes APIs for applications to call, so it is best understood as a class library for implementing full-text search. A search engine, by contrast, is a complete full-text retrieval system that runs as standalone software.
Lucene is used here together with Luke, a GUI tool for inspecting and managing Lucene indexes.
Demo: Maven+Lucene
1. Directory structure:
2. pom.xml
<?xml version="1.0" encoding="UTF-8"?>

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>day01</groupId>
  <artifactId>day01</artifactId>
  <version>1.0-SNAPSHOT</version>
  <packaging>war</packaging>

  <name>day01 Maven Webapp</name>
  <!-- FIXME change it to the project's website -->
  <url>http://www.example.com</url>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.7</maven.compiler.source>
    <maven.compiler.target>1.7</maven.compiler.target>
  </properties>

  <dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-core</artifactId>
      <version>7.2.0</version>
    </dependency>
    <!-- General-purpose analyzers, suitable for English text -->
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-analyzers-common</artifactId>
      <version>7.2.0</version>
    </dependency>
    <!-- Chinese analyzer -->
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-analyzers-smartcn</artifactId>
      <version>7.2.0</version>
    </dependency>

    <!-- Query parsing against the term index -->
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-queryparser</artifactId>
      <version>7.2.0</version>
    </dependency>
    <!-- Highlighting of matched keywords -->
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-highlighter</artifactId>
      <version>7.2.0</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/com.janeluo/ikanalyzer -->
    <dependency>
      <groupId>com.janeluo</groupId>
      <artifactId>ikanalyzer</artifactId>
      <version>2012_u6</version>
    </dependency>
  </dependencies>

  <build>
    <finalName>day01</finalName>
    <pluginManagement><!-- lock down plugins versions to avoid using Maven defaults (may be moved to parent pom) -->
      <plugins>
        <plugin>
          <artifactId>maven-clean-plugin</artifactId>
          <version>3.1.0</version>
        </plugin>
        <!-- see http://maven.apache.org/ref/current/maven-core/default-bindings.html#Plugin_bindings_for_war_packaging -->
        <plugin>
          <artifactId>maven-resources-plugin</artifactId>
          <version>3.0.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-compiler-plugin</artifactId>
          <version>3.8.0</version>
        </plugin>
        <plugin>
          <artifactId>maven-surefire-plugin</artifactId>
          <version>2.22.1</version>
        </plugin>
        <plugin>
          <artifactId>maven-war-plugin</artifactId>
          <version>3.2.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-install-plugin</artifactId>
          <version>2.5.2</version>
        </plugin>
        <plugin>
          <artifactId>maven-deploy-plugin</artifactId>
          <version>2.8.2</version>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>
</project>
3. Test class
package zhou;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;
import org.junit.Before;
import org.junit.Test;

import java.io.*;
import java.nio.file.Path;
import java.nio.file.Paths;

import static org.apache.lucene.document.Field.Store.YES;

public class LuceneTest {
    @Before
    public void setUp() throws Exception {
    }

    /*
     * Build the index
     * */
    @Test
    public void importIndex() throws IOException {
        // Location of the index library:
        // create the index folder index_loc under the project path
        Path path = Paths.get("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\index_loc");
        // Open the index directory
        FSDirectory dir = FSDirectory.open(path);
        // Create the analyzer
        Analyzer al = new StandardAnalyzer();
        // Create the index writer configuration
        IndexWriterConfig iwc = new IndexWriterConfig(al);
        // Create the index writer
        IndexWriter iw = new IndexWriter(dir, iwc);
        /*
         * Collect the raw documents:
         * create a searchsource folder and put the source files in it
         * */
        File sourceFile = new File("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\searchsource");
        // All files under the folder
        File[] files = sourceFile.listFiles();
        // Process each file
        for (File file : files) {
            // File attributes
            String fileName = file.getName();

            FileInputStream inputStream = new FileInputStream(file);
            InputStreamReader streamReader = new InputStreamReader(inputStream);
            BufferedReader reader = new BufferedReader(streamReader);
            String line;
            // Accumulate with a StringBuilder (concatenating onto a null String
            // would prepend the text "null" to the content)
            StringBuilder builder = new StringBuilder();
            while ((line = reader.readLine()) != null) {
                builder.append(line);
            }
            reader.close();
            inputStream.close();
            String content = builder.toString();
            String path1 = file.getPath();

            // StringField is not analyzed
            Field fName = new StringField("fileName", fileName, YES);
            Field fcontent = new TextField("content", content, YES);
            Field fsize = new TextField("size", "1024", YES);
            Field fpath = new TextField("path", path1, YES);
            // Create the document
            Document document = new Document();
            // Add the fields to the document
            document.add(fName);
            document.add(fcontent);
            document.add(fsize);
            document.add(fpath);
            // Write the document to the index
            iw.addDocument(document);
        }
        // Commit
        iw.commit();
        iw.close();
    }
}
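To sanity-check the result, the document count can be read back from the freshly built index (a minimal snippet, using the same index path as above):

// Open the index and print how many documents it holds
DirectoryReader reader = DirectoryReader.open(
        FSDirectory.open(Paths.get("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\index_loc")));
System.out.println("docs in index: " + reader.numDocs());
reader.close();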
4. Common Field types
Note: analyzing a LongField produces garbled tokens; it can technically be analyzed, but there is little point in doing so.
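Since the original table image does not reproduce here, below is a minimal sketch of typical field choices in Lucene 7.x (the field names and values are illustrative):

Document doc = new Document();
// StringField: indexed as a single unanalyzed token; good for ids and file names
doc.add(new StringField("fileName", "test.txt", Field.Store.YES));
// TextField: analyzed and indexed; good for body text
doc.add(new TextField("content", "full text to be searched", Field.Store.YES));
// LongPoint: indexed for numeric/range queries (the Lucene 6+ replacement for LongField); not stored
doc.add(new LongPoint("size", 1024L));
// StoredField: stored for retrieval only, not searchable; pair it with the LongPoint above
doc.add(new StoredField("size", 1024L));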
5. Analyzers: an Analyzer breaks field text into a stream of tokens. StandardAnalyzer works well for English; Chinese text calls for CJKAnalyzer, SmartChineseAnalyzer, or IKAnalyzer, as the next example shows.
6. Analyzer example
Test class:
package zhou;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;

public class LuceneTest1 {
    @Before
    public void setUp() throws Exception {
    }

    @Test
    public void importIndex() throws IOException {
        // Create the analyzer (StandardAnalyzer does not handle Chinese well)
        // StandardAnalyzer al = new StandardAnalyzer();
        Analyzer al = new CJKAnalyzer();
        // Tokenize
        TokenStream stream = al.tokenStream("content", "Serving web content with spring mvc");
        // Reset the token stream
        stream.reset();
        // Offset attribute of each token
        OffsetAttribute oa = stream.addAttribute(OffsetAttribute.class);
        // Term attribute of each token
        CharTermAttribute ca = stream.addAttribute(CharTermAttribute.class);
        // Iterate over the token stream
        while (stream.incrementToken()) {
            System.out.println("------------------");
            System.out.println("start offset " + oa.startOffset() + ", end offset " + oa.endOffset());
            System.out.println(ca);
        }
        // Finish and release the stream
        stream.end();
        stream.close();
    }
}
7. IKAnalyzer (Chinese analyzer)
Note: this jar has to be added by hand; several versions pulled from Maven online failed to work, so an offline jar is used instead.
IKAnalyzer jar and configuration files: https://pan.baidu.com/s/1SrKHlv_YSKy8ffb28ZFtbQ
Directory structure:
Do not rename the configuration files: their names are hard-coded in the IKAnalyzer source, and renaming them throws an exception:
java.lang.RuntimeException: Main Dictionary not found!!!

    at org.wltea.analyzer.dic.Dictionary.loadMainDict(Dictionary.java:200)
    at org.wltea.analyzer.dic.Dictionary.<init>(Dictionary.java:69)
    at org.wltea.analyzer.dic.Dictionary.initial(Dictionary.java:86)
    at org.wltea.analyzer.core.IKSegmenter.init(IKSegmenter.java:85)
    at org.wltea.analyzer.core.IKSegmenter.<init>(IKSegmenter.java:65)
    at org.wltea.analyzer.lucene.IKTokenizer.<init>(IKTokenizer.java:78)
    at org.wltea.analyzer.lucene.IKTokenizer.<init>(IKTokenizer.java:64)
    at org.wltea.analyzer.lucene.IKAnalyzer.createComponents(IKAnalyzer.java:64)
    at org.apache.lucene.analysis.Analyzer.tokenStream(Analyzer.java:198)
    at zhou.LuceneTest1.importAnalyzer(LuceneTest1.java:29)...
The location is hard-coded in the source (you can also modify the source to customize it):
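For reference, the configuration file that IKAnalyzer looks for is IKAnalyzer.cfg.xml at the classpath root; a typical one looks roughly like this (the dictionary file names are examples and must match the files actually shipped):

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- extension dictionary, relative to the classpath -->
    <entry key="ext_dict">ext.dic;</entry>
    <!-- extension stopword dictionary -->
    <entry key="ext_stopwords">stopword.dic;</entry>
</properties>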
Test class
package zhou;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.junit.Before;
import org.junit.Test;
import org.wltea.analyzer.lucene.IKAnalyzer;

import java.io.IOException;

public class LuceneTest1 {
    @Before
    public void setUp() throws Exception {
    }

    @Test
    public void importAnalyzer() throws IOException {
        // Create the analyzer
        // StandardAnalyzer al = new StandardAnalyzer();
        // Analyzer al = new CJKAnalyzer();
        Analyzer al = new IKAnalyzer();

        // Tokenize
        TokenStream stream = al.tokenStream("content", "当前市场不稳定,得赶紧稳盘抛出。");
        // Reset the token stream
        stream.reset();
        // Offset attribute of each token
        OffsetAttribute oa = stream.addAttribute(OffsetAttribute.class);
        // Term attribute of each token
        CharTermAttribute ca = stream.addAttribute(CharTermAttribute.class);
        // Iterate over the token stream
        while (stream.incrementToken()) {
            System.out.println("------------------");
            System.out.println("start offset " + oa.startOffset() + ", end offset " + oa.endOffset());
            System.out.println(ca);
        }
        // Finish and release the stream
        stream.end();
        stream.close();
    }
}
8. Using Luke (to manage the index library)
9. Adding to the index
public class LuceneTest2 {
    @Before
    public void setUp() throws Exception {
    }

    /*
     * Build the index
     * */
    @Test
    public void importIndex() throws IOException {
        IndexWriter iw = getIndexWriter();
        /*
         * Collect the raw document:
         * create a searchsource folder and put the source file in it
         * */
        File file = new File("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\searchsource\\test.txt");
        String content = readFileContent(file);
        String fileName = file.getName();
        String filePath = file.getPath();
        // StringField is not analyzed
        Field fName = new StringField("fileName", fileName, YES);
        Field fcontent = new TextField("content", content, YES);
        // "1024" should really be the actual file size; hard-coded here out of laziness
        Field fsize = new TextField("size", "1024", YES);
        Field fpath = new TextField("path", filePath, YES);
        // Create the document
        Document document = new Document();
        // Add the fields to the document
        document.add(fName);
        document.add(fcontent);
        document.add(fsize);
        document.add(fpath);
        // Write the document to the index
        iw.addDocument(document);
        // Commit
        iw.commit();
        iw.close();
    }

    public IndexWriter getIndexWriter() throws IOException {
        // Location of the index library:
        // create the index folder index_loc under the project path
        Path path = Paths.get("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\index_loc");
        // Open the index directory
        FSDirectory dir = FSDirectory.open(path);
        // Create the analyzer
        Analyzer al = new IKAnalyzer();
        // Create the index writer configuration
        IndexWriterConfig iwc = new IndexWriterConfig(al);
        // Create the index writer
        IndexWriter iw = new IndexWriter(dir, iwc);
        return iw;
    }

    // Read the file content (the reader is closed once, in the finally block)
    public String readFileContent(File file) {
        BufferedReader reader = null;
        StringBuilder sbf = new StringBuilder();
        try {
            reader = new BufferedReader(new FileReader(file));
            String tempStr;
            while ((tempStr = reader.readLine()) != null) {
                sbf.append(tempStr);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e1) {
                    e1.printStackTrace();
                }
            }
        }
        return sbf.toString();
    }
}
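As a side note, readFileContent can be collapsed to a one-liner with java.nio (assuming the source files are UTF-8; unlike the loop above, this also preserves line breaks):

// requires: import java.nio.file.Files; import java.nio.charset.StandardCharsets;
String content = new String(Files.readAllBytes(file.toPath()), StandardCharsets.UTF_8);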
10. Deleting from the index
(1) Delete everything
// Delete all documents from the index
@Test
public void deleteIndex() throws IOException {
    IndexWriter iw = getIndexWriter();
    iw.deleteAll();
    iw.commit();
    iw.close();
}
(2) Delete documents matching a query
@Test
public void deleteIndexByQuery() throws IOException {
    IndexWriter iw = getIndexWriter();
    // Create the term
    Term term = new Term("content", "三");
    // Create a term query from it
    TermQuery query = new TermQuery(term);
    iw.deleteDocuments(query);
    iw.commit();
    iw.close();
}
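IndexWriter can also delete by Term directly, without building a TermQuery first:

// Equivalent delete using the Term overload of deleteDocuments
iw.deleteDocuments(new Term("content", "三"));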
11. Term queries
Creating a query
Build a Query object for the information to be searched. Lucene generates the final query syntax from the Query object; much as relational databases have SQL, Lucene has its own query syntax. For example, "name:lucene" searches for documents whose name field contains the term "lucene".
A query object can be created in two ways:
(1) Use the Query subclasses Lucene provides
Query is an abstract class and Lucene supplies many concrete query types, e.g. TermQuery for exact term matches, or numeric range queries (NumericRangeQuery in older versions, the Point classes since Lucene 6).
Query query = new TermQuery(new Term("name", "lucene"));
(2) Use QueryParser to parse a query expression
QueryParser parses a user-entered query expression into a Query instance:
QueryParser queryParser = new QueryParser("name", new IKAnalyzer());
Query query = queryParser.parse("name:lucene");
Test class
package zhou;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

public class LuceneTest3 {
    @Before
    public void setUp() throws Exception {
    }

    @Test
    public void queryIndex() throws IOException {
        Path path = Paths.get("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\index_loc");
        FSDirectory open = FSDirectory.open(path);
        // Create the index reader
        DirectoryReader reader = DirectoryReader.open(open);
        // Create the index searcher
        IndexSearcher is = new IndexSearcher(reader);
        // Create the term (look up files whose fileName is test.txt)
        Term term = new Term("fileName", "test.txt");
        // Create a term query from it
        TermQuery tq = new TermQuery(term);
        // Search (top N hits)
        TopDocs result = is.search(tq, 100);
        // Total hit count
        int total = (int) result.totalHits;
        System.out.println("total hits: " + total);

        for (ScoreDoc sd : result.scoreDocs) {
            // Document id
            int id = sd.doc;
            // Fetch the document
            Document doc = is.doc(id);
            String fileName = doc.get("fileName");
            String size = doc.get("size");
            String content = doc.get("content");
            String path1 = doc.get("path");

            System.out.println("fileName: " + fileName);
            System.out.println("size: " + size);
            System.out.println("content: " + content);
            // Bug fix: the original printed the index Path variable, not the stored field
            System.out.println("path: " + path1);
            System.out.println("-------------------------");
        }
    }
}
12. Numeric range queries
(1) NumericRangeQuery
Queries for documents within a numeric range. Note that NumericRangeQuery was removed in Lucene 6; with Lucene 7.2 the equivalent is the Point classes, e.g. LongPoint.newRangeQuery, as below:
// Files whose size is between 0 and 1024
@Test
public void rangeQuery() throws IOException {
    IndexSearcher is = getDirReader();
    // Create the numeric range query. NumericRangeQuery.newLongRange no longer
    // exists in Lucene 7.2; LongPoint.newRangeQuery is its replacement.
    Query tq = LongPoint.newRangeQuery("size", 0L, 1024L);
    printDoc(is, tq);
}

public IndexSearcher getDirReader() throws IOException {
    Path path = Paths.get("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\index_loc");
    FSDirectory open = FSDirectory.open(path);
    // Create the index reader
    DirectoryReader reader = DirectoryReader.open(open);
    // Create the index searcher
    IndexSearcher is = new IndexSearcher(reader);
    return is;
}

// Print the results
public static void printDoc(IndexSearcher is, Query tq) throws IOException {
    // Search (top N hits)
    TopDocs result = is.search(tq, 100);
    // Total hit count
    int total = (int) result.totalHits;
    System.out.println("total hits: " + total);

    for (ScoreDoc sd : result.scoreDocs) {
        // Document id
        int id = sd.doc;
        // Fetch the document
        Document doc = is.doc(id);
        String fileName = doc.get("fileName");
        String size = doc.get("size");
        String content = doc.get("content");
        String path1 = doc.get("path");

        System.out.println("fileName: " + fileName);
        System.out.println("size: " + size);
        System.out.println("content: " + content);
        System.out.println("path: " + path1);
        System.out.println("-------------------------");
    }
}
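One caveat: in the indexing code earlier, size was written as a TextField holding the string "1024", which a numeric range query cannot match. For the range query above to return results, the value must be indexed numerically at write time; a minimal sketch (fileSize is a hypothetical long):

// At indexing time: LongPoint makes "size" range-searchable,
// and a StoredField keeps the value retrievable from search results
document.add(new LongPoint("size", fileSize));
document.add(new StoredField("size", fileSize));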
13. Combining multiple queries
/*
 * Combined query with multiple conditions
 * */
@Test
public void queryIndex2() throws IOException {
    IndexSearcher is = getDirReader();
    // BooleanQuery combines clauses with AND / OR / NOT semantics.
    // In Lucene 7.2 BooleanQuery is immutable and is assembled with a Builder
    // (the no-arg constructor used in older versions is gone).
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    // Create the individual term queries
    Query query = new TermQuery(new Term("fileName", "test.txt"));
    Query query1 = new TermQuery(new Term("content", "test.txt"));
    builder.add(query, BooleanClause.Occur.MUST);
    // SHOULD: optional clause
    builder.add(query1, BooleanClause.Occur.SHOULD);
    BooleanQuery bq = builder.build();
    System.out.println("query: " + bq);
    printDoc(is, bq);
}

public IndexSearcher getDirReader() throws IOException {
    Path path = Paths.get("D:\\个人文件\\java后端\\Lucene_Demo\\day01\\index_loc");
    FSDirectory open = FSDirectory.open(path);
    // Create the index reader
    DirectoryReader reader = DirectoryReader.open(open);
    // Create the index searcher
    IndexSearcher is = new IndexSearcher(reader);
    return is;
}

// Print the results
public static void printDoc(IndexSearcher is, Query tq) throws IOException {
    // Search (top N hits)
    TopDocs result = is.search(tq, 100);
    // Total hit count
    int total = (int) result.totalHits;
    System.out.println("total hits: " + total);

    for (ScoreDoc sd : result.scoreDocs) {
        // Document id
        int id = sd.doc;
        // Fetch the document
        Document doc = is.doc(id);
        String fileName = doc.get("fileName");
        String size = doc.get("size");
        String content = doc.get("content");
        String path1 = doc.get("path");

        System.out.println("fileName: " + fileName);
        System.out.println("size: " + size);
        System.out.println("content: " + content);
        System.out.println("path: " + path1);
        System.out.println("-------------------------");
    }
}
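For reference, the three clause types map onto boolean operators like this (q1, q2, q3 stand for any Query objects):

builder.add(q1, BooleanClause.Occur.MUST);     // AND: documents must match q1
builder.add(q2, BooleanClause.Occur.SHOULD);   // OR: matching q2 is optional but raises the score
builder.add(q3, BooleanClause.Occur.MUST_NOT); // NOT: documents matching q3 are excluded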
14. Parsed queries
(1) QueryParser
/*
 * Parsed queries
 * Variant 1
 * */
@Test
public void queryIndex3() throws IOException, ParseException {
    IndexSearcher is = getDirReader();
    IKAnalyzer ik = new IKAnalyzer();
    // Create the query parser
    QueryParser parser = new QueryParser("content", ik);
    // The sentence below is analyzed into terms, which are then searched in the content field
    Query query = parser.parse("我在学习全文检索技术Lucene");
    System.out.println("query: " + query);
    printDoc(is, query);
}

/*
 * Parsed queries
 * Variant 2
 * */
@Test
public void queryIndex4() throws IOException, ParseException {
    IndexSearcher is = getDirReader();
    IKAnalyzer ik = new IKAnalyzer();
    // Create the query parser
    QueryParser parser = new QueryParser("content", ik);
    // Hand-written query expression; the parser understands AND, OR (||) and ! (NOT)
    Query query = parser.parse("content: 我 AND 你是 ! 好的");
    System.out.println("query: " + query);
    printDoc(is, query);
}
(2) Multi-field parsed queries
MultiFieldQueryParser queries a combination of fields.
Use MultiFieldQueryParser to query several fields at once; for example, a product search where the keyword must be matched against both the product name and the product description.
// Fields to query in combination
String[] fields = {"fileName", "fileContent"};
// Create the multi-field query parser
QueryParser queryParser = new MultiFieldQueryParser(fields, new IKAnalyzer());
// Find documents whose file name or content contains the keyword "java"
Query query = queryParser.parse("java");
Test class:
/*
 * Multi-field parsed query
 * */
@Test
public void multiFieldQuery() throws IOException, ParseException {
    IndexSearcher is = getDirReader();
    IKAnalyzer ik = new IKAnalyzer();

    String[] fields = {"fileName", "content"};
    MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, ik);
    Query query = parser.parse("我在学习全文检索技术Lucene");

    System.out.println("query: " + query);
    printDoc(is, query);
}