1. The version used here is 7.5.1. The plugins installed later depend on the ES version, so choose a version to match your needs.
2. Running ES in Docker is the recommended setup; for ease of demonstration, the Windows distribution is used directly here.
Once ES has started, you can issue commands against it from Postman.
1. Create or update an index and its documents
Method 1 (recommended): PUT /{index}/{type}/{id}. The id is required: if no document with that id exists, the data is inserted; otherwise it is updated. (If only the index is given, the index is created.)
Method 2: POST /{index}/{type}/{id}. The id may be omitted; if absent, ES generates one.
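For example, creating document 1 in index newindex via Postman (the newindex/newdoc names and the JSON fields are placeholders, chosen to match the Student entity used later):
PUT http://127.0.0.1:9200/newindex/newdoc/1
{
    "name": "王二狗",
    "age": 30
}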
2. Retrieve all documents
GET /{index}/{type}/_search
e.g. http://127.0.0.1:9200/newindex/newdoc/_search
3. Retrieve a document by id
GET /{index}/{type}/{id}
e.g. http://127.0.0.1:9200/newindex/newdoc/1
4. Fuzzy query
GET /{index}/{type}/_search?q=*keyword*
e.g. http://127.0.0.1:9200/newindex/newdoc/_search?q=*王*
5. Delete a document
DELETE /{index}/{type}/{id}
e.g. http://127.0.0.1:9200/newindex/newdoc/1
For more statements, see the official documentation.
git clone https://github.com/mobz/elasticsearch-head.git
npm install -g grunt-cli
cd elasticsearch-head/
npm install
Enable cross-origin access in the ES config so head can connect:
vim ../elasticsearch-7.5.1/config/elasticsearch.yml
http.cors.enabled: true
http.cors.allow-origin: "*"
Then start head from its root directory:
cd -    # back to the head root directory
grunt server
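By default, grunt server serves head at http://localhost:9100; open it in a browser and connect it to the ES node at http://localhost:9200.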
Either clone the IK analyzer from GitHub or download the prebuilt zip directly; I chose the latter.
After unzipping, copy the extracted folder into the elasticsearch-7.5.1/plugins directory and rename it to ik.
Test the Chinese segmentation of the IK analyzer.
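A quick check via Postman (the sample sentence is arbitrary; ik_max_word is IK's finest-grained analyzer). The response lists the tokens IK produced:
POST http://127.0.0.1:9200/_analyze
{
    "analyzer": "ik_max_word",
    "text": "大王派我来巡山"
}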
To extend IK with your own vocabulary:
Create custom.dic under the /elasticsearch-7.5.1/plugins/ik/config directory;
Add your custom words to it, one per line;
Edit IKAnalyzer.cfg.xml in the same directory and point the <entry key="ext_dict"> entry at the custom dictionary.
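After the edit, IKAnalyzer.cfg.xml looks roughly like this (a sketch based on the file the plugin ships with; only the ext_dict entry changes, and custom.dic is the file created above):
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- custom dictionary created above -->
    <entry key="ext_dict">custom.dic</entry>
</properties>
Restart ES afterwards so the new dictionary is loaded.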
Spring Data ElasticSearch
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-elasticsearch</artifactId>
</dependency>
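The starter's version is managed by the Spring Boot parent; as with the plugins above, the Spring Data Elasticsearch client must be compatible with the ES server version (7.5.1 here), so check the compatibility matrix if the client fails to connect.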
spring:
  data:
    elasticsearch:
      cluster-nodes: 127.0.0.1:9300
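Note that cluster-nodes points at the transport port 9300 used by the Java client, not the HTTP port 9200 used by Postman earlier.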
import java.io.Serializable;

import org.springframework.data.annotation.Id;
import org.springframework.data.elasticsearch.annotations.Document;
import org.springframework.data.elasticsearch.annotations.Field;
import org.springframework.data.elasticsearch.annotations.FieldType;

import lombok.Data;
import lombok.experimental.Accessors;

@Data
@Accessors(chain = true)
@Document(indexName = "school", type = "student") // indexName is the ES index, type the mapping type
public class Student implements Serializable {
    // @Id marks the document id
    // index = true toggles whether a field is indexed (enabled by default)
    // type sets the field type
    // analyzer = "ik_max_word" controls how the field is tokenized at index time (IK's finest granularity)
    // searchAnalyzer = "ik_max_word" controls how the query string is tokenized at search time
    @Id
    private String id;
    // Text rather than Keyword: Keyword fields are stored as-is and ignore analyzer settings
    @Field(type = FieldType.Text, analyzer = "ik_max_word", searchAnalyzer = "ik_max_word")
    private String name;
    private Integer age;
    @Field(type = FieldType.Double)
    private Double score;
    @Field(type = FieldType.Text, analyzer = "ik_max_word")
    private String info;
}
import lombok.Data;
import lombok.experimental.Accessors;

@Data
@Accessors(chain = true)
public class QueryPage {
    /**
     * Current page number
     */
    private Integer current;
    /**
     * Records per page
     */
    private Integer size;
}
import org.springframework.data.domain.Page;
import org.springframework.data.domain.Pageable;
import org.springframework.data.elasticsearch.repository.ElasticsearchRepository;

public interface EsRepository extends ElasticsearchRepository<Student, String> {
    /**
     * Fuzzy-match students by name or info, with paging
     */
    Page<Student> findByNameOrInfoLike(String name, String info, Pageable pageable);
}
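Spring Data derives the query from the method name: findByNameOrInfoLike matches documents whose name or info field contains the given keyword, and the Pageable argument adds paging.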
import java.util.List;

import org.springframework.data.domain.Page;

public interface EsService {
    /**
     * Insert a single student
     */
    void add(Student student);
    /**
     * Bulk insert
     */
    void addAll(List<Student> students);
    /**
     * Fuzzy search with paging
     */
    Page<Student> search(String keyword, QueryPage queryPage);
}
import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.stereotype.Service;

@Service
public class EsServiceImpl implements EsService {
    @Autowired
    private EsRepository esRepository;

    @Override
    public void add(Student student) {
        esRepository.save(student);
    }

    @Override
    public void addAll(List<Student> students) {
        esRepository.saveAll(students);
    }

    @Override
    public Page<Student> search(String keyword, QueryPage queryPage) {
        // ES page numbers are 0-based, while the incoming page (MyBatis-Plus convention) starts at 1
        PageRequest pageRequest = PageRequest.of(queryPage.getCurrent() - 1, queryPage.getSize());
        return esRepository.findByNameOrInfoLike(keyword, keyword, pageRequest);
    }
}
import java.util.ArrayList;
import java.util.List;

import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.data.domain.Page;

@SpringBootTest
public class EsServiceImplTest {
    @Autowired
    private EsService esService;

    @Test
    public void insert() {
        List<Student> students = new ArrayList<>();
        for (int i = 10; i <= 12; i++) {
            Student student = new Student();
            student.setId(i + "").setAge(10 + i).setName("王二狗" + i).setScore(72.5 + i).setInfo("大王派我来巡山" + i);
            students.add(student);
        }
        esService.addAll(students);
    }

    @Test
    public void fuzzySearch() {
        QueryPage queryPage = new QueryPage();
        queryPage.setCurrent(1).setSize(5);
        Page<Student> list = esService.search("二狗2", queryPage);
        list.forEach(System.out::println);
    }
}
Copy /logstash-7.5.1/config/logstash-sample.conf in the same directory, rename the copy logstash.conf, and edit it as follows:
# Sample Logstash configuration for creating a simple
# Beats -> Logstash -> Elasticsearch pipeline.
input {
    jdbc {
        # MySQL connection settings
        jdbc_connection_string => "jdbc:mysql://127.0.0.1:3306/springboot_es?characterEncoding=UTF8"
        jdbc_user => "root"
        jdbc_password => "1234"
        jdbc_driver_library => "D:/Develop_Tools_Others/logstash-7.5.1/mysql-connector-java-5.1.26.jar"
        jdbc_driver_class => "com.mysql.jdbc.Driver"
        jdbc_paging_enabled => "true"
        jdbc_page_size => "50000"
        # SQL that pulls the rows to be pushed into Elasticsearch
        statement => "select id,name,age,score,info from t_student"
        # Cron-style schedule: minute hour day month weekday; all * = run every minute
        schedule => "* * * * *"
    }
}
output {
    elasticsearch {
        hosts => "localhost:9200"
        # Index name
        index => "school"
        # Document type (kept to match the entity's type; deprecated in ES 7)
        document_type => "student"
        # Use the MySQL id column as the document id
        document_id => "%{id}"
    }
    stdout {
        # Print each event as a JSON line
        codec => json_lines
    }
}
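Because document_id is bound to the MySQL primary key, each scheduled run upserts the same documents instead of creating duplicates, so the one-minute schedule keeps the index in step with the table.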
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;

-- ----------------------------
-- Table structure for t_student
-- ----------------------------
DROP TABLE IF EXISTS `t_student`;
CREATE TABLE `t_student` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'Primary key',
  `name` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'Student name',
  `age` int(11) NULL DEFAULT NULL COMMENT 'Age',
  `score` double(255, 0) NULL DEFAULT NULL COMMENT 'Score',
  `info` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT 'Info',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 4 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;

-- ----------------------------
-- Records of t_student
-- ----------------------------
INSERT INTO `t_student` VALUES (1, '小明', 18, 88, '好好学习');
INSERT INTO `t_student` VALUES (2, '小红', 17, 85, '天天向上');
INSERT INTO `t_student` VALUES (3, '王二狗', 30, 59, '无产阶级');

SET FOREIGN_KEY_CHECKS = 1;
D:/Develop_Tools_Others/logstash-7.5.1>./bin/logstash.bat -f ./config/logstash.conf
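Once Logstash is running, you can verify the sync with the same Postman approach as before (school is the index configured in the output block); the three MySQL rows should come back as hits:
GET http://127.0.0.1:9200/school/_search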