Elasticsearch basics.

Environment: elasticsearch 6.0, kibana 6.0.0

Go into the elasticsearch/bin directory.

Install the matching version of the IK analyzer plugin (6.0 here):

elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v6.0.0/elasticsearch-analysis-ik-6.0.0.zip

Start Elasticsearch (so the plugin is loaded), then start Kibana (default port 5601), and let's get to the code!

1. CRUD

#1. Create a database (an index)
#number_of_shards: number of primary shards, default 5 (cannot be changed once the index is created)
#number_of_replicas: number of replicas, default 1
PUT blog
{
  "settings":{
    "index":{
      "number_of_shards":5,
      "number_of_replicas":1
    }    
  }
}
#2. Get index information
GET _all
GET blog
#3. Add a type (table) and a document
POST blog/article
{
  "title":"我是博客",
  "msg":{
    "author":"admin",
    "position":"程序员"
  },
  "desc":"i like you",
  "create_time":"2019-07-12",
  "look":50,
  "like":100
}
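Because the URL doesn't include an id, Elasticsearch generates one automatically (that's where ids like 0PkS5msBS7MmrBYMH0SR below come from). To pick the id yourself you could index with PUT and an explicit id instead, for example:

PUT blog/article/1
{
  "title":"我是博客",
  "desc":"i like you"
}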
#4. Get data (just the short form here; queries are covered in detail below)
GET blog/article/_search
#5. Update a document (PUT replaces the whole document)
PUT blog/article/0PkS5msBS7MmrBYMH0SR
{
  "title":"我是程序员",
  "msg":{
    "author":"admin",
    "position":"程序员"
  },
  "desc":"i like you",
  "create_time":"2019-07-12",
  "look":50,
  "like":100
}
#5.1 Partial update with _update
POST blog/article/0PkS5msBS7MmrBYMH0SR/_update
{
  "doc":{
    "title":"i like work ( fuck you ) "
  }
}
#6. Delete (a type cannot be deleted; a single document or the whole index can)
DELETE blog/article/0PkS5msBS7MmrBYMH0SR
DELETE blog
#7. Bulk operations (index, update and delete in a single request)
(
Format for indexing: {"index":{"_index":"index name","_type":"type name","_id":"id, optional"}}
followed on the next line by the document body
For updates: {"update":{"_id":"id","_type":"type name","_index":"index name"}}
followed on the next line by the partial document
For deletes: {"delete":{"_id":"id","_type":"type name","_index":"index name"}}
)
POST _bulk
{"index":{"_index":"blog","_type":"article","_id":1}}
{"title":"我是程序员","msg":{"author":"admin","position":"程序员"},"desc":"i like you","create_time":"2019-07-12","like":100}
{"index":{"_index":"blog","_type":"article","_id":2}}
{"title":"我是程序员","msg":{"author":"admin","position":"程序员"},"desc":"i like you","create_time":"2019-07-12","like":100}
{"update":{"_id":2,"_type":"article","_index":"blog"}}
{"doc":{"like":101}}
{"delete":{"_id":1,"_type":"article","_index":"blog"}}
#8. Define the field types (mapping)
( text: analyzed string; keyword: string that is not analyzed; properties: a nested object; date: date; format: date display format; integer: number; analyzer: which analyzer to use )
PUT blog
{
  "mappings": {
    "article": {
      "properties": {
        "title":{
          "type": "text",
          "analyzer": "ik_max_word"
        },
        "msg":{
          "properties": {
            "author":{
              "type":"keyword"
            },
            "position":{
              "type":"keyword"
            }
          }
        },
        "desc":{
          "type": "text"
        },
        "create_time":{
          "type": "date",
          "format": "yyyy-MM-dd"
        },
        "like":{
          "type": "integer"
        }
      }
    }
  }
}

Running GET blog now returns:
{
  "blog": {
    "aliases": {},
    "mappings": {
      "article": {
        "properties": {
          "create_time": {
            "type": "date",
            "format": "yyyy-MM-dd"
          },
          "desc": {
            "type": "text"
          },
          "like": {
            "type": "integer"
          },
          "msg": {
            "properties": {
              "author": {
                "type": "keyword"
              },
              "position": {
                "type": "keyword"
              }
            }
          },
          "title": {
            "type": "text",
            "analyzer": "ik_max_word"
          }
        }
      }
    },
    "settings": {
      "index": {
        "creation_date": "1562935455291",
        "number_of_shards": "5",
        "number_of_replicas": "1",
        "uuid": "IQ0B-zsNSmeV26mNjb8i_w",
        "version": {
          "created": "6000099"
        },
        "provided_name": "blog"
      }
    }
  }
}
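Before moving on to queries, it can help to see what ik_max_word actually produces for a piece of text. The standard _analyze API shows the tokens (the sample text here is my own):

GET blog/_analyze
{
  "analyzer": "ik_max_word",
  "text": "我是程序员"
}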
#1. match query (the query text is analyzed and matched against the analyzed field)
GET blog/article/_search
{
  "query":{
    "match":{
      "title":"python"
    }
  }
}
#2. term query (the search term is not analyzed)
GET blog/article/_search
{
  "query": {
    "term":{
      "title":"python"
    }
  }
}
#3. terms query (a document matches if any one value in the array matches)
GET blog/article/_search
{
  "query": {
    "terms": {
      "title": [
        "php",
        "网站"
      ]
    }
  }
}
#4. Pagination
GET blog/article/_search
{
  "query":{
    "match":{
      "title":"python"
    }
  },
  "from": 0,
  "size": 1
}
#5. Match all documents
GET blog/article/_search
{
  "query": {
    "match_all": {}
  }
}
#6. Query multiple fields with multi_match
GET blog/article/_search
{
  "query": {
    "multi_match": {
      "query": "程序员",
      "fields": ["title","msg.position"]
    }
  }  
}
#7. Return only specific fields (the fields need "store": true in the mapping)
GET blog/article/_search
{
  "stored_fields":["title","desc"],
  "query": {
    "match": {
      "title": "php"
    }
  }
}
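Note that the mapping in section #8 above never sets "store": true, so stored_fields would come back empty against it. A minimal sketch of what the title and desc fields would need to look like for this query to work (my addition, on a freshly created index):

PUT blog
{
  "mappings": {
    "article": {
      "properties": {
        "title": { "type": "text", "analyzer": "ik_max_word", "store": true },
        "desc":  { "type": "text", "store": true }
      }
    }
  }
}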
#8. Sorting with sort
GET blog/article/_search
{
  "query": {
    "match_all": {}
  },
  "sort": [
    {
      "like": {
        "order": "desc"
      }
    }
  ]
}
#9. Range query
GET blog/article/_search
{
  "query": {
    "range": {
      "like": {
        "gte": 10,
        "lte": 20
      }
    }
  }
}

Compound queries

#1. filter query
# equivalent SQL: select * from article where like=50;
GET blog/article/_search
{
  "query": {
    "bool": {
      "must": {"match_all": {}},
      "filter": {
        "term": {
          "like": "50"
        }
      }
    }
  }
}
#should = or, must = required, must_not = negation, exists = field is present
GET blog/article/_search
{
  "query": {
    "bool": {
      "should": [
        {"term":{"title":"python"}}
      ],
      "must": [
        {"term":{"like":50}}
      ]
    }
  }
}
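The example above only uses should and must; a quick sketch of must_not and exists against the same index (the field values are my own):

GET blog/article/_search
{
  "query": {
    "bool": {
      "must_not": [
        {"term": {"msg.author": "admin"}}
      ],
      "filter": {
        "exists": {"field": "create_time"}
      }
    }
  }
}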

MySQL 8.0 master-slave replication

Create two config files: my.cnf for the master (port 3320) and my3330.cnf for the slave (port 3330).

my.cnf

log-bin=mysql-bin
server-id=1
port=3320

my3330.cnf

server-id=2
port=3330
relay_log = mysql-relay-bin
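The post then jumps straight to logging into the two instances, so I'm assuming both mysqld processes are already running against their own config files and data directories, roughly like this (paths and exact invocation are my assumption):

mysqld --defaults-file=/path/to/my.cnf --datadir=/data/3320 &
mysqld --defaults-file=/path/to/my3330.cnf --datadir=/data/3330 &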

Connect to the master instance on port 3320 and create a replication user named repl:

CREATE USER 'repl'@'127.0.0.1' IDENTIFIED WITH mysql_native_password BY '123456';

GRANT REPLICATION SLAVE ON *.* TO 'repl'@'127.0.0.1';

Run show master status; and note the File and Position values; they become MASTER_LOG_FILE and MASTER_LOG_POS on the slave below.

Connect to the slave instance on port 3330:

mysql> CHANGE MASTER TO
MASTER_HOST='127.0.0.1',
MASTER_PORT=3320,
MASTER_USER='repl',
MASTER_PASSWORD='123456',
MASTER_LOG_FILE='mysql-bin.000005',
MASTER_LOG_POS=155;

Run START SLAVE;

Check with show slave status\G

When both Slave_IO_Running and Slave_SQL_Running show Yes, replication is working.

Test it:

On 3320, run create database testSlave;

On 3330, run show databases; and the new testSlave database should appear.

All done.

MariaDB 10.2 installation and usage

The recent Oracle layoffs have made everyone nervous.

So I'm hurrying to move my blog from MySQL 8 to MariaDB.

Before typing any commands, let's first go over what MariaDB actually is. The following passage is from Baidu Baike:

MariaDB is a fork of MySQL, maintained mainly by the open-source community and released under the GPL. Its goal is to stay fully compatible with MySQL, including the API and the command line, so that it can serve as a drop-in replacement. On the storage engine side it uses XtraDB in place of MySQL's InnoDB. MariaDB is led by MySQL founder Michael Widenius, who earlier sold his company MySQL AB to Sun for one billion US dollars; after Sun was acquired by Oracle, ownership of MySQL passed to Oracle as well. The name MariaDB comes from Widenius's daughter Maria.

In short:

MySQL ended up owned by a closed-source company, its founder couldn't stand it, so he wrote MariaDB as a MySQL replacement.

Enough talk, let's get going.

First, download the binary tarball:

wget http://mirrors.neusoft.edu.cn/mariadb//mariadb-10.2.23/bintar-linux-x86_64/mariadb-10.2.23-linux-x86_64.tar.gz

tar -xvf mariadb-10.2.23-linux-x86_64.tar.gz

mv mariadb-10.2.23-linux-x86_64 mariadb

cd mariadb
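The install script below runs as --user=mysql, so I'm assuming a mysql system user already exists on the box; if it doesn't, something along these lines would create it first (standard Linux commands, not part of the original write-up):

groupadd mysql
useradd -r -g mysql -s /bin/false mysql
chown -R mysql /soft/mariadb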

./scripts/mysql_install_db --datadir=/soft/mariadb/data --user=mysql # install (initialize the data directory)

Create my.cnf:

# Example MariaDB config file for very large systems.
#
# This is for a large system with memory of 1G-2G where the system runs mainly
# MariaDB.
#
# MariaDB programs look for option files in a set of
# locations which depend on the deployment platform.
# You can copy this option file to one of those
# locations. For information about these locations, do:
# 'my_print_defaults --help' and see what is printed under
# Default options are read from the following files in the given order:
# More information at: http://dev.mysql.com/doc/mysql/en/option-files.html
#
# In this file, you can use all long options that a program supports.
# If you want to know which options a program supports, run the program
# with the "--help" option.

# The following options will be passed to all MySQL clients
[client]
#password	= your_password
port		= 3310
socket		= /soft/mariadb/mysql.sock

# Here follows entries for some specific programs

# The MySQL server
[mysqld]
log_error = /soft/mariadb/error.log
basedir = /soft/mariadb
datadir = /soft/mariadb/data
port		= 3310
socket		= /soft/mariadb/mysql.sock
skip-external-locking
key_buffer_size = 384M
max_allowed_packet = 1M
table_open_cache = 512
sort_buffer_size = 2M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
myisam_sort_buffer_size = 64M
thread_cache_size = 8
query_cache_size = 32M
# Try number of CPU's*2 for thread_concurrency
thread_concurrency = 8
innodb_file_per_table = on

# Point the following paths to a dedicated disk
#tmpdir		= /tmp/

# Don't listen on a TCP/IP port at all. This can be a security enhancement,
# if all processes that need to connect to mysqld run on the same host.
# All interaction with mysqld must be made via Unix sockets or named pipes.
# Note that using this option without enabling named pipes on Windows
# (via the "enable-named-pipe" option) will render mysqld useless!
# 
#skip-networking

# Replication Master Server (default)
# binary logging is required for replication
log-bin=mysql-bin

# required unique id between 1 and 2^32 - 1
# defaults to 1 if master-host is not set
# but will not function as a master if omitted
server-id	= 1

# Replication Slave (comment out master section to use this)
#
# To configure this host as a replication slave, you can choose between
# two methods :
#
# 1) Use the CHANGE MASTER TO command (fully described in our manual) -
#    the syntax is:
#
#    CHANGE MASTER TO MASTER_HOST=<host>, MASTER_PORT=<port>,
#    MASTER_USER=<user>, MASTER_PASSWORD=<password> ;
#
#    where you replace <host>, <user>, <password> by quoted strings and
#    <port> by the master's port number (3306 by default).
#
#    Example:
#
#    CHANGE MASTER TO MASTER_HOST='125.564.12.1', MASTER_PORT=3306,
#    MASTER_USER='joe', MASTER_PASSWORD='secret';
#
# OR
#
# 2) Set the variables below. However, in case you choose this method, then
#    start replication for the first time (even unsuccessfully, for example
#    if you mistyped the password in master-password and the slave fails to
#    connect), the slave will create a master.info file, and any later
#    change in this file to the variables' values below will be ignored and
#    overridden by the content of the master.info file, unless you shutdown
#    the slave server, delete master.info and restart the slaver server.
#    For that reason, you may want to leave the lines below untouched
#    (commented) and instead use CHANGE MASTER TO (see above)
#
# required unique id between 2 and 2^32 - 1
# (and different from the master)
# defaults to 2 if master-host is set
# but will not function as a slave if omitted
#server-id       = 2
#
# The replication master for this slave - required
#master-host     =   <hostname>
#
# The username the slave will use for authentication when connecting
# to the master - required
#master-user     =   <username>
#
# The password the slave will authenticate with when connecting to
# the master - required
#master-password =   <password>
#
# The port the master is listening on.
# optional - defaults to 3306
#master-port     =  <port>
#
# binary logging - not required for slaves, but recommended
#log-bin=mysql-bin
#
# binary logging format - mixed recommended 
#binlog_format=mixed

# Uncomment the following if you are using InnoDB tables
#innodb_data_home_dir = /usr/local/mysql/data
#innodb_data_file_path = ibdata1:2000M;ibdata2:10M:autoextend
#innodb_log_group_home_dir = /usr/local/mysql/data
# You can set .._buffer_pool_size up to 50 - 80 %
# of RAM but beware of setting memory usage too high
#innodb_buffer_pool_size = 384M
# Set .._log_file_size to 25 % of buffer pool size
#innodb_log_file_size = 100M
#innodb_log_buffer_size = 8M
#innodb_flush_log_at_trx_commit = 1
#innodb_lock_wait_timeout = 50

[mysqldump]
quick
max_allowed_packet = 16M

[mysql]
no-auto-rehash
# Remove the next comment character if you are not familiar with SQL
#safe-updates

[myisamchk]
key_buffer_size = 256M
sort_buffer_size = 256M
read_buffer = 2M
write_buffer = 2M

[mysqlhotcopy]
interactive-timeout

./bin/mysqld --defaults-file=/soft/mariadb/my.cnf --user=mysql & # start the server

./bin/mysqladmin -u root -S mysql.sock password "1234" # set the root password

./bin/mysql -u root -p -S mysql.sock # connect; if everything worked you get the MariaDB prompt
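Once connected, a quick sanity check from the prompt (my own addition, not in the original):

SELECT VERSION();
-- should report something like 10.2.23-MariaDB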

MongoDB replica set

First, create three conf files.

The first, mongodb.conf:

dbpath = /soft/mongodb4.0/data/db # data directory
logpath = /soft/mongodb4.0/logs/mongodb.log # log file
port = 27017 # port
fork = true # run as a daemon in the background
bind_ip=127.0.0.1
replSet=test2 # replica set name, the same for every member

The second, mongodb-1.conf:

dbpath = /soft/mongodb4.0/data1/db # data directory
logpath = /soft/mongodb4.0/logs1/mongodb.log # log file
port = 27018 # port
fork = true # run as a daemon in the background
bind_ip=127.0.0.1 # listen on localhost only
replSet=test2

The third, mongodb-2.conf:

dbpath = /soft/mongodb4.0/data2/db # data directory
logpath = /soft/mongodb4.0/logs2/mongodb.log # log file
port = 27019 # port
fork = true # run as a daemon in the background
bind_ip=127.0.0.1 # listen on localhost only
replSet=test2

Then start all three instances with their conf files:

./mongod -f ../mongodb.conf

./mongod -f ../mongodb-1.conf

./mongod -f ../mongodb-2.conf

Then connect to the first instance (127.0.0.1:27017) with the mongo shell and run the following:

rs.initiate({_id:"test2",members:[{_id:0,host:"127.0.0.1:27017","priority":100},{_id:1,host:"127.0.0.1:27018","priority":90},{_id:2,host:"127.0.0.1:27019","priority":80}]})

It returns ok:

{
  "ok" : 1,
  "operationTime" : Timestamp(1556585401, 1),
  "$clusterTime" : {
    "clusterTime" : Timestamp(1556585401, 1),
    "signature" : {
      "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
      "keyId" : NumberLong(0)
    }
  }
}

Then exit and connect to 27018 and 27019: their shell prompts change to test2:SECONDARY (secondary nodes).

The prompt on 27017 becomes test2:PRIMARY (the primary node).
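You can also confirm each member's role from any node with rs.status(); the stateStr field of every entry in members shows PRIMARY or SECONDARY (the one-liner below is my own):

rs.status().members.forEach(function (m) {
    print(m.name + " -> " + m.stateStr);
})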

Next, on the primary (27017), insert some data:

use huxierong

db.huxierong_user.insert({title:"123456"})

Exit 27017 and connect to 27018.

rs.slaveOk(); allows reading data on a secondary member.

use huxierong

show collections

You should now see the huxierong_user collection.

Then db.huxierong_user.find() returns the replicated data.

Check 27019 the same way.

MySQL stored procedure: create a user and a token

-- The original post shows only the procedure body; the DELIMITER/CREATE PROCEDURE
-- wrapper, the procedure name and the parameter types are my assumptions.
DELIMITER //
CREATE PROCEDURE add_user_with_token(
    IN user_name VARCHAR(64), IN pass_word VARCHAR(128),
    IN user_token VARCHAR(128), IN token_ip VARCHAR(64))
BEGIN
    DECLARE user_id INT;
    DECLARE date_time INT;
    START TRANSACTION;
        SET date_time = (SELECT unix_timestamp());
        INSERT INTO cartoon_user(username,password,update_time,create_time) VALUES(user_name,pass_word,date_time,date_time);
        SET user_id = (SELECT id FROM cartoon_user WHERE username = user_name);
        INSERT INTO cartoon_token(user_id,token,ip,update_time,create_time) VALUES(user_id,user_token,token_ip,date_time,date_time);
        SELECT user_id;
    COMMIT;
END //
DELIMITER ;
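A quick usage sketch (the procedure name matches the header I assumed above, and the argument values are made up):

CALL add_user_with_token('alice', 'hashed-password', 'some-token-value', '127.0.0.1');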

MongoDB assorted notes

Create / switch to a database (the mongo shell has no createDatabase command; the database is created implicitly the first time you write to it)

use huxierong

Create a collection

db.createCollection("user")

Insert a document

db.user.insert( {"title":"胡谢荣",age:"23",sex:"boy"} )

Convert the string age field to a number

db.user.find().forEach(function(e){
    e.age = NumberInt(e.age);
    db.user.save(e)
})

Update a document

db.user.update({"title":"胡谢荣"},{$set:{"age":50}})

Pagination

db.user.find().skip(0).limit(15)

Create an index (a unique index)

db.user.createIndex({title:1},{unique:true})
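With the unique index in place, inserting a second document with the same title now fails with an E11000 duplicate key error, for example (my own illustration):

db.user.insert({"title":"胡谢荣", age: NumberInt(30), sex: "boy"})
// fails with an E11000 duplicate key error on the title_1 index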

Aggregation

$group (group by the type field, collect the distinct titles, and sum the ages):

db.user.aggregate([ { $group: { _id: "$type", title: { $addToSet: "$title" }, age: { $sum: "$age" } } } ])

Pipeline: $match followed by $project

db.user.aggregate([
    { $match: { age: { $gt: 20 } } },
    { $project: { _id: 0, title: 1 } }
])



Pipeline: $match followed by $group

db.user.aggregate([
    { $match: { age: { $gt: 20 } } },
    { $group: { _id: null, age: { $sum: "$age" } } }
])