# Bind address to use for the RPC service for backup and restore (backup/restore port)
bind-address = "127.0.0.1:8088"
[http]
auth-enabled = false
# The bind address used by the HTTP service (HTTP API)
bind-address = ":8086"
Changing the default ports is a good security habit.
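For example, a sketch with arbitrary non-default ports (the numbers are examples, not recommendations) in /etc/influxdb/influxdb.conf:
# RPC service for backup and restore (default 8088)
bind-address = "127.0.0.1:18088"
[http]
# HTTP API (default 8086)
bind-address = ":18086"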
Log file storage
vim /etc/init.d/influxdb
if [ -z "$STDERR" ]; then
    STDERR=/home/work/influxdb/log/influxd.log
fi
Separating the HTTP access log
Edit the configuration file:
vim /etc/influxdb/influxdb.conf
[http]
access-log-path = "/home/work/influxdb/log/http.log"
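A quick sanity check after changing the setting (paths match the configuration above): restart, send any HTTP request, then look at the new log file.
sudo service influxdb restart
curl -s "http://127.0.0.1:8086/query?q=show+databases" > /dev/null
tail -n 5 /home/work/influxdb/log/http.log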
InfluxDB system log
vim /etc/logrotate.d/influxdb
/home/work/influxdb/log/influxd.log {
daily
rotate 7
missingok
dateext
copytruncate
compress
}
Test that the configuration works:
logrotate -d /etc/logrotate.d/influxdb
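logrotate -d is only a dry run; to actually rotate once and verify permissions, the standard -f flag can be used:
logrotate -f /etc/logrotate.d/influxdb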
InfluxDB HTTP access log
cp /etc/logrotate.d/influxdb /etc/logrotate.d/influxdb-http
vim /etc/logrotate.d/influxdb-http
/home/work/influxdb/log/http.log {
daily
rotate 7
missingok
dateext
copytruncate
compress
}
Test that the configuration works:
logrotate -d /etc/logrotate.d/influxdb-http
create user db_xxx_reader with password 'xxx'
create user db_xxx_write with password 'xxx'
grant read on db_xxx to db_xxx_reader
grant write on db_xxx to db_xxx_write
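Note: before enabling HTTP authentication, at least one admin user must exist, otherwise most requests will be rejected once auth is on; the name below is just an example:
create user admin with password 'xxx' with all privileges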
Edit the configuration file to enable HTTP authentication:
auth-enabled = true
Restart InfluxDB:
sudo service influxdb restart
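With authentication on, the CLI needs credentials; a quick check using the admin user created above:
influx -host 127.0.0.1 -port 8086 -username admin -password 'xxx' -execute 'show databases'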
Backup and restore
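The RPC port configured at the top (8088) is what the backup tooling talks to. A minimal sketch assuming InfluxDB 1.5+ with the portable format; database name and paths are placeholders:
influxd backup -portable -database db_xxx -host 127.0.0.1:8088 /home/work/influxdb/backup
influxd restore -portable -db db_xxx -newdb db_xxx_restored /home/work/influxdb/backup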
Data import and export
curl https://s3.amazonaws.com/noaa.water-database/NOAA_data.txt -o NOAA_data.txt
influx -host 127.0.0.1 -port 8086 -username admin -password test -import -path NOAA_data.txt -precision s -pps 5000
The import file must follow this format:
# DDL
CREATE DATABASE NOAA_water_database
# DML
# CONTEXT-DATABASE: NOAA_water_database
h2o_feet,location=coyote_creek water_level=8.120,level\ description="between 6 and 9 feet" 1439856000
h2o_feet,location=coyote_creek water_level=8.005,level\ description="between 6 and 9 feet" 1439856360
influx_inspect export -database NOAA_water_database -retention autogen -waldir ~/.influxdb/wal -datadir ~/.influxdb/data/ -out noaa_water_database.txt
More: influx_inspect official documentation
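If your version supports these flags (recent 1.x releases do, but treat this as a sketch), the export can be gzip-compressed and re-imported directly:
influx_inspect export -database NOAA_water_database -retention autogen -waldir ~/.influxdb/wal -datadir ~/.influxdb/data/ -compress -out noaa_water_database.txt.gz
influx -import -path noaa_water_database.txt.gz -compressed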
Authentication and authorization management
Enabling HTTPS
More: official documentation on enabling HTTPS
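A minimal sketch of the relevant [http] settings, assuming a certificate already exists at the paths shown (placeholders):
[http]
https-enabled = true
https-certificate = "/etc/ssl/influxdb.pem"
# only needed when the private key is not bundled into the certificate file
https-private-key = "/etc/ssl/influxdb-key.pem"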
View runtime statistics and diagnostics:
show stats
show diagnostics
The _internal database
Requires the [monitor] configuration options, which are enabled by default:
[monitor]
# Whether to record statistics internally.
# store-enabled = true
# The destination database for recorded statistics
# store-database = "_internal"
# The interval at which to record statistics
# store-interval = "10s"
Example: points written per second
http://127.0.0.1:8086/query?chunked=true&db=_internal&q=select+derivative%28pointReq%2C+1s%29+from+%22write%22+where+time+%3E+now%28%29+-+5m+tz%28%27Asia%2FShanghai%27%29
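URL-decoded, the query above is:
select derivative(pointReq, 1s) from "write" where time > now() - 5m tz('Asia/Shanghai')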
More: _internal documentation
Retention policies
View retention policies:
show retention policies on db_xxx
Create a retention policy:
create retention policy "retentionpolicy_xxx" on "db_xxx" duration 4w replication 1 default
Delete a retention policy:
drop retention policy "retentionpolicy_xxx" on db_xxx
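Existing policies can also be changed in place; a sketch (the new duration is arbitrary):
alter retention policy "retentionpolicy_xxx" on "db_xxx" duration 8w default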
Data subscriptions
How it works: data written to InfluxDB is forwarded, in line protocol format, over HTTP/HTTPS/UDP.
show subscriptions
create subscription "db_sub_xx" on "db_xxx"."retentionpolicy_xxx" destinations all 'http://user:passwd@host:port'
drop subscription "db_sub_xx" on "db_xxx"."retentionpolicy_xxx"
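Destinations can also be UDP endpoints, and ANY spreads writes across them instead of duplicating to all of them; hosts below are placeholders:
create subscription "db_sub_udp" on "db_xxx"."retentionpolicy_xxx" destinations any 'udp://host1:9090', 'udp://host2:9090'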
Make sure the subscription password does not contain special characters.
More: official documentation on subscriptions
line protocol
Time zone clause: tz('Asia/Shanghai')
SELECT MEAN("difference") FROM (SELECT "cats" - "dogs" AS "difference" FROM "pet_daycare")
SELECT MEAN("water_level") FROM "h2o_feet"; SELECT "water_level" FROM "h2o_feet" LIMIT 2
Comparison operators: = <= >= < > != <>
Regular expression operators: =~ !~
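For example, filtering tag values with a regular expression (measurement and tag from the samples above):
select "water_level" from "h2o_feet" where "location" =~ /creek/ limit 2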
More: arithmetic operators
Type casting is only supported between numeric types.
SELECT "water_level"::integer FROM "h2o_feet" LIMIT 4
show continuous queries
create continuous query
drop continuous query
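The create/drop statements need the full definition; a minimal sketch with placeholder names and interval:
create continuous query "cq_30m" on "db_xxx" begin select mean("water_level") into "average_water_level" from "h2o_feet" group by time(30m) end
drop continuous query "cq_30m" on "db_xxx"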
Query execution analysis
explain <query>
explain analyze <query>
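For example, against one of the sample queries above (EXPLAIN ANALYZE requires a newer 1.x release):
explain analyze select mean("water_level") from "h2o_feet"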
View currently running queries:
show queries
Kill a specific query (the qid comes from show queries):
kill query <qid>
Query-related configuration
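A sketch of the query-related settings in the [coordinator] section (the values shown are the 1.x defaults; check your own influxdb.conf):
[coordinator]
# maximum time a query may run before being killed (0 disables the limit)
query-timeout = "0s"
# log queries that run longer than this threshold (0 disables)
log-queries-after = "0s"
# maximum number of queries running at once (0 = unlimited)
max-concurrent-queries = 0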
Choose retention policy and shard group durations that match your data volume; see the sketch after this list.
Each InfluxDB shard stores the data for one time interval; each shard has its own underlying TSM storage engine, with its own cache, WAL, and TSM files.
Longer shard durations: less data duplication across shards, better compression, faster queries.
Shorter shard durations: more efficient data deletion, since expired shards are dropped whole.
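The shard group duration is set on the retention policy; a sketch reusing the placeholder names above:
create retention policy "retentionpolicy_xxx" on "db_xxx" duration 4w replication 1 shard duration 1d default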
When writing a string field value, wrap it in double quotes; special characters must be escaped.
Be aware of overwrites when writing data:
A point is uniquely identified by database + retention policy + measurement + tag set + timestamp; writing a duplicate merges the field sets, and the newer value wins for any field present in both.
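For example, the two line protocol writes below target the same series and timestamp, so they merge into a single point whose fields are water_level=8.005 plus the original level description:
h2o_feet,location=coyote_creek water_level=8.120,level\ description="between 6 and 9 feet" 1439856000
h2o_feet,location=coyote_creek water_level=8.005 1439856000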
DROP operations are expensive; use them with caution.
Common issues
A tag exceeds 100,000 distinct values:
partial write: max-values-per-tag limit exceeded (100000/100000)
Fix: disable the limit in the [data] section (or clean up the offending tag values):
max-values-per-tag = 0
Cache memory usage exceeds 1 GB:
Execute error[code:500][err:{"error":"engine: cache-max-memory-size exceeded: (1073741900/1073741824)"}]
Fix: raise the limit in the [data] section:
cache-max-memory-size = "2g"