docker run -d --net="host" --pid="host" --name=node-exporter -v "/:/host:ro,rslave" prom/node-exporter --path.rootfs /host --web.listen-address=":19100"
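After the container starts, you can confirm the exporter is serving metrics. A quick check (run on the same host; since the container uses --net=host and listens on 19100 as configured above, localhost works):

docker ps --filter name=node-exporter
curl -s http://localhost:19100/metrics | head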
For a non-Docker installation, follow the four steps below.
Step 1
Download the node_exporter release archive (node_exporter-0.14.0.linux-amd64.tar.gz) from the Prometheus downloads page.
Step 2
Upload the archive to /opt/exporter on the server.
Extract it: tar -xvzf node_exporter-0.14.0.linux-amd64.tar.gz
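For reference, the upload in step 2 can be done with scp (a sketch; the user and server address below are placeholders, not from the original):

scp node_exporter-0.14.0.linux-amd64.tar.gz root@<server-ip>:/opt/exporter/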
Step 3
Enter the extracted directory and run:
./node_exporter --web.listen-address=":19100" &
To keep it running after you log out, use nohup instead: nohup ./node_exporter --web.listen-address=":19100" &
Then open port 19100 in the server's firewall; to test, open http://<server-ip>:19100/metrics and you should see the metrics data.
To restart the firewall after changing rules: on CentOS 6, run service iptables restart (equivalently /etc/init.d/iptables restart); on CentOS 7 the default firewall is firewalld, so use systemctl restart firewalld.
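To actually open port 19100, a minimal sketch (assuming iptables on CentOS 6 and firewalld on CentOS 7; adjust to whichever firewall your servers actually run):

# CentOS 6 (iptables)
iptables -I INPUT -p tcp --dport 19100 -j ACCEPT
service iptables save

# CentOS 7 (firewalld)
firewall-cmd --permanent --add-port=19100/tcp
firewall-cmd --reload

Then verify from another machine: curl http://<server-ip>:19100/metrics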
Step 4
Configure prometheus.yml by adding a scrape job for the new node_exporter target:
  - job_name: '214服务器'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      # - targets: ['166.16.6.75:9090']
      - targets: ['192.16.6.214:19100']
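After editing prometheus.yml, the configuration can be validated and reloaded without a full restart. A sketch (promtool ships with Prometheus; the /-/reload endpoint only works if Prometheus was started with --web.enable-lifecycle, and the 192.16.6.214:9090 address assumes the default port on the Prometheus server used in this article):

promtool check config /etc/prometheus/prometheus.yml
curl -X POST http://192.16.6.214:9090/-/reload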
The full configuration file:
# my global config
global:
  scrape_interval: 15s     # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - 192.16.6.214:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - "/etc/prometheus/rules/*.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: '普罗米修斯214服务器'
    static_configs:
      - targets: ['192.16.6.214:19100']
        labels:
          instance: '普罗米修斯214服务器'

  - job_name: '鞍山调剂数据库服务器'
    static_configs:
      - targets: ['192.16.6.195:19100']
        labels:
          instance: '鞍山调剂平台服务器'

  - job_name: '鞍山调剂平台服务器'
    static_configs:
      - targets: ['192.16.6.193:19100']
        labels:
          instance: '鞍山调剂平台服务器'

  - job_name: '鞍山监管数据库服务器'
    static_configs:
      - targets: ['192.16.6.194:19100']
        labels:
          instance: '鞍山监管数据库服务器'

  - job_name: '鞍山监管平台服务器'
    static_configs:
      - targets: ['192.16.6.192:19100']
        labels:
          instance: '鞍山监管平台服务器'

  - job_name: '成都调剂数据库服务器'
    static_configs:
      - targets: ['192.16.6.218:19100']
        labels:
          instance: '成都调剂数据库服务器'

  - job_name: '成都调剂平台服务器'
    static_configs:
      - targets: ['192.16.6.217:19100']
        labels:
          instance: '成都调剂平台服务器'

  - job_name: '成都监管数据库服务器'
    static_configs:
      - targets: ['192.16.6.215:19100']
        labels:
          instance: '成都监管数据库服务器'

  - job_name: '成都监管平台服务器'
    static_configs:
      - targets: ['192.16.6.216:19100']
        labels:
          instance: '成都监管平台服务器'

  - job_name: 'tomcat'
    file_sd_configs:
      # references the discovery file written earlier
      - files: ['/etc/prometheus/tomcat.yml']
        refresh_interval: 15s

  # MySQL monitoring
  - job_name: 'mysql210'
    static_configs:
      - targets: ['192.16.6.214:9104']  # mysqld-exporter container ip:port, or host ip:mapped port
        labels:
          instance: '210数据库'  # instance name or ip

  - job_name: 'mysql195'
    static_configs:
      - targets: ['192.16.6.214:9105']  # mysqld-exporter container ip:port, or host ip:mapped port
        labels:
          instance: '鞍山调剂数据库195'  # instance name or ip

  - job_name: 'mysql194'
    static_configs:
      - targets: ['192.16.6.214:9106']  # mysqld-exporter container ip:port, or host ip:mapped port
        labels:
          instance: '鞍山监管数据库194'  # instance name or ip

  - job_name: 'mysql218'
    static_configs:
      - targets: ['192.16.6.218:9107']  # mysqld-exporter container ip:port, or host ip:mapped port
        labels:
          instance: '成都调剂数据库218'  # instance name or ip

  - job_name: 'mysql215'
    static_configs:
      - targets: ['192.16.6.214:9108']  # mysqld-exporter container ip:port, or host ip:mapped port
        labels:
          instance: '成都监管数据库215'  # instance name or ip
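The 'tomcat' job above reads its targets from /etc/prometheus/tomcat.yml via file_sd_configs. A minimal sketch of what that file can contain (the target address and label below are placeholders, not taken from the original):

- targets: ['192.16.6.214:18080']
  labels:
    instance: 'tomcat-214'

The mysqlNNN jobs scrape mysqld-exporter instances on the ports listed in their targets. One common way to start such an instance with Docker is shown below (a sketch; classic prom/mysqld-exporter images read the MySQL DSN from the DATA_SOURCE_NAME environment variable, and the credentials, database host, and port here are placeholders):

docker run -d --name=mysqld-exporter-210 -p 9104:9104 \
  -e DATA_SOURCE_NAME="exporter_user:exporter_password@(192.16.6.210:3306)/" \
  prom/mysqld-exporter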