
Environment (login banner): Welcome to Armbian 23.8.1 Bullseye with bleeding edge Linux 6.4.13-edge-meson

Monitoring – How to install Prometheus/Grafana on arm – Raspberry PI/Rock64 | Blogs (mytinydc)

How to Run Prometheus on a Raspberry Pi – How to Raspberry

Grafana for Ubuntu and Debian (ARMv7), SHA256: 8fa847587f482ea06b077c8791e3fc350b1fa012349c1e476e178f4a9ad7a6fb
sudo apt-get install -y adduser libfontconfig1 musl
wget https://dl.grafana.com/enterprise/release/grafana-enterprise_9.3.16_armhf.deb
sudo dpkg -i grafana-enterprise_9.3.16_armhf.deb
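
The .deb installs a grafana-server systemd unit; a minimal start/enable sequence (assuming the standard unit shipped with the official package):

sudo systemctl daemon-reload
sudo systemctl enable --now grafana-server
sudo systemctl status grafana-server

Grafana then listens on port 3000, which is used later in this article.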


 

prometheus
Reference:
Monitoring – How to install Prometheus/Grafana on arm – Raspberry PI/Rock64 | Blogs (mytinydc)

cd /usr/local/src

wget https://github.com/prometheus/prometheus/releases/download/v2.6.0/prometheus-2.6.0.linux-armv7.tar.gz

tar xfz prometheus-2.6.0.linux-armv7.tar.gz
rm prometheus-2.6.0.linux-armv7.tar.gz

ln -s prometheus-2.6.0.linux-armv7 prometheus

cd prometheus/

vi prometheus.yml

global:
  scrape_interval: 1m # Default is every 1 minute.

scrape_configs:
  # The Prometheus server itself listens on TCP/9090
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']

  # Exporters: client servers
  - job_name: 'nodes'
    scrape_interval: 1m # Override the default global interval for this job
    scrape_timeout: 10s # Override the default global timeout for this job
    static_configs:
      - targets: ['localhost:9100']
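
Before starting Prometheus, the promtool binary shipped in the same tarball can validate the edited file (a quick sanity check, run from the prometheus directory):

./promtool check config prometheus.yml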





cat prometheus.yml
# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
    - targets: ['localhost:9090']
  # Exporters: client servers
  - job_name: 'nodes'
    static_configs:
    - targets: ['localhost:9100']



Or:
 cat prometheus.yml
# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
    - targets: ['localhost:9090']
  # Exporters: client servers
  - job_name: 'nodes'
    scrape_interval: 1m # Override the default global interval for this job
    scrape_timeout: 10s # Override the default global timeout for this job
    static_configs:
    - targets: ['localhost:9100']




nohup ./prometheus &
Or:
nohup /usr/local/src/prometheus-2.6.0.linux-armv7/prometheus &
Or:
cat /etc/profile.d/prometheus.sh 
#!/bin/bash
cd /usr/local/src/prometheus-2.6.0.linux-armv7/ && nohup ./prometheus & 
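
Once Prometheus is running, a quick liveness check (it exposes a /-/healthy endpoint on its web port):

curl -s http://localhost:9090/-/healthy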

node_exporter

cd /usr/local/src

wget https://github.com/prometheus/node_exporter/releases/download/v0.17.0/node_exporter-0.17.0.linux-armv7.tar.gz

tar xfz node_exporter-0.17.0.linux-armv7.tar.gz

ln -s node_exporter-0.17.0.linux-armv7/ node_exporter

node_exporter.service

cat /etc/systemd/system/node_exporter.service
[Unit]
Description=node_exporter
Documentation=https://github.com/prometheus/node_exporter

[Service]
ExecStart=/usr/local/src/node_exporter/node_exporter
Restart=on-failure

[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl enable node_exporter.service --now
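
If the unit started correctly, the metrics endpoint should answer locally (a quick check):

curl -s http://localhost:9100/metrics | head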

Check everything in a browser:

ip:9090/targets (Prometheus targets page)

ip:9100 (node_exporter metrics)

ip:3000 (Grafana)

Log in as admin; the default password is admin, so change it right after the first login.

Grafana dashboard templates to import: 8919 and 11074.

 


Multiple hosts

cd /usr/local/src

wget https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-386.tar.gz

tar zxvf node_exporter-1.8.2.linux-386.tar.gz
ln -s node_exporter-1.8.2.linux-386/ node_exporter


 

cat /etc/systemd/system/node_exporter.service
[Unit]
Description=node_exporter
Documentation=https://github.com/prometheus/node_exporter

[Service]
ExecStart=/usr/local/src/node_exporter/node_exporter
Restart=on-failure

[Install]
WantedBy=multi-user.target

systemctl daemon-reload && systemctl enable node_exporter.service --now
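
Back on the Prometheus server, confirm the new client is reachable (192.168.1.97 is the client address used in the config below):

curl -s http://192.168.1.97:9100/metrics | head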

cat /usr/local/src/prometheus.yml
# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
    - targets: ['localhost:9090']
  # Exporters: client servers
  - job_name: 'nodes'
    scrape_interval: 1m # Override the default global interval for this job
    scrape_timeout: 10s # Override the default global timeout for this job
    static_configs:
      - targets: ['localhost:9100','192.168.1.97:9100']

On the server, restart Prometheus, and restart node_exporter on the clients.
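
One way to apply the new prometheus.yml without a full restart is to send SIGHUP, which makes Prometheus reload its configuration (assuming the nohup-started process from above):

kill -HUP $(pidof prometheus)
systemctl restart node_exporter.service   # on each client, if needed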

For Grafana, use dashboard template 8919.

Start Prometheus at boot

cd /etc/profile.d
cat prometheus.sh 
#!/bin/bash
cd /usr/local/src/prometheus-2.6.0.linux-armv7/ && nohup ./prometheus &
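
Note that scripts in /etc/profile.d only run at interactive login, not at boot. A more reliable alternative is a systemd unit modeled on node_exporter.service above (a sketch; the /usr/local/src paths and the prometheus symlink are the ones created earlier in this article):

cat /etc/systemd/system/prometheus.service
[Unit]
Description=prometheus
Documentation=https://github.com/prometheus/prometheus

[Service]
WorkingDirectory=/usr/local/src/prometheus
ExecStart=/usr/local/src/prometheus/prometheus --config.file=/usr/local/src/prometheus/prometheus.yml
Restart=on-failure

[Install]
WantedBy=multi-user.target

systemctl daemon-reload && systemctl enable prometheus.service --now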

Tags: Armbian 玩客云 Bullseye Grafana