gorm 接入 Prometheus 与 Grafana
在同级目录下分别创建
docker-compose.yml 与 prometheus.yml 两个配置文件
version: '3.8'

services:
  prometheus:
    image: prom/prometheus
    container_name: prometheus
    ports:
      - "9090:9090"   # Prometheus web UI port
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    networks:
      - monitoring

  grafana:
    image: grafana/grafana
    container_name: grafana
    ports:
      - "3000:3000"
    networks:
      - monitoring
    depends_on:
      - prometheus

  pushgateway:
    image: prom/pushgateway
    container_name: pushgateway
    ports:
      - "9091:9091"
    networks:
      - monitoring

networks:
  monitoring:
    driver: bridge
# NOTE: the original used C-style `//` comments, which are invalid in YAML
# and would make Prometheus fail to load this file; YAML comments use `#`.
global:
  scrape_interval: 5s   # how often Prometheus scrapes targets
scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      # The Go program runs on the host while Prometheus/Grafana run in
      # containers on the same bridge network; `host.docker.internal` lets
      # a container reach the host. If the program itself runs as a
      # container, attach it to the same bridge network and target its
      # container name instead.
      - targets: ['host.docker.internal:8181']
gorm中间件配置
func init() {// 设置MySQL连接信息dsn := "sa:sa123456@tcp(127.0.0.1:3306)/test?charset=utf8mb4&parseTime=True&loc=Local"// 创建一个GORM连接db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})if err != nil {fmt.Println("Failed to connect to the database:", err)return}db.Use(prometheus.New(prometheus.Config{DBName: "test", // 使用 `DBName` 作为指标 labelRefreshInterval: 15, // 指标刷新频率(默认为 15 秒)PushAddr: "", // 如果配置了 `PushAddr`,则推送指标StartServer: true, // 启用一个 http 服务来暴露指标HTTPServerPort: 8181, // 配置 http 服务监听端口,默认端口为 8080 (如果您配置了多个,只有第一个 `HTTPServerPort` 会被使用)MetricsCollector: []prometheus.MetricsCollector{// 从 SHOW STATUS 选择变量变量,如果不设置,则使用全部的状态变量&prometheus.MySQL{VariableNames: []string{"Threads_running"},},},}))// 获取通用数据库对象 sql.DBsqlDB, err := db.DB()if err != nil {fmt.Println("Failed to get generic database object:", err)return}// 设置连接池参数// SetMaxIdleConns 用于设置连接池中空闲连接的最大数量sqlDB.SetMaxIdleConns(10)// SetMaxOpenConns 设置打开数据库连接的最大数量sqlDB.SetMaxOpenConns(100)// SetConnMaxLifetime 设置了连接可复用的最大时间sqlDB.SetConnMaxLifetime(time.Hour)global.DB = db// 检查连接是否成功fmt.Println("Connected to the database successfully")
}
监听成功
我们利用 Go 的高并发特性,将同时运行的协程数量限制为最多 100(与连接池的最大连接数保持一致)。
// SetMaxOpenConns 设置打开数据库连接的最大数量 sqlDB.SetMaxOpenConns(100)
func save() {// 设置随机种子rand.Seed(time.Now().UnixNano())// 插入2000万条数据batchSize := 1000totalRecords := 20000000var wg errgroup.Groupwg.SetLimit(100)for i := 0; i < totalRecords/batchSize; i++ {num := iwg.Go(func() error {var employees []Employee//for j := 0; j < batchSize; j++ {employees = append(employees, Employee{EmpNo: num*batchSize + num + 1,BirthDate: randomDate(time.Date(1950, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2000, 12, 31, 0, 0, 0, 0, time.UTC)),FirstName: randomString(14),LastName: randomString(16),Gender: randomGender(),HireDate: randomDate(time.Date(1980, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2024, 12, 31, 0, 0, 0, 0, time.UTC)),})fmt.Printf("Inserted %d records\n", (i+1)*batchSize)global.DB.Create(&employees)return nil})//fmt.Printf("Inserted %d records\n", (i+1)*batchSize)}wg.Wait()
}
在prometheus可以看到相关指标信息
在 Grafana 中配置好相应的 datasource 后,即可看到数据库连接池的相关指标