
Replacing a failed RAID disk in RHEL 5

I. Replacing a failed disk in a RAID 5 array:
[root@server4 ~]# mdadm -D /dev/md0
/dev/md0:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:52:53 2009
     Raid Level : raid5
     Array Size : 4016000 (3.83 GiB 4.11 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 3
  Total Devices : 3
Preferred Minor : 0
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:12:39 2009
          State : clean
Active Devices : 3
Working Devices : 3
Failed Devices : 0
  Spare Devices : 0
         Layout : left-symmetric
     Chunk Size : 64K
           UUID : 9b96b4e8:53d431df:9e9c063d:285a9865
         Events : 0.34
    Number   Major   Minor   RaidDevice State
       0       8       19        0      active sync   /dev/sdb3
       1       8       35        1      active sync   /dev/sdc3
       2       8       51        2      active sync   /dev/sdd3
[root@server4 ~]# mdadm --fail /dev/md0 /dev/sdd3
mdadm: set /dev/sdd3 faulty in /dev/md0
[root@server4 ~]# mdadm -D /dev/md0
/dev/md0:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:52:53 2009
     Raid Level : raid5
     Array Size : 4016000 (3.83 GiB 4.11 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 3
  Total Devices : 3
Preferred Minor : 0
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:13:12 2009
          State : clean, degraded
Active Devices : 2
Working Devices : 2
Failed Devices : 1
  Spare Devices : 0
         Layout : left-symmetric
     Chunk Size : 64K
           UUID : 9b96b4e8:53d431df:9e9c063d:285a9865
         Events : 0.36
    Number   Major   Minor   RaidDevice State
       0       8       19        0      active sync   /dev/sdb3
       1       8       35        1      active sync   /dev/sdc3
       2       0        0        2      removed
       3       8       51        -      faulty spare   /dev/sdd3
[root@server4 ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [raid1]
md4 : active raid5 sde5[6](S) sdb1[0] sdd2[5] sdd1[4] sdc2[3] sdc1[2] sdb2[1]
      10040000 blocks level 5, 64k chunk, algorithm 2 [6/6] [UUUUUU]
md1 : active raid1 sdc5[1] sdb5[0]
      2008000 blocks [2/2] [UU]
md2 : active raid1 sdc6[1] sdb6[0]
      2449792 blocks [2/2] [UU]
md3 : active raid1 sdd6[1] sdd5[0]
      2008000 blocks [2/2] [UU]
md0 : active raid5 sdd3[3](F) sdc3[1] sdb3[0]
      4016000 blocks level 5, 64k chunk, algorithm 2 [3/2] [UU_]
unused devices: <none>
[root@server4 ~]# mdadm --remove /dev/md0 /dev/sdd3
mdadm: hot removed /dev/sdd3
[root@server4 ~]# mdadm -D /dev/md0
/dev/md0:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:52:53 2009
     Raid Level : raid5
     Array Size : 4016000 (3.83 GiB 4.11 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 3
  Total Devices : 2
Preferred Minor : 0
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:14:22 2009
          State : clean, degraded
Active Devices : 2
Working Devices : 2
Failed Devices : 0
  Spare Devices : 0
         Layout : left-symmetric
     Chunk Size : 64K
           UUID : 9b96b4e8:53d431df:9e9c063d:285a9865
         Events : 0.38
    Number   Major   Minor   RaidDevice State
       0       8       19        0      active sync   /dev/sdb3
       1       8       35        1      active sync   /dev/sdc3
       2       0        0        2      removed
[root@server4 ~]# mdadm -a /dev/md0 /dev/sde3
mdadm: re-added /dev/sde3
[root@server4 ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [raid1]
md4 : active raid5 sde5[6](S) sdb1[0] sdd2[5] sdd1[4] sdc2[3] sdc1[2] sdb2[1]
      10040000 blocks level 5, 64k chunk, algorithm 2 [6/6] [UUUUUU]
md1 : active raid1 sdc5[1] sdb5[0]
      2008000 blocks [2/2] [UU]
md2 : active raid1 sdc6[1] sdb6[0]
      2449792 blocks [2/2] [UU]
md3 : active raid1 sdd6[1] sdd5[0]
      2008000 blocks [2/2] [UU]
md0 : active raid5 sde3[3] sdc3[1] sdb3[0]
      4016000 blocks level 5, 64k chunk, algorithm 2 [3/2] [UU_]
      [=>...................]  recovery =  8.8% (178320/2008000) finish=1.0min speed=29720K/sec
unused devices: <none>
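In short, the replacement is a fail / remove / add cycle. A minimal sketch of the same sequence (assuming the replacement partition /dev/sde3 already exists and is at least as large as the failed member), plus a simple way to watch the rebuild:

# Mark the bad member faulty, hot-remove it, then add the replacement partition.
mdadm --fail   /dev/md0 /dev/sdd3
mdadm --remove /dev/md0 /dev/sdd3
mdadm --add    /dev/md0 /dev/sde3      # same as "mdadm -a"
# Watch the rebuild until [UU_] turns back into [UUU]:
watch -n 5 cat /proc/mdstat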
II. Replacing a failed disk in a RAID 1 array:
[root@server4 ~]# mdadm -D /dev/md2
/dev/md2:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:54:15 2009
     Raid Level : raid1
     Array Size : 2449792 (2.34 GiB 2.51 GB)
  Used Dev Size : 2449792 (2.34 GiB 2.51 GB)
   Raid Devices : 2
  Total Devices : 2
Preferred Minor : 2
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 18:58:41 2009
          State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
  Spare Devices : 0
           UUID : 293a50bd:c8630ee3:4e5c7694:6db62838
         Events : 0.14
    Number   Major   Minor   RaidDevice State
       0       8       22        0      active sync   /dev/sdb6
       1       8       38        1      active sync   /dev/sdc6
[root@server4 ~]# mdadm --fail /dev/md2 /dev/sdc6
mdadm: set /dev/sdc6 faulty in /dev/md2
[root@server4 ~]# mdadm -D /dev/md2
/dev/md2:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:54:15 2009
     Raid Level : raid1
     Array Size : 2449792 (2.34 GiB 2.51 GB)
  Used Dev Size : 2449792 (2.34 GiB 2.51 GB)
   Raid Devices : 2
  Total Devices : 2
Preferred Minor : 2
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:16:27 2009
          State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 1
  Spare Devices : 0
           UUID : 293a50bd:c8630ee3:4e5c7694:6db62838
         Events : 0.16
    Number   Major   Minor   RaidDevice State
       0       8       22        0      active sync   /dev/sdb6
       2       8       38        -      faulty spare   /dev/sdc6
[root@server4 ~]# mdadm --remove /dev/md2 /dev/sdc6
mdadm: hot removed /dev/sdc6
[root@server4 ~]# mdadm -D /dev/md2
/dev/md2:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:54:15 2009
     Raid Level : raid1
     Array Size : 2449792 (2.34 GiB 2.51 GB)
  Used Dev Size : 2449792 (2.34 GiB 2.51 GB)
   Raid Devices : 2
  Total Devices : 1
Preferred Minor : 2
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:17:02 2009
          State : clean, degraded
Active Devices : 1
Working Devices : 1
Failed Devices : 0
  Spare Devices : 0
           UUID : 293a50bd:c8630ee3:4e5c7694:6db62838
         Events : 0.18
    Number   Major   Minor   RaidDevice State
       0       8       22        0      active sync   /dev/sdb6
       1       0        0        1      removed
[root@server4 ~]# mdadm -a /dev/md2 /dev/sde6
mdadm: added /dev/sde6
[root@server4 ~]# mdadm -D /dev/md2
/dev/md2:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:54:15 2009
     Raid Level : raid1
     Array Size : 2449792 (2.34 GiB 2.51 GB)
  Used Dev Size : 2449792 (2.34 GiB 2.51 GB)
   Raid Devices : 2
  Total Devices : 2
Preferred Minor : 2
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:19:09 2009
          State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
  Spare Devices : 0
           UUID : 293a50bd:c8630ee3:4e5c7694:6db62838
         Events : 0.20
    Number   Major   Minor   RaidDevice State
       0       8       22        0      active sync   /dev/sdb6
       1       8       70        1      active sync   /dev/sde6
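Note that the replacement partition must already exist and be at least as large as the member it replaces. One common way to prepare a blank replacement disk, not shown in the session above and given here only as a sketch, is to clone the partition layout from a surviving member before adding the new partition:

# Assumption: /dev/sde is a blank replacement disk, /dev/sdb is a healthy member.
sfdisk -d /dev/sdb | sfdisk /dev/sde   # copy the MBR partition table
partprobe /dev/sde                     # have the kernel re-read the new table
mdadm --add /dev/md2 /dev/sde6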
III. Adding hot spare disks:
1. Add a hot spare disk to RAID 5:
[root@server4 ~]# mdadm -a /dev/md0 /dev/sde3
mdadm: added /dev/sde3
[root@server4 ~]# mdadm -D /dev/md0
/dev/md0:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:52:53 2009
     Raid Level : raid5
     Array Size : 4016000 (3.83 GiB 4.11 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 3
  Total Devices : 4
Preferred Minor : 0
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:23:16 2009
          State : clean
Active Devices : 3
Working Devices : 4
Failed Devices : 0
  Spare Devices : 1
         Layout : left-symmetric
     Chunk Size : 64K
           UUID : 9b96b4e8:53d431df:9e9c063d:285a9865
         Events : 0.46
    Number   Major   Minor   RaidDevice State
       0       8       19        0      active sync   /dev/sdb3
       1       8       35        1      active sync   /dev/sdc3
       2       8       51        2      active sync   /dev/sdd3
       3       8       67        -      spare         /dev/sde3
[root@server4 ~]# mdadm -a /dev/md4 /dev/sde5
mdadm: re-added /dev/sde5
[root@server4 ~]# mdadm -D /dev/md4
/dev/md4:
        Version : 00.90.03
  Creation Time : Wed Mar 11 22:51:27 2009
     Raid Level : raid5
     Array Size : 10040000 (9.57 GiB 10.28 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 6
  Total Devices : 7
Preferred Minor : 4
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:27:16 2009
          State : clean
Active Devices : 6
Working Devices : 7
Failed Devices : 0
  Spare Devices : 1
         Layout : left-symmetric
     Chunk Size : 64K
           UUID : 333d111c:6f4268a8:cd33842a:5daf20b9
         Events : 0.6
    Number   Major   Minor   RaidDevice State
       0       8       17        0      active sync   /dev/sdb1
       1       8       18        1      active sync   /dev/sdb2
       2       8       33        2      active sync   /dev/sdc1
       3       8       34        3      active sync   /dev/sdc2
       4       8       49        4      active sync   /dev/sdd1
       5       8       50        5      active sync   /dev/sdd2
       6       8       69        -      spare         /dev/sde5
2. Add a hot spare disk to RAID 1:
[root@server4 ~]# mdadm -a /dev/md2 /dev/sde6
mdadm: added /dev/sde6
[root@server4 ~]# mdadm -D /dev/md2
/dev/md2:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:54:15 2009
     Raid Level : raid1
     Array Size : 2449792 (2.34 GiB 2.51 GB)
  Used Dev Size : 2449792 (2.34 GiB 2.51 GB)
   Raid Devices : 2
  Total Devices : 3
Preferred Minor : 2
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:20:49 2009
          State : clean
Active Devices : 2
Working Devices : 3
Failed Devices : 0
  Spare Devices : 1
           UUID : 293a50bd:c8630ee3:4e5c7694:6db62838
         Events : 0.26
    Number   Major   Minor   RaidDevice State
       0       8       22        0      active sync   /dev/sdb6
       1       8       38        1      active sync   /dev/sdc6
       2       8       70        -      spare         /dev/sde6
The following shows that when a member disk fails, the hot spare automatically takes its place:
[root@server4 ~]# mdadm --fail /dev/md2 /dev/sdc6
mdadm: set /dev/sdc6 faulty in /dev/md2
[root@server4 ~]# mdadm -D /dev/md2
/dev/md2:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:54:15 2009
     Raid Level : raid1
     Array Size : 2449792 (2.34 GiB 2.51 GB)
  Used Dev Size : 2449792 (2.34 GiB 2.51 GB)
   Raid Devices : 2
  Total Devices : 3
Preferred Minor : 2
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:29:25 2009
          State : clean, degraded, recovering
Active Devices : 1
Working Devices : 2
Failed Devices : 1
  Spare Devices : 1
Rebuild Status : 32% complete
           UUID : 293a50bd:c8630ee3:4e5c7694:6db62838
         Events : 0.28
    Number   Major   Minor   RaidDevice State
       0       8       22        0      active sync   /dev/sdb6
       2       8       70        1      spare rebuilding   /dev/sde6
       3       8       38        -      faulty spare   /dev/sdc6
[root@server4 ~]# mdadm -D /dev/md0
/dev/md0:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:52:53 2009
     Raid Level : raid5
     Array Size : 4016000 (3.83 GiB 4.11 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 3
  Total Devices : 4
Preferred Minor : 0
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:23:16 2009
          State : clean
Active Devices : 3
Working Devices : 4
Failed Devices : 0
  Spare Devices : 1
         Layout : left-symmetric
     Chunk Size : 64K
           UUID : 9b96b4e8:53d431df:9e9c063d:285a9865
         Events : 0.46
    Number   Major   Minor   RaidDevice State
       0       8       19        0      active sync   /dev/sdb3
       1       8       35        1      active sync   /dev/sdc3
       2       8       51        2      active sync   /dev/sdd3
       3       8       67        -      spare   /dev/sde3
[root@server4 ~]# mdadm --fail /dev/md0 /dev/sdd3
mdadm: set /dev/sdd3 faulty in /dev/md0
[root@server4 ~]# cat /proc/mdstat
Personalities : [raid6] [raid5] [raid4] [raid1]
md4 : active raid5 sde5[6](S) sdb1[0] sdd2[5] sdd1[4] sdc2[3] sdc1[2] sdb2[1]
      10040000 blocks level 5, 64k chunk, algorithm 2 [6/6] [UUUUUU]
md1 : active raid1 sdc5[1] sdb5[0]
      2008000 blocks [2/2] [UU]
md2 : active raid1 sde6[1] sdc6[2](F) sdb6[0]
      2449792 blocks [2/2] [UU]
md3 : active raid1 sdd6[1] sdd5[0]
      2008000 blocks [2/2] [UU]
md0 : active raid5 sde3[3] sdd3[4](F) sdc3[1] sdb3[0]
      4016000 blocks level 5, 64k chunk, algorithm 2 [3/2] [UU_]
      [=>...................]  recovery =  7.3% (149456/2008000) finish=1.2min speed=24909K/sec
unused devices: <none>
[root@server4 ~]# mdadm -D /dev/md0
/dev/md0:
        Version : 00.90.03
  Creation Time : Wed Mar 11 21:52:53 2009
     Raid Level : raid5
     Array Size : 4016000 (3.83 GiB 4.11 GB)
  Used Dev Size : 2008000 (1961.27 MiB 2056.19 MB)
   Raid Devices : 3
  Total Devices : 4
Preferred Minor : 0
    Persistence : Superblock is persistent
    Update Time : Thu Mar 12 19:32:20 2009
          State : clean, degraded, recovering
Active Devices : 2
Working Devices : 3
Failed Devices : 1
  Spare Devices : 1
         Layout : left-symmetric
     Chunk Size : 64K
Rebuild Status : 35% complete
           UUID : 9b96b4e8:53d431df:9e9c063d:285a9865
         Events : 0.48
    Number   Major   Minor   RaidDevice State
       0       8       19        0      active sync   /dev/sdb3
       1       8       35        1      active sync   /dev/sdc3
       3       8       67        2      spare rebuilding   /dev/sde3
       4       8       51        -      faulty spare   /dev/sdd3
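A hot spare only helps if the failure is actually noticed. mdadm can also run in monitor mode and send a mail when a member fails and a spare starts rebuilding; a minimal sketch, where root@localhost is just a placeholder address:

# Monitor all arrays, check every 60 seconds, mail alerts to the given address.
mdadm --monitor --scan --daemonise --delay=60 --mail=root@localhost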
