/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>

#ifdef _KERNEL
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
        ZTI_MODE_FIXED,         /* value is # of threads (min 1) */
        ZTI_MODE_BATCH,         /* cpu-intensive; value is ignored */
        ZTI_MODE_NULL,          /* don't create a taskq */
        ZTI_NMODES
} zti_modes_t;

#define ZTI_P(n, q)     { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_BATCH       { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_NULL        { ZTI_MODE_NULL, 0, 0 }

#define ZTI_N(n)        ZTI_P(n, 1)
#define ZTI_ONE         ZTI_N(1)

typedef struct zio_taskq_info {
        zti_modes_t zti_mode;
        uint_t zti_value;
        uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
        "issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type.  When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq.  Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros.  Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput.  Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention.  The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
        /* ISSUE        ISSUE_HIGH      INTR            INTR_HIGH */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* NULL */
        { ZTI_N(8),     ZTI_NULL,       ZTI_P(12, 8),   ZTI_NULL }, /* READ */
        { ZTI_BATCH,    ZTI_N(5),       ZTI_N(8),       ZTI_N(5) }, /* WRITE */
        { ZTI_P(12, 8), ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* FREE */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* CLAIM */
        { ZTI_ONE,      ZTI_NULL,       ZTI_ONE,        ZTI_NULL }, /* IOCTL */
};
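
/*
 * For illustration only (not compiled): an entry in the table above is a
 * zio_taskq_info_t initializer produced by the macros defined earlier.
 * For example, the READ row's INTR column expands as:
 *
 *	ZTI_P(12, 8)  ->  { ZTI_MODE_FIXED, 12, 8 }
 *
 * i.e. eight discrete interrupt taskqs of twelve threads each, one of
 * which is picked at random per dispatch.  Likewise ZTI_ONE expands to
 * { ZTI_MODE_FIXED, 1, 1 } and ZTI_BATCH to { ZTI_MODE_BATCH, 0, 1 },
 * a single throughput-oriented taskq.
 */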

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
id_t		zio_taskq_psrset_bind = PS_NONE;
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define TRYIMPORT_NAME  "$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
        const char *propname = zpool_prop_to_name(prop);
        nvlist_t *propval;

        VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

        if (strval != NULL)
                VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
        else
                VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

        VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
        nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
        vdev_t *rvd = spa->spa_root_vdev;
        dsl_pool_t *pool = spa->spa_dsl_pool;
        uint64_t size;
        uint64_t alloc;
        uint64_t space;
        uint64_t cap, version;
        zprop_source_t src = ZPROP_SRC_NONE;
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa->spa_props_lock));

        if (rvd != NULL) {
                alloc = metaslab_class_get_alloc(spa_normal_class(spa));
                size = metaslab_class_get_space(spa_normal_class(spa));
                spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
                spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
                    size - alloc, src);

                space = 0;
                for (int c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *tvd = rvd->vdev_child[c];
                        space += tvd->vdev_max_asize - tvd->vdev_asize;
                }
                spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
                    src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
                    (spa_mode(spa) == FREAD), src);

                cap = (size == 0) ? 0 : (alloc * 100 / size);
                spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
                    ddt_get_pool_dedup_ratio(spa), src);

                spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
                    rvd->vdev_state, src);

                version = spa_version(spa);
                if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
                        src = ZPROP_SRC_DEFAULT;
                else
                        src = ZPROP_SRC_LOCAL;
                spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
        }

        if (pool != NULL) {
                /*
                 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
                 * when opening pools created before this version, freedir
                 * will be NULL.
                 */
                if (pool->dp_free_dir != NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
                            pool->dp_free_dir->dd_phys->dd_used_bytes, src);
                } else {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
                            NULL, 0, src);
                }

                if (pool->dp_leak_dir != NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
                            pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
                } else {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
                            NULL, 0, src);
                }
        }

        spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

        if (spa->spa_comment != NULL) {
                spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
                    0, ZPROP_SRC_LOCAL);
        }

        if (spa->spa_root != NULL)
                spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
                    0, ZPROP_SRC_LOCAL);

        if ((dp = list_head(&spa->spa_config_list)) != NULL) {
                if (dp->scd_path == NULL) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
                            "none", 0, ZPROP_SRC_LOCAL);
                } else if (strcmp(dp->scd_path, spa_config_path) != 0) {
                        spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
                            dp->scd_path, 0, ZPROP_SRC_LOCAL);
                }
        }
}
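
/*
 * Illustrative only: each spa_prop_add_list() call above nests a small
 * nvlist under the property's name, so the resulting *nvp has roughly
 * this shape (the values shown are made up):
 *
 *	"size"     -> { "source" = ZPROP_SRC_NONE,  "value" = 1099511627776 }
 *	"capacity" -> { "source" = ZPROP_SRC_NONE,  "value" = 42 }
 *	"comment"  -> { "source" = ZPROP_SRC_LOCAL, "value" = "my pool" }
 *
 * "source" and "value" are the ZPROP_SOURCE and ZPROP_VALUE keys used by
 * spa_prop_add_list().
 */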

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
        objset_t *mos = spa->spa_meta_objset;
        zap_cursor_t zc;
        zap_attribute_t za;
        int err;

        VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

        mutex_enter(&spa->spa_props_lock);

        /*
         * Get properties from the spa config.
         */
        spa_prop_get_config(spa, nvp);

        /* If there is no pool property object, there is nothing more to get. */
        if (mos == NULL || spa->spa_pool_props_object == 0) {
                mutex_exit(&spa->spa_props_lock);
                return (0);
        }

        /*
         * Get properties from the MOS pool property object.
         */
        for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
            (err = zap_cursor_retrieve(&zc, &za)) == 0;
            zap_cursor_advance(&zc)) {
                uint64_t intval = 0;
                char *strval = NULL;
                zprop_source_t src = ZPROP_SRC_DEFAULT;
                zpool_prop_t prop;

                if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
                        continue;

                switch (za.za_integer_length) {
                case 8:
                        /* integer property */
                        if (za.za_first_integer !=
                            zpool_prop_default_numeric(prop))
                                src = ZPROP_SRC_LOCAL;

                        if (prop == ZPOOL_PROP_BOOTFS) {
                                dsl_pool_t *dp;
                                dsl_dataset_t *ds = NULL;

                                dp = spa_get_dsl(spa);
                                dsl_pool_config_enter(dp, FTAG);
                                if (err = dsl_dataset_hold_obj(dp,
                                    za.za_first_integer, FTAG, &ds)) {
                                        dsl_pool_config_exit(dp, FTAG);
                                        break;
                                }

                                strval = kmem_alloc(
                                    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
                                    KM_SLEEP);
                                dsl_dataset_name(ds, strval);
                                dsl_dataset_rele(ds, FTAG);
                                dsl_pool_config_exit(dp, FTAG);
                        } else {
                                strval = NULL;
                                intval = za.za_first_integer;
                        }

                        spa_prop_add_list(*nvp, prop, strval, intval, src);

                        if (strval != NULL)
                                kmem_free(strval,
                                    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

                        break;

                case 1:
                        /* string property */
                        strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
                        err = zap_lookup(mos, spa->spa_pool_props_object,
                            za.za_name, 1, za.za_num_integers, strval);
                        if (err) {
                                kmem_free(strval, za.za_num_integers);
                                break;
                        }
                        spa_prop_add_list(*nvp, prop, strval, 0, src);
                        kmem_free(strval, za.za_num_integers);
                        break;

                default:
                        break;
                }
        }
        zap_cursor_fini(&zc);
        mutex_exit(&spa->spa_props_lock);
out:
        if (err && err != ENOENT) {
                nvlist_free(*nvp);
                *nvp = NULL;
                return (err);
        }

        return (0);
}
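
/*
 * A minimal caller sketch (hypothetical, for illustration): on success
 * spa_prop_get() hands back the nvlist it allocated, which the caller
 * must free; on failure *nvp has already been freed and set to NULL.
 *
 *	nvlist_t *nvp;
 *	if (spa_prop_get(spa, &nvp) == 0) {
 *		(consume the per-property nvlists here)
 *		nvlist_free(nvp);
 *	}
 */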

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
        nvpair_t *elem;
        int error = 0, reset_bootfs = 0;
        uint64_t objnum = 0;
        boolean_t has_feature = B_FALSE;

        elem = NULL;
        while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                uint64_t intval;
                char *strval, *slash, *check, *fname;
                const char *propname = nvpair_name(elem);
                zpool_prop_t prop = zpool_name_to_prop(propname);

                switch (prop) {
                case ZPROP_INVAL:
                        if (!zpool_prop_feature(propname)) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        /*
                         * Sanitize the input.
                         */
                        if (nvpair_type(elem) != DATA_TYPE_UINT64) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        if (nvpair_value_uint64(elem, &intval) != 0) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        if (intval != 0) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        fname = strchr(propname, '@') + 1;
                        if (zfeature_lookup_name(fname, NULL) != 0) {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        has_feature = B_TRUE;
                        break;

                case ZPOOL_PROP_VERSION:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error &&
                            (intval < spa_version(spa) ||
                            intval > SPA_VERSION_BEFORE_FEATURES ||
                            has_feature))
                                error = SET_ERROR(EINVAL);
                        break;

                case ZPOOL_PROP_DELEGATION:
                case ZPOOL_PROP_AUTOREPLACE:
                case ZPOOL_PROP_LISTSNAPS:
                case ZPOOL_PROP_AUTOEXPAND:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && intval > 1)
                                error = SET_ERROR(EINVAL);
                        break;

                case ZPOOL_PROP_BOOTFS:
                        /*
                         * If the pool version is less than SPA_VERSION_BOOTFS,
                         * or the pool is still being created (version == 0),
                         * the bootfs property cannot be set.
                         */
                        if (spa_version(spa) < SPA_VERSION_BOOTFS) {
                                error = SET_ERROR(ENOTSUP);
                                break;
                        }

                        /*
                         * Make sure the vdev config is bootable.
                         */
                        if (!vdev_is_bootable(spa->spa_root_vdev)) {
                                error = SET_ERROR(ENOTSUP);
                                break;
                        }

                        reset_bootfs = 1;

                        error = nvpair_value_string(elem, &strval);

                        if (!error) {
                                objset_t *os;
                                uint64_t compress;

                                if (strval == NULL || strval[0] == '\0') {
                                        objnum = zpool_prop_default_numeric(
                                            ZPOOL_PROP_BOOTFS);
                                        break;
                                }

                                if (error = dmu_objset_hold(strval, FTAG, &os))
                                        break;

                                /* Must be ZPL and not gzip compressed. */

                                if (dmu_objset_type(os) != DMU_OST_ZFS) {
                                        error = SET_ERROR(ENOTSUP);
                                } else if ((error =
                                    dsl_prop_get_int_ds(dmu_objset_ds(os),
                                    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
                                    &compress)) == 0 &&
                                    !BOOTFS_COMPRESS_VALID(compress)) {
                                        error = SET_ERROR(ENOTSUP);
                                } else {
                                        objnum = dmu_objset_id(os);
                                }
                                dmu_objset_rele(os, FTAG);
                        }
                        break;

                case ZPOOL_PROP_FAILUREMODE:
                        error = nvpair_value_uint64(elem, &intval);
                        if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
                            intval > ZIO_FAILURE_MODE_PANIC))
                                error = SET_ERROR(EINVAL);

                        /*
                         * This is a special case which only occurs when
                         * the pool has completely failed.  This allows
                         * the user to change the in-core failmode property
                         * without syncing it out to disk (I/Os might
                         * currently be blocked).  We do this by returning
                         * EIO to the caller (spa_prop_set) to trick it
                         * into thinking we encountered a property validation
                         * error.
                         */
                        if (!error && spa_suspended(spa)) {
                                spa->spa_failmode = intval;
                                error = SET_ERROR(EIO);
                        }
                        break;

                case ZPOOL_PROP_CACHEFILE:
                        if ((error = nvpair_value_string(elem, &strval)) != 0)
                                break;

                        if (strval[0] == '\0')
                                break;

                        if (strcmp(strval, "none") == 0)
                                break;

                        if (strval[0] != '/') {
                                error = SET_ERROR(EINVAL);
                                break;
                        }

                        slash = strrchr(strval, '/');
                        ASSERT(slash != NULL);

                        if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
                            strcmp(slash, "/..") == 0)
                                error = SET_ERROR(EINVAL);
                        break;

                case ZPOOL_PROP_COMMENT:
                        if ((error = nvpair_value_string(elem, &strval)) != 0)
                                break;
                        for (check = strval; *check != '\0'; check++) {
                                /*
                                 * The kernel doesn't have an easy isprint()
                                 * check.  For this kernel check, we merely
                                 * check ASCII apart from DEL.  Fix this if
                                 * there is an easy-to-use kernel isprint().
                                 */
                                if (*check >= 0x7f) {
                                        error = SET_ERROR(EINVAL);
                                        break;
                                }
                        }
                        if (strlen(strval) > ZPROP_MAX_COMMENT)
                                error = E2BIG;
                        break;

                case ZPOOL_PROP_DEDUPDITTO:
                        if (spa_version(spa) < SPA_VERSION_DEDUP)
                                error = SET_ERROR(ENOTSUP);
                        else
                                error = nvpair_value_uint64(elem, &intval);
                        if (error == 0 &&
                            intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
                                error = SET_ERROR(EINVAL);
                        break;
                }

                if (error)
                        break;
        }

        if (!error && reset_bootfs) {
                error = nvlist_remove(props,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

                if (!error) {
                        error = nvlist_add_uint64(props,
                            zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
                }
        }

        return (error);
}
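
/*
 * For illustration: feature properties reach the ZPROP_INVAL case above as
 * uint64 pairs named "feature@<name>" whose value must be 0 ("enabled").
 * A hypothetical nvlist entry that would pass those checks:
 *
 *	nvlist_add_uint64(props, "feature@async_destroy", 0);
 *
 * A nonzero value, a non-uint64 type, or an unknown feature name after the
 * '@' is rejected with EINVAL.
 */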

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
        char *cachefile;
        spa_config_dirent_t *dp;

        if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
            &cachefile) != 0)
                return;

        dp = kmem_alloc(sizeof (spa_config_dirent_t),
            KM_SLEEP);

        if (cachefile[0] == '\0')
                dp->scd_path = spa_strdup(spa_config_path);
        else if (strcmp(cachefile, "none") == 0)
                dp->scd_path = NULL;
        else
                dp->scd_path = spa_strdup(cachefile);

        list_insert_head(&spa->spa_config_list, dp);
        if (need_sync)
                spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
        int error;
        nvpair_t *elem = NULL;
        boolean_t need_sync = B_FALSE;

        if ((error = spa_prop_validate(spa, nvp)) != 0)
                return (error);

        while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
                zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

                if (prop == ZPOOL_PROP_CACHEFILE ||
                    prop == ZPOOL_PROP_ALTROOT ||
                    prop == ZPOOL_PROP_READONLY)
                        continue;

                if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
                        uint64_t ver;

                        if (prop == ZPOOL_PROP_VERSION) {
                                VERIFY(nvpair_value_uint64(elem, &ver) == 0);
                        } else {
                                ASSERT(zpool_prop_feature(nvpair_name(elem)));
                                ver = SPA_VERSION_FEATURES;
                                need_sync = B_TRUE;
                        }

                        /* Save time if the version is already set. */
                        if (ver == spa_version(spa))
                                continue;

                        /*
                         * In addition to the pool directory object, we might
                         * create the pool properties object, the features for
                         * read object, the features for write object, or the
                         * feature descriptions object.
                         */
                        error = dsl_sync_task(spa->spa_name, NULL,
                            spa_sync_version, &ver,
                            6, ZFS_SPACE_CHECK_RESERVED);
                        if (error)
                                return (error);
                        continue;
                }

                need_sync = B_TRUE;
                break;
        }

        if (need_sync) {
                return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
                    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
        }

        return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
        if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
                VERIFY(zap_remove(spa->spa_meta_objset,
                    spa->spa_pool_props_object,
                    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
                spa->spa_bootfs = 0;
        }
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
        uint64_t *newguid = arg;
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;
        vdev_t *rvd = spa->spa_root_vdev;
        uint64_t vdev_state;

        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        vdev_state = rvd->vdev_state;
        spa_config_exit(spa, SCL_STATE, FTAG);

        if (vdev_state != VDEV_STATE_HEALTHY)
                return (SET_ERROR(ENXIO));

        ASSERT3U(spa_guid(spa), !=, *newguid);

        return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
        uint64_t *newguid = arg;
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;
        uint64_t oldguid;
        vdev_t *rvd = spa->spa_root_vdev;

        oldguid = spa_guid(spa);

        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
        rvd->vdev_guid = *newguid;
        rvd->vdev_guid_sum += (*newguid - oldguid);
        vdev_config_dirty(rvd);
        spa_config_exit(spa, SCL_STATE, FTAG);

        spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
            oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
        int error;
        uint64_t guid;

        mutex_enter(&spa->spa_vdev_top_lock);
        mutex_enter(&spa_namespace_lock);
        guid = spa_generate_guid(NULL);

        error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
            spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

        if (error == 0) {
                spa_config_sync(spa, B_FALSE, B_TRUE);
                spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
        }

        mutex_exit(&spa_namespace_lock);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}
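
/*
 * Note on the pattern above (informational): dsl_sync_task() runs the
 * _check function in open context first and again in syncing context;
 * only if both passes succeed does the _sync function run in syncing
 * context.  That is why spa_change_guid_check() can veto the GUID
 * rewrite before spa_change_guid_sync() dirties any vdevs.
 */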

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
        spa_error_entry_t *sa = (spa_error_entry_t *)a;
        spa_error_entry_t *sb = (spa_error_entry_t *)b;
        int ret;

        ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
            sizeof (zbookmark_phys_t));

        if (ret < 0)
                return (-1);
        else if (ret > 0)
                return (1);
        else
                return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
        ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

        bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
        bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
        const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
        enum zti_modes mode = ztip->zti_mode;
        uint_t value = ztip->zti_value;
        uint_t count = ztip->zti_count;
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        char name[32];
        uint_t flags = 0;
        boolean_t batch = B_FALSE;

        if (mode == ZTI_MODE_NULL) {
                tqs->stqs_count = 0;
                tqs->stqs_taskq = NULL;
                return;
        }

        ASSERT3U(count, >, 0);

        tqs->stqs_count = count;
        tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

        switch (mode) {
        case ZTI_MODE_FIXED:
                ASSERT3U(value, >=, 1);
                value = MAX(value, 1);
                break;

        case ZTI_MODE_BATCH:
                batch = B_TRUE;
                flags |= TASKQ_THREADS_CPU_PCT;
                value = zio_taskq_batch_pct;
                break;

        default:
                panic("unrecognized mode for %s_%s taskq (%u:%u) in "
                    "spa_activate()",
                    zio_type_name[t], zio_taskq_types[q], mode, value);
                break;
        }

        for (uint_t i = 0; i < count; i++) {
                taskq_t *tq;

                if (count > 1) {
                        (void) snprintf(name, sizeof (name), "%s_%s_%u",
                            zio_type_name[t], zio_taskq_types[q], i);
                } else {
                        (void) snprintf(name, sizeof (name), "%s_%s",
                            zio_type_name[t], zio_taskq_types[q]);
                }

                if (zio_taskq_sysdc && spa->spa_proc != &p0) {
                        if (batch)
                                flags |= TASKQ_DC_BATCH;

                        tq = taskq_create_sysdc(name, value, 50, INT_MAX,
                            spa->spa_proc, zio_taskq_basedc, flags);
                } else {
                        pri_t pri = maxclsyspri;
                        /*
                         * The write issue taskq can be extremely CPU
                         * intensive.  Run it at slightly lower priority
                         * than the other taskqs.
                         */
                        if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
                                pri--;

                        tq = taskq_create_proc(name, value, pri, 50,
                            INT_MAX, spa->spa_proc, flags);
                }

                tqs->stqs_taskq[i] = tq;
        }
}
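
/*
 * Illustrative example of the names generated above, assuming
 * zio_type_name[] entries of the form "zio_write": the single batch
 * issue taskq for writes is named "zio_write_issue", while a multi-taskq
 * entry such as ZTI_P(12, 8) for READ interrupts yields
 * "zio_read_intr_0" through "zio_read_intr_7".
 */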

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

        if (tqs->stqs_taskq == NULL) {
                ASSERT0(tqs->stqs_count);
                return;
        }

        for (uint_t i = 0; i < tqs->stqs_count; i++) {
                ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
                taskq_destroy(tqs->stqs_taskq[i]);
        }

        kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
        tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.  In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
        spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
        taskq_t *tq;

        ASSERT3P(tqs->stqs_taskq, !=, NULL);
        ASSERT3U(tqs->stqs_count, !=, 0);

        if (tqs->stqs_count == 1) {
                tq = tqs->stqs_taskq[0];
        } else {
                tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
        }

        taskq_dispatch_ent(tq, func, arg, flags, ent);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
        for (int t = 0; t < ZIO_TYPES; t++) {
                for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
                        spa_taskqs_init(spa, t, q);
                }
        }
}

#ifdef _KERNEL
static void
spa_thread(void *arg)
{
        callb_cpr_t cprinfo;

        spa_t *spa = arg;
        user_t *pu = PTOU(curproc);

        CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
            spa->spa_name);

        ASSERT(curproc != &p0);
        (void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
            "zpool-%s", spa->spa_name);
        (void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

        /* bind this thread to the requested psrset */
        if (zio_taskq_psrset_bind != PS_NONE) {
                pool_lock();
                mutex_enter(&cpu_lock);
                mutex_enter(&pidlock);
                mutex_enter(&curproc->p_lock);

                if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
                    0, NULL, NULL) == 0) {
                        curthread->t_bind_pset = zio_taskq_psrset_bind;
                } else {
                        cmn_err(CE_WARN,
                            "Couldn't bind process for zfs pool \"%s\" to "
                            "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
                }

                mutex_exit(&curproc->p_lock);
                mutex_exit(&pidlock);
                mutex_exit(&cpu_lock);
                pool_unlock();
        }

        if (zio_taskq_sysdc) {
                sysdc_thread_enter(curthread, 100, 0);
        }

        spa->spa_proc = curproc;
        spa->spa_did = curthread->t_did;

        spa_create_zio_taskqs(spa);

        mutex_enter(&spa->spa_proc_lock);
        ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

        spa->spa_proc_state = SPA_PROC_ACTIVE;
        cv_broadcast(&spa->spa_proc_cv);

        CALLB_CPR_SAFE_BEGIN(&cprinfo);
        while (spa->spa_proc_state == SPA_PROC_ACTIVE)
                cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
        CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

        ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
        spa->spa_proc_state = SPA_PROC_GONE;
        spa->spa_proc = &p0;
        cv_broadcast(&spa->spa_proc_cv);
        CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

        mutex_enter(&curproc->p_lock);
        lwp_exit();
}
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        spa->spa_state = POOL_STATE_ACTIVE;
        spa->spa_mode = mode;

        spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
        spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

        /* Try to create a covering process */
        mutex_enter(&spa->spa_proc_lock);
        ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
        ASSERT(spa->spa_proc == &p0);
        spa->spa_did = 0;

        /* Only create a process if we're going to be around a while. */
        if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
                if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
                    NULL, 0) == 0) {
                        spa->spa_proc_state = SPA_PROC_CREATED;
                        while (spa->spa_proc_state == SPA_PROC_CREATED) {
                                cv_wait(&spa->spa_proc_cv,
                                    &spa->spa_proc_lock);
                        }
                        ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
                        ASSERT(spa->spa_proc != &p0);
                        ASSERT(spa->spa_did != 0);
                } else {
#ifdef _KERNEL
                        cmn_err(CE_WARN,
                            "Couldn't create process for zfs pool \"%s\"\n",
                            spa->spa_name);
#endif
                }
        }
        mutex_exit(&spa->spa_proc_lock);

        /* If we didn't create a process, we need to create our taskqs. */
        if (spa->spa_proc == &p0) {
                spa_create_zio_taskqs(spa);
        }

        list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_config_dirty_node));
        list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_state_dirty_node));

        txg_list_create(&spa->spa_vdev_txg_list,
            offsetof(struct vdev, vdev_txg_node));

        avl_create(&spa->spa_errlist_scrub,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
        avl_create(&spa->spa_errlist_last,
            spa_error_entry_compare, sizeof (spa_error_entry_t),
            offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
        ASSERT(spa->spa_sync_on == B_FALSE);
        ASSERT(spa->spa_dsl_pool == NULL);
        ASSERT(spa->spa_root_vdev == NULL);
        ASSERT(spa->spa_async_zio_root == NULL);
        ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

        txg_list_destroy(&spa->spa_vdev_txg_list);

        list_destroy(&spa->spa_config_dirty_list);
        list_destroy(&spa->spa_state_dirty_list);

        for (int t = 0; t < ZIO_TYPES; t++) {
                for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
                        spa_taskqs_fini(spa, t, q);
                }
        }

        metaslab_class_destroy(spa->spa_normal_class);
        spa->spa_normal_class = NULL;

        metaslab_class_destroy(spa->spa_log_class);
        spa->spa_log_class = NULL;

        /*
         * If this was part of an import or the open otherwise failed, we may
         * still have errors left in the queues.  Empty them just in case.
         */
        spa_errlog_drain(spa);

        avl_destroy(&spa->spa_errlist_scrub);
        avl_destroy(&spa->spa_errlist_last);

        spa->spa_state = POOL_STATE_UNINITIALIZED;

        mutex_enter(&spa->spa_proc_lock);
        if (spa->spa_proc_state != SPA_PROC_NONE) {
                ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
                spa->spa_proc_state = SPA_PROC_DEACTIVATE;
                cv_broadcast(&spa->spa_proc_cv);
                while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
                        ASSERT(spa->spa_proc != &p0);
                        cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
                }
                ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
                spa->spa_proc_state = SPA_PROC_NONE;
        }
        ASSERT(spa->spa_proc == &p0);
        mutex_exit(&spa->spa_proc_lock);

        /*
         * We want to make sure spa_thread() has actually exited the ZFS
         * module, so that the module can't be unloaded out from underneath
         * it.
         */
        if (spa->spa_did != 0) {
                thread_join(spa->spa_did);
                spa->spa_did = 0;
        }
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
        nvlist_t **child;
        uint_t children;
        int error;

        if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
                return (error);

        if ((*vdp)->vdev_ops->vdev_op_leaf)
                return (0);

        error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children);

        if (error == ENOENT)
                return (0);

        if (error) {
                vdev_free(*vdp);
                *vdp = NULL;
                return (SET_ERROR(EINVAL));
        }

        for (int c = 0; c < children; c++) {
                vdev_t *vd;
                if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
                    atype)) != 0) {
                        vdev_free(*vdp);
                        *vdp = NULL;
                        return (error);
                }
        }

        ASSERT(*vdp != NULL);

        return (0);
}
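
/*
 * For illustration (hypothetical config): spa_config_parse() walks an
 * nvlist tree of roughly this shape, recursing on ZPOOL_CONFIG_CHILDREN:
 *
 *	type=root
 *	    children[0]: type=mirror
 *		children[0]: type=disk path=/dev/dsk/c0t0d0s0
 *		children[1]: type=disk path=/dev/dsk/c0t1d0s0
 *
 * Leaf vdevs terminate the recursion; an interior vdev with no children
 * array is also accepted (the ENOENT case above).
 */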

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
        int i;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        /*
         * Stop async tasks.
         */
        spa_async_suspend(spa);

        /*
         * Stop syncing.
         */
        if (spa->spa_sync_on) {
                txg_sync_stop(spa->spa_dsl_pool);
                spa->spa_sync_on = B_FALSE;
        }

        /*
         * Wait for any outstanding async I/O to complete.
         */
        if (spa->spa_async_zio_root != NULL) {
                (void) zio_wait(spa->spa_async_zio_root);
                spa->spa_async_zio_root = NULL;
        }

        bpobj_close(&spa->spa_deferred_bpobj);

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        /*
         * Close all vdevs.
         */
        if (spa->spa_root_vdev)
                vdev_free(spa->spa_root_vdev);
        ASSERT(spa->spa_root_vdev == NULL);

        /*
         * Close the dsl pool.
         */
        if (spa->spa_dsl_pool) {
                dsl_pool_close(spa->spa_dsl_pool);
                spa->spa_dsl_pool = NULL;
                spa->spa_meta_objset = NULL;
        }

        ddt_unload(spa);

        /*
         * Drop and purge level 2 cache.
         */
        spa_l2cache_drop(spa);

        for (i = 0; i < spa->spa_spares.sav_count; i++)
                vdev_free(spa->spa_spares.sav_vdevs[i]);
        if (spa->spa_spares.sav_vdevs) {
                kmem_free(spa->spa_spares.sav_vdevs,
                    spa->spa_spares.sav_count * sizeof (void *));
                spa->spa_spares.sav_vdevs = NULL;
        }
        if (spa->spa_spares.sav_config) {
                nvlist_free(spa->spa_spares.sav_config);
                spa->spa_spares.sav_config = NULL;
        }
        spa->spa_spares.sav_count = 0;

        for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
                vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
                vdev_free(spa->spa_l2cache.sav_vdevs[i]);
        }
        if (spa->spa_l2cache.sav_vdevs) {
                kmem_free(spa->spa_l2cache.sav_vdevs,
                    spa->spa_l2cache.sav_count * sizeof (void *));
                spa->spa_l2cache.sav_vdevs = NULL;
        }
        if (spa->spa_l2cache.sav_config) {
                nvlist_free(spa->spa_l2cache.sav_config);
                spa->spa_l2cache.sav_config = NULL;
        }
        spa->spa_l2cache.sav_count = 0;

        spa->spa_async_suspended = 0;

        if (spa->spa_comment != NULL) {
                spa_strfree(spa->spa_comment);
                spa->spa_comment = NULL;
        }

        spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
        nvlist_t **spares;
        uint_t nspares;
        int i;
        vdev_t *vd, *tvd;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        /*
         * First, close and free any existing spare vdevs.
         */
        for (i = 0; i < spa->spa_spares.sav_count; i++) {
                vd = spa->spa_spares.sav_vdevs[i];

                /* Undo the call to spa_spare_activate() below */
                if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
                    B_FALSE)) != NULL && tvd->vdev_isspare)
                        spa_spare_remove(tvd);
                vdev_close(vd);
                vdev_free(vd);
        }

        if (spa->spa_spares.sav_vdevs)
                kmem_free(spa->spa_spares.sav_vdevs,
                    spa->spa_spares.sav_count * sizeof (void *));

        if (spa->spa_spares.sav_config == NULL)
                nspares = 0;
        else
                VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
                    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

        spa->spa_spares.sav_count = (int)nspares;
        spa->spa_spares.sav_vdevs = NULL;

        if (nspares == 0)
                return;

        /*
         * Construct the array of vdevs, opening them to get status in the
         * process.  For each spare, there are potentially two different
         * vdev_t structures associated with it: one in the list of spares
         * (used only for basic validation purposes) and one in the active
         * vdev configuration (if it's spared in).  During this phase we open
         * and validate each vdev on the spare list.  If the vdev also exists
         * in the active configuration, then we also mark this vdev as an
         * active spare.
         */
        spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
            KM_SLEEP);
        for (i = 0; i < spa->spa_spares.sav_count; i++) {
                VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
                    VDEV_ALLOC_SPARE) == 0);
                ASSERT(vd != NULL);

                spa->spa_spares.sav_vdevs[i] = vd;

                if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
                    B_FALSE)) != NULL) {
                        if (!tvd->vdev_isspare)
                                spa_spare_add(tvd);

                        /*
                         * We only mark the spare active if we were
                         * successfully able to load the vdev.  Otherwise,
                         * importing a pool with a bad active spare would
                         * result in strange behavior, because multiple pools
                         * would think the spare is actively in use.
                         *
                         * There is a vulnerability here to an equally bizarre
                         * circumstance, where a dead active spare is later
                         * brought back to life (onlined or otherwise).  Given
                         * the rarity of this scenario, and the extra
                         * complexity it adds, we ignore the possibility.
                         */
                        if (!vdev_is_dead(tvd))
                                spa_spare_activate(tvd);
                }

                vd->vdev_top = vd;
                vd->vdev_aux = &spa->spa_spares;

                if (vdev_open(vd) != 0)
                        continue;

                if (vdev_validate_aux(vd) == 0)
                        spa_spare_add(vd);
        }

        /*
         * Recompute the stashed list of spares, with status information
         * this time.
         */
        VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
            KM_SLEEP);
        for (i = 0; i < spa->spa_spares.sav_count; i++)
                spares[i] = vdev_config_generate(spa,
                    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
        VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
            ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
        for (i = 0; i < spa->spa_spares.sav_count; i++)
                nvlist_free(spares[i]);
        kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache
 * for this pool.  When this is called, we have some form of basic information
 * in 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them,
 * and then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
        nvlist_t **l2cache;
        uint_t nl2cache;
        int i, j, oldnvdevs;
        uint64_t guid;
        vdev_t *vd, **oldvdevs, **newvdevs;
        spa_aux_vdev_t *sav = &spa->spa_l2cache;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

        if (sav->sav_config != NULL) {
                VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
                    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
                newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
        } else {
                nl2cache = 0;
                newvdevs = NULL;
        }

        oldvdevs = sav->sav_vdevs;
        oldnvdevs = sav->sav_count;
        sav->sav_vdevs = NULL;
        sav->sav_count = 0;

        /*
         * Process new nvlist of vdevs.
         */
        for (i = 0; i < nl2cache; i++) {
                VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
                    &guid) == 0);

                newvdevs[i] = NULL;
                for (j = 0; j < oldnvdevs; j++) {
                        vd = oldvdevs[j];
                        if (vd != NULL && guid == vd->vdev_guid) {
                                /*
                                 * Retain previous vdev for add/remove ops.
                                 */
                                newvdevs[i] = vd;
                                oldvdevs[j] = NULL;
                                break;
                        }
                }

                if (newvdevs[i] == NULL) {
                        /*
                         * Create new vdev.
                         */
                        VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
                            VDEV_ALLOC_L2CACHE) == 0);
                        ASSERT(vd != NULL);
                        newvdevs[i] = vd;

                        /*
                         * Commit this vdev as an l2cache device,
                         * even if it fails to open.
                         */
                        spa_l2cache_add(vd);

                        vd->vdev_top = vd;
                        vd->vdev_aux = sav;

                        spa_l2cache_activate(vd);

                        if (vdev_open(vd) != 0)
                                continue;

                        (void) vdev_validate_aux(vd);

                        if (!vdev_is_dead(vd))
                                l2arc_add_vdev(spa, vd);
                }
        }

        /*
         * Purge vdevs that were dropped.
         */
        for (i = 0; i < oldnvdevs; i++) {
                uint64_t pool;

                vd = oldvdevs[i];
                if (vd != NULL) {
                        ASSERT(vd->vdev_isl2cache);

                        if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
                            pool != 0ULL && l2arc_vdev_present(vd))
                                l2arc_remove_vdev(vd);
                        vdev_clear_stats(vd);
                        vdev_free(vd);
                }
        }

        if (oldvdevs)
                kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

        if (sav->sav_config == NULL)
                goto out;

        sav->sav_vdevs = newvdevs;
        sav->sav_count = (int)nl2cache;

        /*
         * Recompute the stashed list of l2cache devices, with status
         * information this time.
         */
        VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
            DATA_TYPE_NVLIST_ARRAY) == 0);

        l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
        for (i = 0; i < sav->sav_count; i++)
                l2cache[i] = vdev_config_generate(spa,
                    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
        VERIFY(nvlist_add_nvlist_array(sav->sav_config,
            ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
        for (i = 0; i < sav->sav_count; i++)
                nvlist_free(l2cache[i]);
        if (sav->sav_count)
                kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
        dmu_buf_t *db;
        char *packed = NULL;
        size_t nvsize = 0;
        int error;
        *value = NULL;

        VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
        nvsize = *(uint64_t *)db->db_data;
        dmu_buf_rele(db, FTAG);

        packed = kmem_alloc(nvsize, KM_SLEEP);
        error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
            DMU_READ_PREFETCH);
        if (error == 0)
                error = nvlist_unpack(packed, nvsize, value, 0);
        kmem_free(packed, nvsize);

        return (error);
}
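
/*
 * On-disk layout assumed by load_nvlist() above, for illustration: the
 * object's bonus buffer holds a single uint64_t giving the packed size,
 * and the object data holds the packed nvlist itself:
 *
 *	bonus:	[ nvsize (uint64_t) ]
 *	data:	[ nvsize bytes of packed nvlist ]
 *
 * nvlist_unpack() then reconstitutes the nvlist from that buffer.
 */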

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
        for (int c = 0; c < vd->vdev_children; c++)
                spa_check_removed(vd->vdev_child[c]);

        if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
            !vd->vdev_ishole) {
                zfs_post_autoreplace(vd->vdev_spa, vd);
                spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
        }
}

/*
 * Validate the current config against the MOS config.
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
        vdev_t *mrvd, *rvd = spa->spa_root_vdev;
        nvlist_t *nv;

        VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

        ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

        /*
         * If we're doing a normal import, then build up any additional
         * diagnostic information about missing devices in this config.
         * We'll pass this up to the user for further processing.
         */
        if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
                nvlist_t **child, *nv;
                uint64_t idx = 0;

                child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
                    KM_SLEEP);
                VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

                for (int c = 0; c < rvd->vdev_children; c++) {
                        vdev_t *tvd = rvd->vdev_child[c];
                        vdev_t *mtvd = mrvd->vdev_child[c];

                        if (tvd->vdev_ops == &vdev_missing_ops &&
                            mtvd->vdev_ops != &vdev_missing_ops &&
                            mtvd->vdev_islog)
                                child[idx++] = vdev_config_generate(spa, mtvd,
                                    B_FALSE, 0);
                }

                if (idx) {
                        VERIFY(nvlist_add_nvlist_array(nv,
                            ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
                        VERIFY(nvlist_add_nvlist(spa->spa_load_info,
                            ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

                        for (int i = 0; i < idx; i++)
                                nvlist_free(child[i]);
                }
                nvlist_free(nv);
                kmem_free(child, rvd->vdev_children * sizeof (nvlist_t **));
        }

        /*
         * Compare the root vdev tree with the information we have
         * from the MOS config (mrvd).  Check each top-level vdev
         * with the corresponding MOS config top-level (mtvd).
         */
        for (int c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                vdev_t *mtvd = mrvd->vdev_child[c];

                /*
                 * Resolve any "missing" vdevs in the current configuration.
                 * If we find that the MOS config has more accurate information
                 * about the top-level vdev then use that vdev instead.
                 */
                if (tvd->vdev_ops == &vdev_missing_ops &&
                    mtvd->vdev_ops != &vdev_missing_ops) {

                        if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
                                continue;

                        /*
                         * Device specific actions.
                         */
                        if (mtvd->vdev_islog) {
                                spa_set_log_state(spa, SPA_LOG_CLEAR);
                        } else {
                                /*
                                 * XXX - once we have 'readonly' pool
                                 * support we should be able to handle
                                 * missing data devices by transitioning
                                 * the pool to readonly.
                                 */
                                continue;
                        }

                        /*
                         * Swap the missing vdev with the data we were
                         * able to obtain from the MOS config.
                         */
                        vdev_remove_child(rvd, tvd);
                        vdev_remove_child(mrvd, mtvd);

                        vdev_add_child(rvd, mtvd);
                        vdev_add_child(mrvd, tvd);

                        spa_config_exit(spa, SCL_ALL, FTAG);
                        vdev_load(mtvd);
                        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

                        vdev_reopen(rvd);
                } else if (mtvd->vdev_islog) {
                        /*
                         * Load the slog device's state from the MOS config
                         * since it's possible that the label does not
                         * contain the most up-to-date information.
                         */
                        vdev_load_log_state(tvd, mtvd);
                        vdev_reopen(tvd);
                }
        }
        vdev_free(mrvd);
        spa_config_exit(spa, SCL_ALL, FTAG);

        /*
         * Ensure we were able to validate the config.
         */
        return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices.
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
        boolean_t rv = B_FALSE;

        switch (spa->spa_log_state) {
        case SPA_LOG_MISSING:
                /* need to recheck in case slog has been restored */
        case SPA_LOG_UNKNOWN:
                rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
                    NULL, DS_FIND_CHILDREN) != 0);
                if (rv)
                        spa_set_log_state(spa, SPA_LOG_MISSING);
                break;
        }
        return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
        vdev_t *rvd = spa->spa_root_vdev;
        boolean_t slog_found = B_FALSE;

        ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

        if (!spa_has_slogs(spa))
                return (B_FALSE);

        for (int c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;

                if (tvd->vdev_islog) {
                        metaslab_group_passivate(mg);
                        slog_found = B_TRUE;
                }
        }

        return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
        vdev_t *rvd = spa->spa_root_vdev;

        ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

        for (int c = 0; c < rvd->vdev_children; c++) {
                vdev_t *tvd = rvd->vdev_child[c];
                metaslab_group_t *mg = tvd->vdev_mg;

                if (tvd->vdev_islog)
                        metaslab_group_activate(mg);
        }
}

int
spa_offline_log(spa_t *spa)
{
        int error;

        error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
            NULL, DS_FIND_CHILDREN);
        if (error == 0) {
                /*
                 * We successfully offlined the log device, sync out the
                 * current txg so that the "stubby" block can be removed
                 * by zil_sync().
                 */
                txg_wait_synced(spa->spa_dsl_pool, 0);
        }
        return (error);
}
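
/*
 * A hypothetical pairing of the helpers above, shown only to illustrate
 * the intended order of operations (the real callers live elsewhere in
 * this file):
 *
 *	if (spa_passivate_log(spa)) {
 *		error = spa_offline_log(spa);	empty the intent logs
 *		if (error != 0)
 *			spa_activate_log(spa);	resume slog allocations
 *	}
 *
 * Passivating first ensures no new allocations land on the slog devices
 * while the ZIL chains are being destroyed.
 */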

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
        for (int i = 0; i < sav->sav_count; i++)
                spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
        spa_t *spa = zio->io_spa;

        if (zio->io_error)
                return;

        mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
        if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
                spa->spa_claim_max_txg = zio->io_bp->blk_birth;
        mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
        uint64_t	sle_meta_count;
        uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
        blkptr_t *bp = zio->io_bp;
        spa_load_error_t *sle = zio->io_private;
        dmu_object_type_t type = BP_GET_TYPE(bp);
        int error = zio->io_error;
        spa_t *spa = zio->io_spa;

        if (error) {
                if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
                    type != DMU_OT_INTENT_LOG)
                        atomic_inc_64(&sle->sle_meta_count);
                else
                        atomic_inc_64(&sle->sle_data_count);
        }
        zio_data_buf_free(zio->io_data, zio->io_size);

        mutex_enter(&spa->spa_scrub_lock);
        spa->spa_scrub_inflight--;
        cv_broadcast(&spa->spa_scrub_io_cv);
        mutex_exit(&spa->spa_scrub_lock);
}

/*
 * Maximum number of concurrent scrub I/Os to issue while verifying a pool
 * during import.
 */
int spa_load_verify_maxinflight = 10000;
boolean_t spa_load_verify_metadata = B_TRUE;
boolean_t spa_load_verify_data = B_TRUE;

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
        if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
                return (0);
        /*
         * Note: normally this routine will not be called if
         * spa_load_verify_metadata is not set.  However, it may be useful
         * to manually set the flag after the traversal has begun.
         */
        if (!spa_load_verify_metadata)
                return (0);
        if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
                return (0);

        zio_t *rio = arg;
        size_t size = BP_GET_PSIZE(bp);
        void *data = zio_data_buf_alloc(size);

        mutex_enter(&spa->spa_scrub_lock);
        while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
                cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
        spa->spa_scrub_inflight++;
        mutex_exit(&spa->spa_scrub_lock);

        zio_nowait(zio_read(rio, spa, bp, data, size,
            spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
            ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
            ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
        return (0);
}

static int
spa_load_verify(spa_t *spa)
{
        zio_t *rio;
        spa_load_error_t sle = { 0 };
        zpool_rewind_policy_t policy;
        boolean_t verify_ok = B_FALSE;
        int error = 0;

        zpool_get_rewind_policy(spa->spa_config, &policy);

        if (policy.zrp_request & ZPOOL_NEVER_REWIND)
                return (0);

        rio = zio_root(spa, NULL, &sle,
            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

        if (spa_load_verify_metadata) {
                error = traverse_pool(spa, spa->spa_verify_min_txg,
                    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
                    spa_load_verify_cb, rio);
        }

        (void) zio_wait(rio);

        spa->spa_load_meta_errors = sle.sle_meta_count;
        spa->spa_load_data_errors = sle.sle_data_count;

        if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
            sle.sle_data_count <= policy.zrp_maxdata) {
                int64_t loss = 0;

                verify_ok = B_TRUE;
                spa->spa_load_txg = spa->spa_uberblock.ub_txg;
                spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

                loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
                VERIFY(nvlist_add_uint64(spa->spa_load_info,
                    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
                VERIFY(nvlist_add_int64(spa->spa_load_info,
                    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
                VERIFY(nvlist_add_uint64(spa->spa_load_info,
                    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
        } else {
                spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
        }

        if (error) {
                if (error != ENXIO && error != EIO)
                        error = SET_ERROR(EIO);
                return (error);
        }

        return (verify_ok ? 0 : EIO);
}
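
/*
 * For illustration: the rewind policy consulted above comes from the
 * caller's config nvlist via zpool_get_rewind_policy().  Of its fields,
 * this function uses:
 *
 *	zrp_request	ZPOOL_NEVER_REWIND skips verification entirely
 *	zrp_maxmeta	max tolerated metadata block errors
 *	zrp_maxdata	max tolerated data block errors
 *
 * Exceeding either bound leaves verify_ok false and the load fails
 * with EIO unless a rewind is attempted.
 */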

/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
        (void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
            zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
        return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
            name, sizeof (uint64_t), 1, val));
}

static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
        vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
        return (err);
}

/*
 * Fix up config after a partly-completed split.  This is done with the
 * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
 * pool have that entry in their config, but only the splitting one contains
 * a list of all the guids of the vdevs that are being split off.
 *
 * This function determines what to do with that list: either rejoin
 * all the disks to the pool, or complete the splitting process.  To attempt
 * the rejoin, each disk that is offlined is marked online again, and
 * we do a reopen() call.  If the vdev label for every disk that was
 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
 * then we call vdev_split() on each disk, and complete the split.
 *
 * Otherwise we leave the config alone, with all the vdevs in place in
 * the original pool.
 */
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
        uint_t extracted;
        uint64_t *glist;
        uint_t i, gcount;
        nvlist_t *nvl;
        vdev_t **vd;
        boolean_t attempt_reopen;

        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
                return;

        /* check that the config is complete */
        if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
            &glist, &gcount) != 0)
                return;

        vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);

        /* attempt to online all the vdevs & validate */
        attempt_reopen = B_TRUE;
        for (i = 0; i < gcount; i++) {
                if (glist[i] == 0)	/* vdev is hole */
                        continue;

                vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
                if (vd[i] == NULL) {
                        /*
                         * Don't bother attempting to reopen the disks;
                         * just do the split.
                         */
                        attempt_reopen = B_FALSE;
                } else {
                        /* attempt to re-online it */
                        vd[i]->vdev_offline = B_FALSE;
                }
        }

        if (attempt_reopen) {
                vdev_reopen(spa->spa_root_vdev);

                /* check each device to see what state it's in */
                for (extracted = 0, i = 0; i < gcount; i++) {
                        if (vd[i] != NULL &&
                            vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
                                break;
                        ++extracted;
                }
        }

        /*
         * If every disk has been moved to the new pool, or if we never
         * even attempted to look at them, then we split them off for
         * good.
         */
        if (!attempt_reopen || gcount == extracted) {
                for (i = 0; i < gcount; i++)
                        if (vd[i] != NULL)
                                vdev_split(vd[i]);
                vdev_reopen(spa->spa_root_vdev);
        }

        kmem_free(vd, gcount * sizeof (vdev_t *));
}
2066 */ 2067 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2068 &spa->spa_ubsync.ub_version) != 0) 2069 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2070 2071 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2072 &spa->spa_config_txg); 2073 2074 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2075 spa_guid_exists(pool_guid, 0)) { 2076 error = SET_ERROR(EEXIST); 2077 } else { 2078 spa->spa_config_guid = pool_guid; 2079 2080 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2081 &nvl) == 0) { 2082 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2083 KM_SLEEP) == 0); 2084 } 2085 2086 nvlist_free(spa->spa_load_info); 2087 spa->spa_load_info = fnvlist_alloc(); 2088 2089 gethrestime(&spa->spa_loaded_ts); 2090 error = spa_load_impl(spa, pool_guid, config, state, type, 2091 mosconfig, &ereport); 2092 } 2093 2094 spa->spa_minref = refcount_count(&spa->spa_refcount); 2095 if (error) { 2096 if (error != EEXIST) { 2097 spa->spa_loaded_ts.tv_sec = 0; 2098 spa->spa_loaded_ts.tv_nsec = 0; 2099 } 2100 if (error != EBADF) { 2101 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2102 } 2103 } 2104 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2105 spa->spa_ena = 0; 2106 2107 return (error); 2108 } 2109 2110 /* 2111 * Load an existing storage pool, using the pool's builtin spa_config as a 2112 * source of configuration information. 2113 */ 2114 static int 2115 spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2116 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2117 char **ereport) 2118 { 2119 int error = 0; 2120 nvlist_t *nvroot = NULL; 2121 nvlist_t *label; 2122 vdev_t *rvd; 2123 uberblock_t *ub = &spa->spa_uberblock; 2124 uint64_t children, config_cache_txg = spa->spa_config_txg; 2125 int orig_mode = spa->spa_mode; 2126 int parse; 2127 uint64_t obj; 2128 boolean_t missing_feat_write = B_FALSE; 2129 2130 /* 2131 * If this is an untrusted config, access the pool in read-only mode. 2132 * This prevents things like resilvering recently removed devices. 2133 */ 2134 if (!mosconfig) 2135 spa->spa_mode = FREAD; 2136 2137 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2138 2139 spa->spa_load_state = state; 2140 2141 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2142 return (SET_ERROR(EINVAL)); 2143 2144 parse = (type == SPA_IMPORT_EXISTING ? 2145 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2146 2147 /* 2148 * Create "The Godfather" zio to hold all async IOs 2149 */ 2150 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 2151 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 2152 2153 /* 2154 * Parse the configuration into a vdev tree. We explicitly set the 2155 * value that will be returned by spa_version() since parsing the 2156 * configuration requires knowing the version number. 2157 */ 2158 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2159 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2160 spa_config_exit(spa, SCL_ALL, FTAG); 2161 2162 if (error != 0) 2163 return (error); 2164 2165 ASSERT(spa->spa_root_vdev == rvd); 2166 2167 if (type != SPA_IMPORT_ASSEMBLE) { 2168 ASSERT(spa_guid(spa) == pool_guid); 2169 } 2170 2171 /* 2172 * Try to open all vdevs, loading each label in the process. 
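* (vdev_open() recurses over the whole tree; as each leaf device opens, one * of the four label copies it carries is read back in, giving us that * device's view of the pool configuration.)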
2173 */ 2174 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2175 error = vdev_open(rvd); 2176 spa_config_exit(spa, SCL_ALL, FTAG); 2177 if (error != 0) 2178 return (error); 2179 2180 /* 2181 * We need to validate the vdev labels against the configuration that 2182 * we have in hand, which is dependent on the setting of mosconfig. If 2183 * mosconfig is true then we're validating the vdev labels based on 2184 * that config. Otherwise, we're validating against the cached config 2185 * (zpool.cache) that was read when we loaded the zfs module, and then 2186 * later we will recursively call spa_load() and validate against 2187 * the vdev config. 2188 * 2189 * If we're assembling a new pool that's been split off from an 2190 * existing pool, the labels haven't yet been updated so we skip 2191 * validation for now. 2192 */ 2193 if (type != SPA_IMPORT_ASSEMBLE) { 2194 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2195 error = vdev_validate(rvd, mosconfig); 2196 spa_config_exit(spa, SCL_ALL, FTAG); 2197 2198 if (error != 0) 2199 return (error); 2200 2201 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2202 return (SET_ERROR(ENXIO)); 2203 } 2204 2205 /* 2206 * Find the best uberblock. 2207 */ 2208 vdev_uberblock_load(rvd, ub, &label); 2209 2210 /* 2211 * If we weren't able to find a single valid uberblock, return failure. 2212 */ 2213 if (ub->ub_txg == 0) { 2214 nvlist_free(label); 2215 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2216 } 2217 2218 /* 2219 * If the pool has an unsupported version we can't open it. 2220 */ 2221 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2222 nvlist_free(label); 2223 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2224 } 2225 2226 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2227 nvlist_t *features; 2228 2229 /* 2230 * If we weren't able to find what's necessary for reading the 2231 * MOS in the label, return failure. 2232 */ 2233 if (label == NULL || nvlist_lookup_nvlist(label, 2234 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2235 nvlist_free(label); 2236 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2237 ENXIO)); 2238 } 2239 2240 /* 2241 * Update our in-core representation with the definitive values 2242 * from the label. 2243 */ 2244 nvlist_free(spa->spa_label_features); 2245 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2246 } 2247 2248 nvlist_free(label); 2249 2250 /* 2251 * Look through entries in the label nvlist's features_for_read. If 2252 * there is a feature listed there which we don't understand then we 2253 * cannot open a pool. 2254 */ 2255 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2256 nvlist_t *unsup_feat; 2257 2258 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2259 0); 2260 2261 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2262 NULL); nvp != NULL; 2263 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2264 if (!zfeature_is_supported(nvpair_name(nvp))) { 2265 VERIFY(nvlist_add_string(unsup_feat, 2266 nvpair_name(nvp), "") == 0); 2267 } 2268 } 2269 2270 if (!nvlist_empty(unsup_feat)) { 2271 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2272 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2273 nvlist_free(unsup_feat); 2274 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2275 ENOTSUP)); 2276 } 2277 2278 nvlist_free(unsup_feat); 2279 } 2280 2281 /* 2282 * If the vdev guid sum doesn't match the uberblock, we have an 2283 * incomplete configuration. We first check to see if the pool 2284 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN). 
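* (The guid sum is, roughly speaking, the wrapping 64-bit sum of the guids * of every device in the tree, so a missing or foreign vdev shows up as a * mismatch against the ub_guid_sum recorded in the uberblock.)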
2285 * If it is, defer the vdev_guid_sum check till later so we 2286 * can handle missing vdevs. 2287 */ 2288 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2289 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2290 rvd->vdev_guid_sum != ub->ub_guid_sum) 2291 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2292 2293 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2294 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2295 spa_try_repair(spa, config); 2296 spa_config_exit(spa, SCL_ALL, FTAG); 2297 nvlist_free(spa->spa_config_splitting); 2298 spa->spa_config_splitting = NULL; 2299 } 2300 2301 /* 2302 * Initialize internal SPA structures. 2303 */ 2304 spa->spa_state = POOL_STATE_ACTIVE; 2305 spa->spa_ubsync = spa->spa_uberblock; 2306 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2307 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2308 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 2309 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2310 spa->spa_claim_max_txg = spa->spa_first_txg; 2311 spa->spa_prev_software_version = ub->ub_software_version; 2312 2313 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2314 if (error) 2315 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2316 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2317 2318 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2319 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2320 2321 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2322 boolean_t missing_feat_read = B_FALSE; 2323 nvlist_t *unsup_feat, *enabled_feat; 2324 2325 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2326 &spa->spa_feat_for_read_obj) != 0) { 2327 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2328 } 2329 2330 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2331 &spa->spa_feat_for_write_obj) != 0) { 2332 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2333 } 2334 2335 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2336 &spa->spa_feat_desc_obj) != 0) { 2337 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2338 } 2339 2340 enabled_feat = fnvlist_alloc(); 2341 unsup_feat = fnvlist_alloc(); 2342 2343 if (!spa_features_check(spa, B_FALSE, 2344 unsup_feat, enabled_feat)) 2345 missing_feat_read = B_TRUE; 2346 2347 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2348 if (!spa_features_check(spa, B_TRUE, 2349 unsup_feat, enabled_feat)) { 2350 missing_feat_write = B_TRUE; 2351 } 2352 } 2353 2354 fnvlist_add_nvlist(spa->spa_load_info, 2355 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2356 2357 if (!nvlist_empty(unsup_feat)) { 2358 fnvlist_add_nvlist(spa->spa_load_info, 2359 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2360 } 2361 2362 fnvlist_free(enabled_feat); 2363 fnvlist_free(unsup_feat); 2364 2365 if (!missing_feat_read) { 2366 fnvlist_add_boolean(spa->spa_load_info, 2367 ZPOOL_CONFIG_CAN_RDONLY); 2368 } 2369 2370 /* 2371 * If the state is SPA_LOAD_TRYIMPORT, our objective is 2372 * twofold: to determine whether the pool is available for 2373 * import in read-write mode and (if it is not) whether the 2374 * pool is available for import in read-only mode. If the pool 2375 * is available for import in read-write mode, it is displayed 2376 * as available in userland; if it is not available for import 2377 * in read-only mode, it is displayed as unavailable in 2378 * userland. 
If the pool is available for import in read-only 2379 * mode but not read-write mode, it is displayed as unavailable 2380 * in userland with a special note that the pool is actually 2381 * available for open in read-only mode. 2382 * 2383 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2384 * missing a feature for write, we must first determine whether 2385 * the pool can be opened read-only before returning to 2386 * userland in order to know whether to display the 2387 * abovementioned note. 2388 */ 2389 if (missing_feat_read || (missing_feat_write && 2390 spa_writeable(spa))) { 2391 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2392 ENOTSUP)); 2393 } 2394 2395 /* 2396 * Load refcounts for ZFS features from disk into an in-memory 2397 * cache during SPA initialization. 2398 */ 2399 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2400 uint64_t refcount; 2401 2402 error = feature_get_refcount_from_disk(spa, 2403 &spa_feature_table[i], &refcount); 2404 if (error == 0) { 2405 spa->spa_feat_refcount_cache[i] = refcount; 2406 } else if (error == ENOTSUP) { 2407 spa->spa_feat_refcount_cache[i] = 2408 SPA_FEATURE_DISABLED; 2409 } else { 2410 return (spa_vdev_err(rvd, 2411 VDEV_AUX_CORRUPT_DATA, EIO)); 2412 } 2413 } 2414 } 2415 2416 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 2417 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 2418 &spa->spa_feat_enabled_txg_obj) != 0) 2419 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2420 } 2421 2422 spa->spa_is_initializing = B_TRUE; 2423 error = dsl_pool_open(spa->spa_dsl_pool); 2424 spa->spa_is_initializing = B_FALSE; 2425 if (error != 0) 2426 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2427 2428 if (!mosconfig) { 2429 uint64_t hostid; 2430 nvlist_t *policy = NULL, *nvconfig; 2431 2432 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2433 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2434 2435 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2436 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2437 char *hostname; 2438 unsigned long myhostid = 0; 2439 2440 VERIFY(nvlist_lookup_string(nvconfig, 2441 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2442 2443 #ifdef _KERNEL 2444 myhostid = zone_get_hostid(NULL); 2445 #else /* _KERNEL */ 2446 /* 2447 * We're emulating the system's hostid in userland, so 2448 * we can't use zone_get_hostid(). 2449 */ 2450 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2451 #endif /* _KERNEL */ 2452 if (hostid != 0 && myhostid != 0 && 2453 hostid != myhostid) { 2454 nvlist_free(nvconfig); 2455 cmn_err(CE_WARN, "pool '%s' could not be " 2456 "loaded as it was last accessed by " 2457 "another system (host: %s hostid: 0x%lx). 
" 2458 "See: http://illumos.org/msg/ZFS-8000-EY", 2459 spa_name(spa), hostname, 2460 (unsigned long)hostid); 2461 return (SET_ERROR(EBADF)); 2462 } 2463 } 2464 if (nvlist_lookup_nvlist(spa->spa_config, 2465 ZPOOL_REWIND_POLICY, &policy) == 0) 2466 VERIFY(nvlist_add_nvlist(nvconfig, 2467 ZPOOL_REWIND_POLICY, policy) == 0); 2468 2469 spa_config_set(spa, nvconfig); 2470 spa_unload(spa); 2471 spa_deactivate(spa); 2472 spa_activate(spa, orig_mode); 2473 2474 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2475 } 2476 2477 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2478 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2479 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2480 if (error != 0) 2481 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2482 2483 /* 2484 * Load the bit that tells us to use the new accounting function 2485 * (raid-z deflation). If we have an older pool, this will not 2486 * be present. 2487 */ 2488 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2489 if (error != 0 && error != ENOENT) 2490 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2491 2492 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2493 &spa->spa_creation_version); 2494 if (error != 0 && error != ENOENT) 2495 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2496 2497 /* 2498 * Load the persistent error log. If we have an older pool, this will 2499 * not be present. 2500 */ 2501 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2502 if (error != 0 && error != ENOENT) 2503 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2504 2505 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2506 &spa->spa_errlog_scrub); 2507 if (error != 0 && error != ENOENT) 2508 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2509 2510 /* 2511 * Load the history object. If we have an older pool, this 2512 * will not be present. 2513 */ 2514 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2515 if (error != 0 && error != ENOENT) 2516 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2517 2518 /* 2519 * If we're assembling the pool from the split-off vdevs of 2520 * an existing pool, we don't want to attach the spares & cache 2521 * devices. 2522 */ 2523 2524 /* 2525 * Load any hot spares for this pool. 2526 */ 2527 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2528 if (error != 0 && error != ENOENT) 2529 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2530 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2531 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2532 if (load_nvlist(spa, spa->spa_spares.sav_object, 2533 &spa->spa_spares.sav_config) != 0) 2534 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2535 2536 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2537 spa_load_spares(spa); 2538 spa_config_exit(spa, SCL_ALL, FTAG); 2539 } else if (error == 0) { 2540 spa->spa_spares.sav_sync = B_TRUE; 2541 } 2542 2543 /* 2544 * Load any level 2 ARC devices for this pool. 
2545 */ 2546 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2547 &spa->spa_l2cache.sav_object); 2548 if (error != 0 && error != ENOENT) 2549 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2550 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2551 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2552 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2553 &spa->spa_l2cache.sav_config) != 0) 2554 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2555 2556 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2557 spa_load_l2cache(spa); 2558 spa_config_exit(spa, SCL_ALL, FTAG); 2559 } else if (error == 0) { 2560 spa->spa_l2cache.sav_sync = B_TRUE; 2561 } 2562 2563 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2564 2565 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2566 if (error && error != ENOENT) 2567 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2568 2569 if (error == 0) { 2570 uint64_t autoreplace; 2571 2572 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2573 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2574 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2575 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2576 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2577 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2578 &spa->spa_dedup_ditto); 2579 2580 spa->spa_autoreplace = (autoreplace != 0); 2581 } 2582 2583 /* 2584 * If the 'autoreplace' property is set, then post a resource notifying 2585 * the ZFS DE that it should not issue any faults for unopenable 2586 * devices. We also iterate over the vdevs, and post a sysevent for any 2587 * unopenable vdevs so that the normal autoreplace handler can take 2588 * over. 2589 */ 2590 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2591 spa_check_removed(spa->spa_root_vdev); 2592 /* 2593 * For the import case, this is done in spa_import(), because 2594 * at this point we're using the spare definitions from 2595 * the MOS config, not necessarily from the userland config. 2596 */ 2597 if (state != SPA_LOAD_IMPORT) { 2598 spa_aux_check_removed(&spa->spa_spares); 2599 spa_aux_check_removed(&spa->spa_l2cache); 2600 } 2601 } 2602 2603 /* 2604 * Load the vdev state for all toplevel vdevs. 2605 */ 2606 vdev_load(rvd); 2607 2608 /* 2609 * Propagate the leaf DTLs we just loaded all the way up the tree. 2610 */ 2611 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2612 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2613 spa_config_exit(spa, SCL_ALL, FTAG); 2614 2615 /* 2616 * Load the DDTs (dedup tables). 2617 */ 2618 error = ddt_load(spa); 2619 if (error != 0) 2620 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2621 2622 spa_update_dspace(spa); 2623 2624 /* 2625 * Validate the config, using the MOS config to fill in any 2626 * information which might be missing. If we fail to validate 2627 * the config then declare the pool unfit for use. If we're 2628 * assembling a pool from a split, the log is not transferred 2629 * over. 2630 */ 2631 if (type != SPA_IMPORT_ASSEMBLE) { 2632 nvlist_t *nvconfig; 2633 2634 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2635 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2636 2637 if (!spa_config_valid(spa, nvconfig)) { 2638 nvlist_free(nvconfig); 2639 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2640 ENXIO)); 2641 } 2642 nvlist_free(nvconfig); 2643 2644 /* 2645 * Now that we've validated the config, check the state of the 2646 * root vdev. 
If it can't be opened, it indicates one or 2647 * more toplevel vdevs are faulted. 2648 */ 2649 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2650 return (SET_ERROR(ENXIO)); 2651 2652 if (spa_check_logs(spa)) { 2653 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2654 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2655 } 2656 } 2657 2658 if (missing_feat_write) { 2659 ASSERT(state == SPA_LOAD_TRYIMPORT); 2660 2661 /* 2662 * At this point, we know that we can open the pool in 2663 * read-only mode but not read-write mode. We now have enough 2664 * information and can return to userland. 2665 */ 2666 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2667 } 2668 2669 /* 2670 * We've successfully opened the pool; verify that we're ready 2671 * to start pushing transactions. 2672 */ 2673 if (state != SPA_LOAD_TRYIMPORT) { 2674 if ((error = spa_load_verify(spa)) != 0) 2675 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2676 error)); 2677 } 2678 2679 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER || 2680 spa->spa_load_max_txg == UINT64_MAX)) { 2681 dmu_tx_t *tx; 2682 int need_update = B_FALSE; 2683 2684 ASSERT(state != SPA_LOAD_TRYIMPORT); 2685 2686 /* 2687 * Claim log blocks that haven't been committed yet. 2688 * This must all happen in a single txg. 2689 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 2690 * invoked from zil_claim_log_block()'s i/o done callback. 2691 * Price of rollback is that we abandon the log. 2692 */ 2693 spa->spa_claiming = B_TRUE; 2694 2695 tx = dmu_tx_create_assigned(spa_get_dsl(spa), 2696 spa_first_txg(spa)); 2697 (void) dmu_objset_find(spa_name(spa), 2698 zil_claim, tx, DS_FIND_CHILDREN); 2699 dmu_tx_commit(tx); 2700 2701 spa->spa_claiming = B_FALSE; 2702 2703 spa_set_log_state(spa, SPA_LOG_GOOD); 2704 spa->spa_sync_on = B_TRUE; 2705 txg_sync_start(spa->spa_dsl_pool); 2706 2707 /* 2708 * Wait for all claims to sync. We sync up to the highest 2709 * claimed log block birth time so that claimed log blocks 2710 * don't appear to be from the future. spa_claim_max_txg 2711 * will have been set for us by either zil_check_log_chain() 2712 * (invoked from spa_check_logs()) or zil_claim() above. 2713 */ 2714 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 2715 2716 /* 2717 * If the config cache is stale, or we have uninitialized 2718 * metaslabs (see spa_vdev_add()), then update the config. 2719 * 2720 * If this is a verbatim import, trust the current 2721 * in-core spa_config and update the disk labels. 2722 */ 2723 if (config_cache_txg != spa->spa_config_txg || 2724 state == SPA_LOAD_IMPORT || 2725 state == SPA_LOAD_RECOVER || 2726 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 2727 need_update = B_TRUE; 2728 2729 for (int c = 0; c < rvd->vdev_children; c++) 2730 if (rvd->vdev_child[c]->vdev_ms_array == 0) 2731 need_update = B_TRUE; 2732 2733 /* 2734 * Update the config cache asynchronously in case we're the 2735 * root pool, in which case the config cache isn't writable yet. 2736 */ 2737 if (need_update) 2738 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2739 2740 /* 2741 * Check all DTLs to see if anything needs resilvering. 2742 */ 2743 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 2744 vdev_resilver_needed(rvd, NULL, NULL)) 2745 spa_async_request(spa, SPA_ASYNC_RESILVER); 2746 2747 /* 2748 * Log the fact that we booted up (so that we can detect if 2749 * we rebooted in the middle of an operation). 2750 */ 2751 spa_history_log_version(spa, "open"); 2752 2753 /* 2754 * Delete any inconsistent datasets. 
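* (For example, a dataset left half-received by an interrupted 'zfs receive'; * dsl_destroy_inconsistent() disposes of anything still flagged inconsistent.)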
2755 */ 2756 (void) dmu_objset_find(spa_name(spa), 2757 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 2758 2759 /* 2760 * Clean up any stale temporary dataset userrefs. 2761 */ 2762 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 2763 } 2764 2765 return (0); 2766 } 2767 2768 static int 2769 spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 2770 { 2771 int mode = spa->spa_mode; 2772 2773 spa_unload(spa); 2774 spa_deactivate(spa); 2775 2776 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 2777 2778 spa_activate(spa, mode); 2779 spa_async_suspend(spa); 2780 2781 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 2782 } 2783 2784 /* 2785 * If spa_load() fails this function will try loading prior txg's. If 2786 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 2787 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 2788 * function will not rewind the pool and will return the same error as 2789 * spa_load(). 2790 */ 2791 static int 2792 spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 2793 uint64_t max_request, int rewind_flags) 2794 { 2795 nvlist_t *loadinfo = NULL; 2796 nvlist_t *config = NULL; 2797 int load_error, rewind_error; 2798 uint64_t safe_rewind_txg; 2799 uint64_t min_txg; 2800 2801 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 2802 spa->spa_load_max_txg = spa->spa_load_txg; 2803 spa_set_log_state(spa, SPA_LOG_CLEAR); 2804 } else { 2805 spa->spa_load_max_txg = max_request; 2806 if (max_request != UINT64_MAX) 2807 spa->spa_extreme_rewind = B_TRUE; 2808 } 2809 2810 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 2811 mosconfig); 2812 if (load_error == 0) 2813 return (0); 2814 2815 if (spa->spa_root_vdev != NULL) 2816 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2817 2818 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 2819 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 2820 2821 if (rewind_flags & ZPOOL_NEVER_REWIND) { 2822 nvlist_free(config); 2823 return (load_error); 2824 } 2825 2826 if (state == SPA_LOAD_RECOVER) { 2827 /* Price of rolling back is discarding txgs, including log */ 2828 spa_set_log_state(spa, SPA_LOG_CLEAR); 2829 } else { 2830 /* 2831 * If we aren't rolling back save the load info from our first 2832 * import attempt so that we can restore it after attempting 2833 * to rewind. 2834 */ 2835 loadinfo = spa->spa_load_info; 2836 spa->spa_load_info = fnvlist_alloc(); 2837 } 2838 2839 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 2840 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 2841 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 
2842 TXG_INITIAL : safe_rewind_txg; 2843 2844 /* 2845 * Continue as long as we're finding errors, we're still within 2846 * the acceptable rewind range, and we're still finding uberblocks. 2847 */ 2848 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 2849 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 2850 if (spa->spa_load_max_txg < safe_rewind_txg) 2851 spa->spa_extreme_rewind = B_TRUE; 2852 rewind_error = spa_load_retry(spa, state, mosconfig); 2853 } 2854 2855 spa->spa_extreme_rewind = B_FALSE; 2856 spa->spa_load_max_txg = UINT64_MAX; 2857 2858 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 2859 spa_config_set(spa, config); 2860 2861 if (state == SPA_LOAD_RECOVER) { 2862 ASSERT3P(loadinfo, ==, NULL); 2863 return (rewind_error); 2864 } else { 2865 /* Store the rewind info as part of the initial load info */ 2866 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 2867 spa->spa_load_info); 2868 2869 /* Restore the initial load info */ 2870 fnvlist_free(spa->spa_load_info); 2871 spa->spa_load_info = loadinfo; 2872 2873 return (load_error); 2874 } 2875 } 2876 2877 /* 2878 * Pool Open/Import 2879 * 2880 * The import case is identical to an open except that the configuration is sent 2881 * down from userland, instead of grabbed from the configuration cache. For the 2882 * case of an open, the pool configuration will exist in the 2883 * POOL_STATE_UNINITIALIZED state. 2884 * 2885 * The stats information (gen/count/ustats) is used to gather vdev statistics at 2886 * the same time we open the pool, without having to keep around the spa_t in some 2887 * ambiguous state. 2888 */ 2889 static int 2890 spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 2891 nvlist_t **config) 2892 { 2893 spa_t *spa; 2894 spa_load_state_t state = SPA_LOAD_OPEN; 2895 int error; 2896 int locked = B_FALSE; 2897 2898 *spapp = NULL; 2899 2900 /* 2901 * As disgusting as this is, we need to support recursive calls to this 2902 * function because dsl_dir_open() is called during spa_load(), and ends 2903 * up calling spa_open() again. The real fix is to figure out how to 2904 * avoid dsl_dir_open() calling this in the first place. 2905 */ 2906 if (mutex_owner(&spa_namespace_lock) != curthread) { 2907 mutex_enter(&spa_namespace_lock); 2908 locked = B_TRUE; 2909 } 2910 2911 if ((spa = spa_lookup(pool)) == NULL) { 2912 if (locked) 2913 mutex_exit(&spa_namespace_lock); 2914 return (SET_ERROR(ENOENT)); 2915 } 2916 2917 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 2918 zpool_rewind_policy_t policy; 2919 2920 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config, 2921 &policy); 2922 if (policy.zrp_request & ZPOOL_DO_REWIND) 2923 state = SPA_LOAD_RECOVER; 2924 2925 spa_activate(spa, spa_mode_global); 2926 2927 if (state != SPA_LOAD_RECOVER) 2928 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 2929 2930 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 2931 policy.zrp_request); 2932 2933 if (error == EBADF) { 2934 /* 2935 * If vdev_validate() returns failure (indicated by 2936 * EBADF), it means that one of the vdevs indicates 2937 * that the pool has been exported or destroyed. If 2938 * this is the case, the config cache is out of sync and 2939 * we should remove the pool from the namespace. 
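* (The spa_config_sync() call below, invoked with its 'removing' argument * set to B_TRUE, is what actually drops the stale entry from the cache file.)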
2940 */ 2941 spa_unload(spa); 2942 spa_deactivate(spa); 2943 spa_config_sync(spa, B_TRUE, B_TRUE); 2944 spa_remove(spa); 2945 if (locked) 2946 mutex_exit(&spa_namespace_lock); 2947 return (SET_ERROR(ENOENT)); 2948 } 2949 2950 if (error) { 2951 /* 2952 * We can't open the pool, but we still have useful 2953 * information: the state of each vdev after the 2954 * attempted vdev_open(). Return this to the user. 2955 */ 2956 if (config != NULL && spa->spa_config) { 2957 VERIFY(nvlist_dup(spa->spa_config, config, 2958 KM_SLEEP) == 0); 2959 VERIFY(nvlist_add_nvlist(*config, 2960 ZPOOL_CONFIG_LOAD_INFO, 2961 spa->spa_load_info) == 0); 2962 } 2963 spa_unload(spa); 2964 spa_deactivate(spa); 2965 spa->spa_last_open_failed = error; 2966 if (locked) 2967 mutex_exit(&spa_namespace_lock); 2968 *spapp = NULL; 2969 return (error); 2970 } 2971 } 2972 2973 spa_open_ref(spa, tag); 2974 2975 if (config != NULL) 2976 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2977 2978 /* 2979 * If we've recovered the pool, pass back any information we 2980 * gathered while doing the load. 2981 */ 2982 if (state == SPA_LOAD_RECOVER) { 2983 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 2984 spa->spa_load_info) == 0); 2985 } 2986 2987 if (locked) { 2988 spa->spa_last_open_failed = 0; 2989 spa->spa_last_ubsync_txg = 0; 2990 spa->spa_load_txg = 0; 2991 mutex_exit(&spa_namespace_lock); 2992 } 2993 2994 *spapp = spa; 2995 2996 return (0); 2997 } 2998 2999 int 3000 spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 3001 nvlist_t **config) 3002 { 3003 return (spa_open_common(name, spapp, tag, policy, config)); 3004 } 3005 3006 int 3007 spa_open(const char *name, spa_t **spapp, void *tag) 3008 { 3009 return (spa_open_common(name, spapp, tag, NULL, NULL)); 3010 } 3011 3012 /* 3013 * Lookup the given spa_t, incrementing the inject count in the process, 3014 * preventing it from being exported or destroyed. 3015 */ 3016 spa_t * 3017 spa_inject_addref(char *name) 3018 { 3019 spa_t *spa; 3020 3021 mutex_enter(&spa_namespace_lock); 3022 if ((spa = spa_lookup(name)) == NULL) { 3023 mutex_exit(&spa_namespace_lock); 3024 return (NULL); 3025 } 3026 spa->spa_inject_ref++; 3027 mutex_exit(&spa_namespace_lock); 3028 3029 return (spa); 3030 } 3031 3032 void 3033 spa_inject_delref(spa_t *spa) 3034 { 3035 mutex_enter(&spa_namespace_lock); 3036 spa->spa_inject_ref--; 3037 mutex_exit(&spa_namespace_lock); 3038 } 3039 3040 /* 3041 * Add spares device information to the nvlist. 3042 */ 3043 static void 3044 spa_add_spares(spa_t *spa, nvlist_t *config) 3045 { 3046 nvlist_t **spares; 3047 uint_t i, nspares; 3048 nvlist_t *nvroot; 3049 uint64_t guid; 3050 vdev_stat_t *vs; 3051 uint_t vsc; 3052 uint64_t pool; 3053 3054 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3055 3056 if (spa->spa_spares.sav_count == 0) 3057 return; 3058 3059 VERIFY(nvlist_lookup_nvlist(config, 3060 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3061 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3062 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3063 if (nspares != 0) { 3064 VERIFY(nvlist_add_nvlist_array(nvroot, 3065 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3066 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3067 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3068 3069 /* 3070 * Go through and find any spares which have since been 3071 * repurposed as an active spare. If this is the case, update 3072 * their status appropriately. 
3073 */ 3074 for (i = 0; i < nspares; i++) { 3075 VERIFY(nvlist_lookup_uint64(spares[i], 3076 ZPOOL_CONFIG_GUID, &guid) == 0); 3077 if (spa_spare_exists(guid, &pool, NULL) && 3078 pool != 0ULL) { 3079 VERIFY(nvlist_lookup_uint64_array( 3080 spares[i], ZPOOL_CONFIG_VDEV_STATS, 3081 (uint64_t **)&vs, &vsc) == 0); 3082 vs->vs_state = VDEV_STATE_CANT_OPEN; 3083 vs->vs_aux = VDEV_AUX_SPARED; 3084 } 3085 } 3086 } 3087 } 3088 3089 /* 3090 * Add l2cache device information to the nvlist, including vdev stats. 3091 */ 3092 static void 3093 spa_add_l2cache(spa_t *spa, nvlist_t *config) 3094 { 3095 nvlist_t **l2cache; 3096 uint_t i, j, nl2cache; 3097 nvlist_t *nvroot; 3098 uint64_t guid; 3099 vdev_t *vd; 3100 vdev_stat_t *vs; 3101 uint_t vsc; 3102 3103 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3104 3105 if (spa->spa_l2cache.sav_count == 0) 3106 return; 3107 3108 VERIFY(nvlist_lookup_nvlist(config, 3109 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3110 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3111 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3112 if (nl2cache != 0) { 3113 VERIFY(nvlist_add_nvlist_array(nvroot, 3114 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3115 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3116 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3117 3118 /* 3119 * Update level 2 cache device stats. 3120 */ 3121 3122 for (i = 0; i < nl2cache; i++) { 3123 VERIFY(nvlist_lookup_uint64(l2cache[i], 3124 ZPOOL_CONFIG_GUID, &guid) == 0); 3125 3126 vd = NULL; 3127 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3128 if (guid == 3129 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3130 vd = spa->spa_l2cache.sav_vdevs[j]; 3131 break; 3132 } 3133 } 3134 ASSERT(vd != NULL); 3135 3136 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3137 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3138 == 0); 3139 vdev_get_stats(vd, vs); 3140 } 3141 } 3142 } 3143 3144 static void 3145 spa_add_feature_stats(spa_t *spa, nvlist_t *config) 3146 { 3147 nvlist_t *features; 3148 zap_cursor_t zc; 3149 zap_attribute_t za; 3150 3151 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3152 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3153 3154 if (spa->spa_feat_for_read_obj != 0) { 3155 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3156 spa->spa_feat_for_read_obj); 3157 zap_cursor_retrieve(&zc, &za) == 0; 3158 zap_cursor_advance(&zc)) { 3159 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3160 za.za_num_integers == 1); 3161 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3162 za.za_first_integer)); 3163 } 3164 zap_cursor_fini(&zc); 3165 } 3166 3167 if (spa->spa_feat_for_write_obj != 0) { 3168 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3169 spa->spa_feat_for_write_obj); 3170 zap_cursor_retrieve(&zc, &za) == 0; 3171 zap_cursor_advance(&zc)) { 3172 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3173 za.za_num_integers == 1); 3174 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3175 za.za_first_integer)); 3176 } 3177 zap_cursor_fini(&zc); 3178 } 3179 3180 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3181 features) == 0); 3182 nvlist_free(features); 3183 } 3184 3185 int 3186 spa_get_stats(const char *name, nvlist_t **config, 3187 char *altroot, size_t buflen) 3188 { 3189 int error; 3190 spa_t *spa; 3191 3192 *config = NULL; 3193 error = spa_open_common(name, &spa, FTAG, NULL, config); 3194 3195 if (spa != NULL) { 3196 /* 3197 * This still leaves a window of inconsistency where the spares 3198 * or l2cache devices could change and 
the config would be 3199 * self-inconsistent. 3200 */ 3201 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3202 3203 if (*config != NULL) { 3204 uint64_t loadtimes[2]; 3205 3206 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3207 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3208 VERIFY(nvlist_add_uint64_array(*config, 3209 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3210 3211 VERIFY(nvlist_add_uint64(*config, 3212 ZPOOL_CONFIG_ERRCOUNT, 3213 spa_get_errlog_size(spa)) == 0); 3214 3215 if (spa_suspended(spa)) 3216 VERIFY(nvlist_add_uint64(*config, 3217 ZPOOL_CONFIG_SUSPENDED, 3218 spa->spa_failmode) == 0); 3219 3220 spa_add_spares(spa, *config); 3221 spa_add_l2cache(spa, *config); 3222 spa_add_feature_stats(spa, *config); 3223 } 3224 } 3225 3226 /* 3227 * We want to get the alternate root even for faulted pools, so we cheat 3228 * and call spa_lookup() directly. 3229 */ 3230 if (altroot) { 3231 if (spa == NULL) { 3232 mutex_enter(&spa_namespace_lock); 3233 spa = spa_lookup(name); 3234 if (spa) 3235 spa_altroot(spa, altroot, buflen); 3236 else 3237 altroot[0] = '\0'; 3238 spa = NULL; 3239 mutex_exit(&spa_namespace_lock); 3240 } else { 3241 spa_altroot(spa, altroot, buflen); 3242 } 3243 } 3244 3245 if (spa != NULL) { 3246 spa_config_exit(spa, SCL_CONFIG, FTAG); 3247 spa_close(spa, FTAG); 3248 } 3249 3250 return (error); 3251 } 3252 3253 /* 3254 * Validate that the auxiliary device array is well-formed. We must have an 3255 * array of nvlists, each of which describes a valid leaf vdev. If this is an 3256 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3257 * specified, as long as they are well-formed. 3258 */ 3259 static int 3260 spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3261 spa_aux_vdev_t *sav, const char *config, uint64_t version, 3262 vdev_labeltype_t label) 3263 { 3264 nvlist_t **dev; 3265 uint_t i, ndev; 3266 vdev_t *vd; 3267 int error; 3268 3269 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3270 3271 /* 3272 * It's acceptable to have no devs specified. 3273 */ 3274 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3275 return (0); 3276 3277 if (ndev == 0) 3278 return (SET_ERROR(EINVAL)); 3279 3280 /* 3281 * Make sure the pool is formatted with a version that supports this 3282 * device type. 3283 */ 3284 if (spa_version(spa) < version) 3285 return (SET_ERROR(ENOTSUP)); 3286 3287 /* 3288 * Set the pending device list so we correctly handle device in-use 3289 * checking. 3290 */ 3291 sav->sav_pending = dev; 3292 sav->sav_npending = ndev; 3293 3294 for (i = 0; i < ndev; i++) { 3295 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3296 mode)) != 0) 3297 goto out; 3298 3299 if (!vd->vdev_ops->vdev_op_leaf) { 3300 vdev_free(vd); 3301 error = SET_ERROR(EINVAL); 3302 goto out; 3303 } 3304 3305 /* 3306 * The L2ARC currently only supports disk devices in 3307 * kernel context. For user-level testing, we allow it. 
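* (ztest, for instance, backs cache devices with plain files, which would * fail the VDEV_TYPE_DISK test; hence the check is compiled for the kernel * only.)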
3308 */ 3309 #ifdef _KERNEL 3310 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3311 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3312 error = SET_ERROR(ENOTBLK); 3313 vdev_free(vd); 3314 goto out; 3315 } 3316 #endif 3317 vd->vdev_top = vd; 3318 3319 if ((error = vdev_open(vd)) == 0 && 3320 (error = vdev_label_init(vd, crtxg, label)) == 0) { 3321 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3322 vd->vdev_guid) == 0); 3323 } 3324 3325 vdev_free(vd); 3326 3327 if (error && 3328 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3329 goto out; 3330 else 3331 error = 0; 3332 } 3333 3334 out: 3335 sav->sav_pending = NULL; 3336 sav->sav_npending = 0; 3337 return (error); 3338 } 3339 3340 static int 3341 spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3342 { 3343 int error; 3344 3345 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3346 3347 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3348 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3349 VDEV_LABEL_SPARE)) != 0) { 3350 return (error); 3351 } 3352 3353 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3354 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3355 VDEV_LABEL_L2CACHE)); 3356 } 3357 3358 static void 3359 spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3360 const char *config) 3361 { 3362 int i; 3363 3364 if (sav->sav_config != NULL) { 3365 nvlist_t **olddevs; 3366 uint_t oldndevs; 3367 nvlist_t **newdevs; 3368 3369 /* 3370 * Generate a new dev list by concatenating the new devs 3371 * with the current dev list. 3372 */ 3373 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3374 &olddevs, &oldndevs) == 0); 3375 3376 newdevs = kmem_alloc(sizeof (void *) * 3377 (ndevs + oldndevs), KM_SLEEP); 3378 for (i = 0; i < oldndevs; i++) 3379 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3380 KM_SLEEP) == 0); 3381 for (i = 0; i < ndevs; i++) 3382 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3383 KM_SLEEP) == 0); 3384 3385 VERIFY(nvlist_remove(sav->sav_config, config, 3386 DATA_TYPE_NVLIST_ARRAY) == 0); 3387 3388 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3389 config, newdevs, ndevs + oldndevs) == 0); 3390 for (i = 0; i < oldndevs + ndevs; i++) 3391 nvlist_free(newdevs[i]); 3392 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3393 } else { 3394 /* 3395 * Generate a new dev list. 3396 */ 3397 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3398 KM_SLEEP) == 0); 3399 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3400 devs, ndevs) == 0); 3401 } 3402 } 3403 3404 /* 3405 * Stop and drop level 2 ARC devices. 3406 */ 3407 void 3408 spa_l2cache_drop(spa_t *spa) 3409 { 3410 vdev_t *vd; 3411 int i; 3412 spa_aux_vdev_t *sav = &spa->spa_l2cache; 3413 3414 for (i = 0; i < sav->sav_count; i++) { 3415 uint64_t pool; 3416 3417 vd = sav->sav_vdevs[i]; 3418 ASSERT(vd != NULL); 3419 3420 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3421 pool != 0ULL && l2arc_vdev_present(vd)) 3422 l2arc_remove_vdev(vd); 3423 } 3424 } 3425 3426 /* 3427 * Pool Creation 3428 */ 3429 int 3430 spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3431 nvlist_t *zplprops) 3432 { 3433 spa_t *spa; 3434 char *altroot = NULL; 3435 vdev_t *rvd; 3436 dsl_pool_t *dp; 3437 dmu_tx_t *tx; 3438 int error = 0; 3439 uint64_t txg = TXG_INITIAL; 3440 nvlist_t **spares, **l2cache; 3441 uint_t nspares, nl2cache; 3442 uint64_t version, obj; 3443 boolean_t has_features; 3444 3445 /* 3446 * If this pool already exists, return failure. 
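* (Only the name is checked here. The vdev layout arrives in nvroot; as a * hypothetical example, a minimal nvroot for a one-disk pool is a root nvlist * whose ZPOOL_CONFIG_CHILDREN array holds a single leaf with ZPOOL_CONFIG_TYPE * set to "disk" and a ZPOOL_CONFIG_PATH. It is parsed and validated below by * spa_config_parse() and vdev_create().)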
3447 */ 3448 mutex_enter(&spa_namespace_lock); 3449 if (spa_lookup(pool) != NULL) { 3450 mutex_exit(&spa_namespace_lock); 3451 return (SET_ERROR(EEXIST)); 3452 } 3453 3454 /* 3455 * Allocate a new spa_t structure. 3456 */ 3457 (void) nvlist_lookup_string(props, 3458 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3459 spa = spa_add(pool, NULL, altroot); 3460 spa_activate(spa, spa_mode_global); 3461 3462 if (props && (error = spa_prop_validate(spa, props))) { 3463 spa_deactivate(spa); 3464 spa_remove(spa); 3465 mutex_exit(&spa_namespace_lock); 3466 return (error); 3467 } 3468 3469 has_features = B_FALSE; 3470 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3471 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3472 if (zpool_prop_feature(nvpair_name(elem))) 3473 has_features = B_TRUE; 3474 } 3475 3476 if (has_features || nvlist_lookup_uint64(props, 3477 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3478 version = SPA_VERSION; 3479 } 3480 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3481 3482 spa->spa_first_txg = txg; 3483 spa->spa_uberblock.ub_txg = txg - 1; 3484 spa->spa_uberblock.ub_version = version; 3485 spa->spa_ubsync = spa->spa_uberblock; 3486 3487 /* 3488 * Create "The Godfather" zio to hold all async IOs 3489 */ 3490 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 3491 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 3492 3493 /* 3494 * Create the root vdev. 3495 */ 3496 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3497 3498 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3499 3500 ASSERT(error != 0 || rvd != NULL); 3501 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3502 3503 if (error == 0 && !zfs_allocatable_devs(nvroot)) 3504 error = SET_ERROR(EINVAL); 3505 3506 if (error == 0 && 3507 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3508 (error = spa_validate_aux(spa, nvroot, txg, 3509 VDEV_ALLOC_ADD)) == 0) { 3510 for (int c = 0; c < rvd->vdev_children; c++) { 3511 vdev_metaslab_set_size(rvd->vdev_child[c]); 3512 vdev_expand(rvd->vdev_child[c], txg); 3513 } 3514 } 3515 3516 spa_config_exit(spa, SCL_ALL, FTAG); 3517 3518 if (error != 0) { 3519 spa_unload(spa); 3520 spa_deactivate(spa); 3521 spa_remove(spa); 3522 mutex_exit(&spa_namespace_lock); 3523 return (error); 3524 } 3525 3526 /* 3527 * Get the list of spares, if specified. 3528 */ 3529 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3530 &spares, &nspares) == 0) { 3531 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3532 KM_SLEEP) == 0); 3533 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3534 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3535 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3536 spa_load_spares(spa); 3537 spa_config_exit(spa, SCL_ALL, FTAG); 3538 spa->spa_spares.sav_sync = B_TRUE; 3539 } 3540 3541 /* 3542 * Get the list of level 2 cache devices, if specified. 
3543 */ 3544 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3545 &l2cache, &nl2cache) == 0) { 3546 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3547 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3548 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3549 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3550 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3551 spa_load_l2cache(spa); 3552 spa_config_exit(spa, SCL_ALL, FTAG); 3553 spa->spa_l2cache.sav_sync = B_TRUE; 3554 } 3555 3556 spa->spa_is_initializing = B_TRUE; 3557 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3558 spa->spa_meta_objset = dp->dp_meta_objset; 3559 spa->spa_is_initializing = B_FALSE; 3560 3561 /* 3562 * Create DDTs (dedup tables). 3563 */ 3564 ddt_create(spa); 3565 3566 spa_update_dspace(spa); 3567 3568 tx = dmu_tx_create_assigned(dp, txg); 3569 3570 /* 3571 * Create the pool config object. 3572 */ 3573 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3574 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3575 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3576 3577 if (zap_add(spa->spa_meta_objset, 3578 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3579 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3580 cmn_err(CE_PANIC, "failed to add pool config"); 3581 } 3582 3583 if (spa_version(spa) >= SPA_VERSION_FEATURES) 3584 spa_feature_create_zap_objects(spa, tx); 3585 3586 if (zap_add(spa->spa_meta_objset, 3587 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3588 sizeof (uint64_t), 1, &version, tx) != 0) { 3589 cmn_err(CE_PANIC, "failed to add pool version"); 3590 } 3591 3592 /* Newly created pools with the right version are always deflated. */ 3593 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3594 spa->spa_deflate = TRUE; 3595 if (zap_add(spa->spa_meta_objset, 3596 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3597 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3598 cmn_err(CE_PANIC, "failed to add deflate"); 3599 } 3600 } 3601 3602 /* 3603 * Create the deferred-free bpobj. Turn off compression 3604 * because sync-to-convergence takes longer if the blocksize 3605 * keeps changing. 3606 */ 3607 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3608 dmu_object_set_compress(spa->spa_meta_objset, obj, 3609 ZIO_COMPRESS_OFF, tx); 3610 if (zap_add(spa->spa_meta_objset, 3611 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3612 sizeof (uint64_t), 1, &obj, tx) != 0) { 3613 cmn_err(CE_PANIC, "failed to add bpobj"); 3614 } 3615 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3616 spa->spa_meta_objset, obj)); 3617 3618 /* 3619 * Create the pool's history object. 3620 */ 3621 if (version >= SPA_VERSION_ZPOOL_HISTORY) 3622 spa_history_create_obj(spa, tx); 3623 3624 /* 3625 * Set pool properties. 3626 */ 3627 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3628 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3629 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3630 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3631 3632 if (props != NULL) { 3633 spa_configfile_set(spa, props, B_FALSE); 3634 spa_sync_props(props, tx); 3635 } 3636 3637 dmu_tx_commit(tx); 3638 3639 spa->spa_sync_on = B_TRUE; 3640 txg_sync_start(spa->spa_dsl_pool); 3641 3642 /* 3643 * We explicitly wait for the first transaction to complete so that our 3644 * bean counters are appropriately updated. 
3645 */ 3646 txg_wait_synced(spa->spa_dsl_pool, txg); 3647 3648 spa_config_sync(spa, B_FALSE, B_TRUE); 3649 3650 spa_history_log_version(spa, "create"); 3651 3652 spa->spa_minref = refcount_count(&spa->spa_refcount); 3653 3654 mutex_exit(&spa_namespace_lock); 3655 3656 return (0); 3657 } 3658 3659 #ifdef _KERNEL 3660 /* 3661 * Get the root pool information from the root disk, then import the root pool 3662 * at system boot time. 3663 */ 3664 extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3665 3666 static nvlist_t * 3667 spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3668 { 3669 nvlist_t *config; 3670 nvlist_t *nvtop, *nvroot; 3671 uint64_t pgid; 3672 3673 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3674 return (NULL); 3675 3676 /* 3677 * Add this top-level vdev to the child array. 3678 */ 3679 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3680 &nvtop) == 0); 3681 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3682 &pgid) == 0); 3683 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3684 3685 /* 3686 * Put this pool's top-level vdevs into a root vdev. 3687 */ 3688 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3689 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3690 VDEV_TYPE_ROOT) == 0); 3691 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3692 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3693 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3694 &nvtop, 1) == 0); 3695 3696 /* 3697 * Replace the existing vdev_tree with the new root vdev in 3698 * this pool's configuration (remove the old, add the new). 3699 */ 3700 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3701 nvlist_free(nvroot); 3702 return (config); 3703 } 3704 3705 /* 3706 * Walk the vdev tree and see if we can find a device with "better" 3707 * configuration. A configuration is "better" if the label on that 3708 * device has a more recent txg. 3709 */ 3710 static void 3711 spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3712 { 3713 for (int c = 0; c < vd->vdev_children; c++) 3714 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3715 3716 if (vd->vdev_ops->vdev_op_leaf) { 3717 nvlist_t *label; 3718 uint64_t label_txg; 3719 3720 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3721 &label) != 0) 3722 return; 3723 3724 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 3725 &label_txg) == 0); 3726 3727 /* 3728 * Do we have a better boot device? 3729 */ 3730 if (label_txg > *txg) { 3731 *txg = label_txg; 3732 *avd = vd; 3733 } 3734 nvlist_free(label); 3735 } 3736 } 3737 3738 /* 3739 * Import a root pool. 3740 * 3741 * For x86, devpath_list will consist of the devid and/or physpath name of 3742 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 3743 * The GRUB "findroot" command will return the vdev we should boot. 3744 * 3745 * For SPARC, devpath_list consists of the physpath name of the booting device, 3746 * no matter whether the rootpool is a single-device pool or a mirrored pool. 3747 * e.g. 3748 * "/pci@1f,0/ide@d/disk@0,0:a" 3749 */ 3750 int 3751 spa_import_rootpool(char *devpath, char *devid) 3752 { 3753 spa_t *spa; 3754 vdev_t *rvd, *bvd, *avd = NULL; 3755 nvlist_t *config, *nvtop; 3756 uint64_t guid, txg; 3757 char *pname; 3758 int error; 3759 3760 /* 3761 * Read the label from the boot device and generate a configuration. 
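* (vdev_disk_read_rootlabel() reads the label nvlist straight off the device * named by devpath/devid; no imported pool is required for this to work.)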
3762 */ 3763 config = spa_generate_rootconf(devpath, devid, &guid); 3764 #if defined(_OBP) && defined(_KERNEL) 3765 if (config == NULL) { 3766 if (strstr(devpath, "/iscsi/ssd") != NULL) { 3767 /* iscsi boot */ 3768 get_iscsi_bootpath_phy(devpath); 3769 config = spa_generate_rootconf(devpath, devid, &guid); 3770 } 3771 } 3772 #endif 3773 if (config == NULL) { 3774 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 3775 devpath); 3776 return (SET_ERROR(EIO)); 3777 } 3778 3779 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3780 &pname) == 0); 3781 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3782 3783 mutex_enter(&spa_namespace_lock); 3784 if ((spa = spa_lookup(pname)) != NULL) { 3785 /* 3786 * Remove the existing root pool from the namespace so that we 3787 * can replace it with the correct config we just read in. 3788 */ 3789 spa_remove(spa); 3790 } 3791 3792 spa = spa_add(pname, config, NULL); 3793 spa->spa_is_root = B_TRUE; 3794 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 3795 3796 /* 3797 * Build up a vdev tree based on the boot device's label config. 3798 */ 3799 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3800 &nvtop) == 0); 3801 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3802 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 3803 VDEV_ALLOC_ROOTPOOL); 3804 spa_config_exit(spa, SCL_ALL, FTAG); 3805 if (error) { 3806 mutex_exit(&spa_namespace_lock); 3807 nvlist_free(config); 3808 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 3809 pname); 3810 return (error); 3811 } 3812 3813 /* 3814 * Get the boot vdev. 3815 */ 3816 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 3817 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 3818 (u_longlong_t)guid); 3819 error = SET_ERROR(ENOENT); 3820 goto out; 3821 } 3822 3823 /* 3824 * Determine if there is a better boot device. 3825 */ 3826 avd = bvd; 3827 spa_alt_rootvdev(rvd, &avd, &txg); 3828 if (avd != bvd) { 3829 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 3830 "try booting from '%s'", avd->vdev_path); 3831 error = SET_ERROR(EINVAL); 3832 goto out; 3833 } 3834 3835 /* 3836 * If the boot device is part of a spare vdev then ensure that 3837 * we're booting off the active spare. 3838 */ 3839 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3840 !bvd->vdev_isspare) { 3841 cmn_err(CE_NOTE, "The boot device is currently spared. Please " 3842 "try booting from '%s'", 3843 bvd->vdev_parent-> 3844 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 3845 error = SET_ERROR(EINVAL); 3846 goto out; 3847 } 3848 3849 error = 0; 3850 out: 3851 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3852 vdev_free(rvd); 3853 spa_config_exit(spa, SCL_ALL, FTAG); 3854 mutex_exit(&spa_namespace_lock); 3855 3856 nvlist_free(config); 3857 return (error); 3858 } 3859 3860 #endif 3861 3862 /* 3863 * Import a non-root pool into the system. 3864 */ 3865 int 3866 spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 3867 { 3868 spa_t *spa; 3869 char *altroot = NULL; 3870 spa_load_state_t state = SPA_LOAD_IMPORT; 3871 zpool_rewind_policy_t policy; 3872 uint64_t mode = spa_mode_global; 3873 uint64_t readonly = B_FALSE; 3874 int error; 3875 nvlist_t *nvroot; 3876 nvlist_t **spares, **l2cache; 3877 uint_t nspares, nl2cache; 3878 3879 /* 3880 * If a pool with this name exists, return failure. 
3881 */ 3882 mutex_enter(&spa_namespace_lock); 3883 if (spa_lookup(pool) != NULL) { 3884 mutex_exit(&spa_namespace_lock); 3885 return (SET_ERROR(EEXIST)); 3886 } 3887 3888 /* 3889 * Create and initialize the spa structure. 3890 */ 3891 (void) nvlist_lookup_string(props, 3892 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3893 (void) nvlist_lookup_uint64(props, 3894 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 3895 if (readonly) 3896 mode = FREAD; 3897 spa = spa_add(pool, config, altroot); 3898 spa->spa_import_flags = flags; 3899 3900 /* 3901 * Verbatim import - Take a pool and insert it into the namespace 3902 * as if it had been loaded at boot. 3903 */ 3904 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 3905 if (props != NULL) 3906 spa_configfile_set(spa, props, B_FALSE); 3907 3908 spa_config_sync(spa, B_FALSE, B_TRUE); 3909 3910 mutex_exit(&spa_namespace_lock); 3911 return (0); 3912 } 3913 3914 spa_activate(spa, mode); 3915 3916 /* 3917 * Don't start async tasks until we know everything is healthy. 3918 */ 3919 spa_async_suspend(spa); 3920 3921 zpool_get_rewind_policy(config, &policy); 3922 if (policy.zrp_request & ZPOOL_DO_REWIND) 3923 state = SPA_LOAD_RECOVER; 3924 3925 /* 3926 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 3927 * because the user-supplied config is actually the one to trust when 3928 * doing an import. 3929 */ 3930 if (state != SPA_LOAD_RECOVER) 3931 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 3932 3933 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 3934 policy.zrp_request); 3935 3936 /* 3937 * Propagate anything learned while loading the pool and pass it 3938 * back to caller (i.e. rewind info, missing devices, etc). 3939 */ 3940 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 3941 spa->spa_load_info) == 0); 3942 3943 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3944 /* 3945 * Toss any existing sparelist, as it doesn't have any validity 3946 * anymore, and conflicts with spa_has_spare(). 3947 */ 3948 if (spa->spa_spares.sav_config) { 3949 nvlist_free(spa->spa_spares.sav_config); 3950 spa->spa_spares.sav_config = NULL; 3951 spa_load_spares(spa); 3952 } 3953 if (spa->spa_l2cache.sav_config) { 3954 nvlist_free(spa->spa_l2cache.sav_config); 3955 spa->spa_l2cache.sav_config = NULL; 3956 spa_load_l2cache(spa); 3957 } 3958 3959 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3960 &nvroot) == 0); 3961 if (error == 0) 3962 error = spa_validate_aux(spa, nvroot, -1ULL, 3963 VDEV_ALLOC_SPARE); 3964 if (error == 0) 3965 error = spa_validate_aux(spa, nvroot, -1ULL, 3966 VDEV_ALLOC_L2CACHE); 3967 spa_config_exit(spa, SCL_ALL, FTAG); 3968 3969 if (props != NULL) 3970 spa_configfile_set(spa, props, B_FALSE); 3971 3972 if (error != 0 || (props && spa_writeable(spa) && 3973 (error = spa_prop_set(spa, props)))) { 3974 spa_unload(spa); 3975 spa_deactivate(spa); 3976 spa_remove(spa); 3977 mutex_exit(&spa_namespace_lock); 3978 return (error); 3979 } 3980 3981 spa_async_resume(spa); 3982 3983 /* 3984 * Override any spares and level 2 cache devices as specified by 3985 * the user, as these may have correct device names/devids, etc. 
3986 */ 3987 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3988 &spares, &nspares) == 0) { 3989 if (spa->spa_spares.sav_config) 3990 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 3991 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 3992 else 3993 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 3994 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3995 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3996 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3997 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3998 spa_load_spares(spa); 3999 spa_config_exit(spa, SCL_ALL, FTAG); 4000 spa->spa_spares.sav_sync = B_TRUE; 4001 } 4002 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4003 &l2cache, &nl2cache) == 0) { 4004 if (spa->spa_l2cache.sav_config) 4005 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 4006 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 4007 else 4008 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4009 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4010 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4011 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4012 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4013 spa_load_l2cache(spa); 4014 spa_config_exit(spa, SCL_ALL, FTAG); 4015 spa->spa_l2cache.sav_sync = B_TRUE; 4016 } 4017 4018 /* 4019 * Check for any removed devices. 4020 */ 4021 if (spa->spa_autoreplace) { 4022 spa_aux_check_removed(&spa->spa_spares); 4023 spa_aux_check_removed(&spa->spa_l2cache); 4024 } 4025 4026 if (spa_writeable(spa)) { 4027 /* 4028 * Update the config cache to include the newly-imported pool. 4029 */ 4030 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4031 } 4032 4033 /* 4034 * It's possible that the pool was expanded while it was exported. 4035 * We kick off an async task to handle this for us. 4036 */ 4037 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4038 4039 mutex_exit(&spa_namespace_lock); 4040 spa_history_log_version(spa, "import"); 4041 4042 return (0); 4043 } 4044 4045 nvlist_t * 4046 spa_tryimport(nvlist_t *tryconfig) 4047 { 4048 nvlist_t *config = NULL; 4049 char *poolname; 4050 spa_t *spa; 4051 uint64_t state; 4052 int error; 4053 4054 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4055 return (NULL); 4056 4057 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4058 return (NULL); 4059 4060 /* 4061 * Create and initialize the spa structure. 4062 */ 4063 mutex_enter(&spa_namespace_lock); 4064 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4065 spa_activate(spa, FREAD); 4066 4067 /* 4068 * Pass off the heavy lifting to spa_load(). 4069 * Pass TRUE for mosconfig because the user-supplied config 4070 * is actually the one to trust when doing an import. 4071 */ 4072 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4073 4074 /* 4075 * If 'tryconfig' was at least parsable, return the current config. 4076 */ 4077 if (spa->spa_root_vdev != NULL) { 4078 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4079 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4080 poolname) == 0); 4081 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4082 state) == 0); 4083 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4084 spa->spa_uberblock.ub_timestamp) == 0); 4085 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4086 spa->spa_load_info) == 0); 4087 4088 /* 4089 * If the bootfs property exists on this pool then we 4090 * copy it out so that external consumers can tell which 4091 * pools are bootable. 
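 *
 * For example (names hypothetical): the pool is open here under
 * the name "$import", so a bootfs object that resolves to
 * "$import/ROOT/solaris" must be rewritten to "tank/ROOT/solaris"
 * using the real pool name from 'tryconfig'; that is what the
 * dsl_dsobj_to_dsname()/snprintf() dance below accomplishes.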
*/ 4092 */ 4093 if ((!error || error == EEXIST) && spa->spa_bootfs) { 4094 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4095 4096 /* 4097 * We have to play games with the name since the 4098 * pool was opened as TRYIMPORT_NAME. 4099 */ 4100 if (dsl_dsobj_to_dsname(spa_name(spa), 4101 spa->spa_bootfs, tmpname) == 0) { 4102 char *cp; 4103 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4104 4105 cp = strchr(tmpname, '/'); 4106 if (cp == NULL) { 4107 (void) strlcpy(dsname, tmpname, 4108 MAXPATHLEN); 4109 } else { 4110 (void) snprintf(dsname, MAXPATHLEN, 4111 "%s/%s", poolname, ++cp); 4112 } 4113 VERIFY(nvlist_add_string(config, 4114 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4115 kmem_free(dsname, MAXPATHLEN); 4116 } 4117 kmem_free(tmpname, MAXPATHLEN); 4118 } 4119 4120 /* 4121 * Add the list of hot spares and level 2 cache devices. 4122 */ 4123 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4124 spa_add_spares(spa, config); 4125 spa_add_l2cache(spa, config); 4126 spa_config_exit(spa, SCL_CONFIG, FTAG); 4127 } 4128 4129 spa_unload(spa); 4130 spa_deactivate(spa); 4131 spa_remove(spa); 4132 mutex_exit(&spa_namespace_lock); 4133 4134 return (config); 4135 } 4136 4137 /* 4138 * Pool export/destroy 4139 * 4140 * The act of destroying or exporting a pool is very simple. We make sure there 4141 * is no more pending I/O and any references to the pool are gone. Then, we 4142 * update the pool state and sync all the labels to disk, removing the 4143 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4144 * we don't sync the labels or remove the configuration cache. 4145 */ 4146 static int 4147 spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4148 boolean_t force, boolean_t hardforce) 4149 { 4150 spa_t *spa; 4151 4152 if (oldconfig) 4153 *oldconfig = NULL; 4154 4155 if (!(spa_mode_global & FWRITE)) 4156 return (SET_ERROR(EROFS)); 4157 4158 mutex_enter(&spa_namespace_lock); 4159 if ((spa = spa_lookup(pool)) == NULL) { 4160 mutex_exit(&spa_namespace_lock); 4161 return (SET_ERROR(ENOENT)); 4162 } 4163 4164 /* 4165 * Put a hold on the pool, drop the namespace lock, stop async tasks, 4166 * reacquire the namespace lock, and see if we can export. 4167 */ 4168 spa_open_ref(spa, FTAG); 4169 mutex_exit(&spa_namespace_lock); 4170 spa_async_suspend(spa); 4171 mutex_enter(&spa_namespace_lock); 4172 spa_close(spa, FTAG); 4173 4174 /* 4175 * The pool will be in core if it's openable, 4176 * in which case we can modify its state. 4177 */ 4178 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 4179 /* 4180 * Objsets may be open only because they're dirty, so we 4181 * have to force the pool to sync before checking spa_refcnt. 4182 */ 4183 txg_wait_synced(spa->spa_dsl_pool, 0); 4184 4185 /* 4186 * A pool cannot be exported or destroyed if there are active 4187 * references. If we are resetting a pool, allow references by 4188 * fault injection handlers. 4189 */ 4190 if (!spa_refcount_zero(spa) || 4191 (spa->spa_inject_ref != 0 && 4192 new_state != POOL_STATE_UNINITIALIZED)) { 4193 spa_async_resume(spa); 4194 mutex_exit(&spa_namespace_lock); 4195 return (SET_ERROR(EBUSY)); 4196 } 4197 4198 /* 4199 * A pool cannot be exported if it has an active shared spare. 4200 * This is to prevent other pools stealing the active spare 4201 * from an exported pool. At the user's explicit request, 4202 * however, such a pool can still be forcibly exported.
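 *
 * For example (device name hypothetical), if c3t0d0 is configured
 * as a spare in both this pool and another pool and is currently
 * spared in here, a plain export would leave the other pool free
 * to claim the device while it still holds live data for us.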
4203 */ 4204 if (!force && new_state == POOL_STATE_EXPORTED && 4205 spa_has_active_shared_spare(spa)) { 4206 spa_async_resume(spa); 4207 mutex_exit(&spa_namespace_lock); 4208 return (SET_ERROR(EXDEV)); 4209 } 4210 4211 /* 4212 * We want this to be reflected on every label, 4213 * so mark them all dirty. spa_unload() will do the 4214 * final sync that pushes these changes out. 4215 */ 4216 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4217 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4218 spa->spa_state = new_state; 4219 spa->spa_final_txg = spa_last_synced_txg(spa) + 4220 TXG_DEFER_SIZE + 1; 4221 vdev_config_dirty(spa->spa_root_vdev); 4222 spa_config_exit(spa, SCL_ALL, FTAG); 4223 } 4224 } 4225 4226 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4227 4228 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4229 spa_unload(spa); 4230 spa_deactivate(spa); 4231 } 4232 4233 if (oldconfig && spa->spa_config) 4234 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4235 4236 if (new_state != POOL_STATE_UNINITIALIZED) { 4237 if (!hardforce) 4238 spa_config_sync(spa, B_TRUE, B_TRUE); 4239 spa_remove(spa); 4240 } 4241 mutex_exit(&spa_namespace_lock); 4242 4243 return (0); 4244 } 4245 4246 /* 4247 * Destroy a storage pool. 4248 */ 4249 int 4250 spa_destroy(char *pool) 4251 { 4252 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4253 B_FALSE, B_FALSE)); 4254 } 4255 4256 /* 4257 * Export a storage pool. 4258 */ 4259 int 4260 spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4261 boolean_t hardforce) 4262 { 4263 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4264 force, hardforce)); 4265 } 4266 4267 /* 4268 * Similar to spa_export(), this unloads the spa_t without actually removing it 4269 * from the namespace in any way. 4270 */ 4271 int 4272 spa_reset(char *pool) 4273 { 4274 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4275 B_FALSE, B_FALSE)); 4276 } 4277 4278 /* 4279 * ========================================================================== 4280 * Device manipulation 4281 * ========================================================================== 4282 */ 4283 4284 /* 4285 * Add a device to a storage pool. 4286 */ 4287 int 4288 spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4289 { 4290 uint64_t txg, id; 4291 int error; 4292 vdev_t *rvd = spa->spa_root_vdev; 4293 vdev_t *vd, *tvd; 4294 nvlist_t **spares, **l2cache; 4295 uint_t nspares, nl2cache; 4296 4297 ASSERT(spa_writeable(spa)); 4298 4299 txg = spa_vdev_enter(spa); 4300 4301 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4302 VDEV_ALLOC_ADD)) != 0) 4303 return (spa_vdev_exit(spa, NULL, txg, error)); 4304 4305 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4306 4307 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4308 &nspares) != 0) 4309 nspares = 0; 4310 4311 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4312 &nl2cache) != 0) 4313 nl2cache = 0; 4314 4315 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4316 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4317 4318 if (vd->vdev_children != 0 && 4319 (error = vdev_create(vd, txg, B_FALSE)) != 0) 4320 return (spa_vdev_exit(spa, vd, txg, error)); 4321 4322 /* 4323 * We must validate the spares and l2cache devices after checking the 4324 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 
4325 */ 4326 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4327 return (spa_vdev_exit(spa, vd, txg, error)); 4328 4329 /* 4330 * Transfer each new top-level vdev from vd to rvd. 4331 */ 4332 for (int c = 0; c < vd->vdev_children; c++) { 4333 4334 /* 4335 * Set the vdev id to the first hole, if one exists. 4336 */ 4337 for (id = 0; id < rvd->vdev_children; id++) { 4338 if (rvd->vdev_child[id]->vdev_ishole) { 4339 vdev_free(rvd->vdev_child[id]); 4340 break; 4341 } 4342 } 4343 tvd = vd->vdev_child[c]; 4344 vdev_remove_child(vd, tvd); 4345 tvd->vdev_id = id; 4346 vdev_add_child(rvd, tvd); 4347 vdev_config_dirty(tvd); 4348 } 4349 4350 if (nspares != 0) { 4351 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4352 ZPOOL_CONFIG_SPARES); 4353 spa_load_spares(spa); 4354 spa->spa_spares.sav_sync = B_TRUE; 4355 } 4356 4357 if (nl2cache != 0) { 4358 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4359 ZPOOL_CONFIG_L2CACHE); 4360 spa_load_l2cache(spa); 4361 spa->spa_l2cache.sav_sync = B_TRUE; 4362 } 4363 4364 /* 4365 * We have to be careful when adding new vdevs to an existing pool. 4366 * If other threads start allocating from these vdevs before we 4367 * sync the config cache, and we lose power, then upon reboot we may 4368 * fail to open the pool because there are DVAs that the config cache 4369 * can't translate. Therefore, we first add the vdevs without 4370 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4371 * and then let spa_config_update() initialize the new metaslabs. 4372 * 4373 * spa_load() checks for added-but-not-initialized vdevs, so that 4374 * if we lose power at any point in this sequence, the remaining 4375 * steps will be completed the next time we load the pool. 4376 */ 4377 (void) spa_vdev_exit(spa, vd, txg, 0); 4378 4379 mutex_enter(&spa_namespace_lock); 4380 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4381 mutex_exit(&spa_namespace_lock); 4382 4383 return (0); 4384 } 4385 4386 /* 4387 * Attach a device to a mirror. The arguments are the path to any device 4388 * in the mirror, and the nvroot for the new device. If the path specifies 4389 * a device that is not mirrored, we automatically insert the mirror vdev. 4390 * 4391 * If 'replacing' is specified, the new device is intended to replace the 4392 * existing device; in this case the two devices are made into their own 4393 * mirror using the 'replacing' vdev, which is functionally identical to 4394 * the mirror vdev (it actually reuses all the same ops) but has a few 4395 * extra rules: you can't attach to it after it's been created, and upon 4396 * completion of resilvering, the first disk (the one being replaced) 4397 * is automatically detached. 
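 *
 * In the M()/R() notation used in spa_vdev_detach() below:
 * attaching B to a plain disk A yields the mirror M(A,B), while a
 * replacing attach yields R(A,B); once B has fully resilvered,
 * A is detached and only B remains.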
4398 */ 4399 int 4400 spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4401 { 4402 uint64_t txg, dtl_max_txg; 4403 vdev_t *rvd = spa->spa_root_vdev; 4404 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4405 vdev_ops_t *pvops; 4406 char *oldvdpath, *newvdpath; 4407 int newvd_isspare; 4408 int error; 4409 4410 ASSERT(spa_writeable(spa)); 4411 4412 txg = spa_vdev_enter(spa); 4413 4414 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4415 4416 if (oldvd == NULL) 4417 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4418 4419 if (!oldvd->vdev_ops->vdev_op_leaf) 4420 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4421 4422 pvd = oldvd->vdev_parent; 4423 4424 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4425 VDEV_ALLOC_ATTACH)) != 0) 4426 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4427 4428 if (newrootvd->vdev_children != 1) 4429 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4430 4431 newvd = newrootvd->vdev_child[0]; 4432 4433 if (!newvd->vdev_ops->vdev_op_leaf) 4434 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4435 4436 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4437 return (spa_vdev_exit(spa, newrootvd, txg, error)); 4438 4439 /* 4440 * Spares can't replace logs 4441 */ 4442 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4443 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4444 4445 if (!replacing) { 4446 /* 4447 * For attach, the only allowable parent is a mirror or the root 4448 * vdev. 4449 */ 4450 if (pvd->vdev_ops != &vdev_mirror_ops && 4451 pvd->vdev_ops != &vdev_root_ops) 4452 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4453 4454 pvops = &vdev_mirror_ops; 4455 } else { 4456 /* 4457 * Active hot spares can only be replaced by inactive hot 4458 * spares. 4459 */ 4460 if (pvd->vdev_ops == &vdev_spare_ops && 4461 oldvd->vdev_isspare && 4462 !spa_has_spare(spa, newvd->vdev_guid)) 4463 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4464 4465 /* 4466 * If the source is a hot spare, and the parent isn't already a 4467 * spare, then we want to create a new hot spare. Otherwise, we 4468 * want to create a replacing vdev. The user is not allowed to 4469 * attach to a spared vdev child unless the 'isspare' state is 4470 * the same (spare replaces spare, non-spare replaces 4471 * non-spare). 4472 */ 4473 if (pvd->vdev_ops == &vdev_replacing_ops && 4474 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4475 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4476 } else if (pvd->vdev_ops == &vdev_spare_ops && 4477 newvd->vdev_isspare != oldvd->vdev_isspare) { 4478 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4479 } 4480 4481 if (newvd->vdev_isspare) 4482 pvops = &vdev_spare_ops; 4483 else 4484 pvops = &vdev_replacing_ops; 4485 } 4486 4487 /* 4488 * Make sure the new device is big enough. 4489 */ 4490 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4491 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4492 4493 /* 4494 * The new device cannot have a higher alignment requirement 4495 * than the top-level vdev. 4496 */ 4497 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4498 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4499 4500 /* 4501 * If this is an in-place replacement, update oldvd's path and devid 4502 * to make it distinguishable from newvd, and unopenable from now on. 
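 *
 * For example (path hypothetical), an in-place replacement of
 * /dev/dsk/c1t0d0s0 renames the old vdev's path to
 * "/dev/dsk/c1t0d0s0/old", which can never name a real device node.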
4503 */ 4504 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4505 spa_strfree(oldvd->vdev_path); 4506 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4507 KM_SLEEP); 4508 (void) sprintf(oldvd->vdev_path, "%s/%s", 4509 newvd->vdev_path, "old"); 4510 if (oldvd->vdev_devid != NULL) { 4511 spa_strfree(oldvd->vdev_devid); 4512 oldvd->vdev_devid = NULL; 4513 } 4514 } 4515 4516 /* mark the device being resilvered */ 4517 newvd->vdev_resilver_txg = txg; 4518 4519 /* 4520 * If the parent is not a mirror, or if we're replacing, insert the new 4521 * mirror/replacing/spare vdev above oldvd. 4522 */ 4523 if (pvd->vdev_ops != pvops) 4524 pvd = vdev_add_parent(oldvd, pvops); 4525 4526 ASSERT(pvd->vdev_top->vdev_parent == rvd); 4527 ASSERT(pvd->vdev_ops == pvops); 4528 ASSERT(oldvd->vdev_parent == pvd); 4529 4530 /* 4531 * Extract the new device from its root and add it to pvd. 4532 */ 4533 vdev_remove_child(newrootvd, newvd); 4534 newvd->vdev_id = pvd->vdev_children; 4535 newvd->vdev_crtxg = oldvd->vdev_crtxg; 4536 vdev_add_child(pvd, newvd); 4537 4538 tvd = newvd->vdev_top; 4539 ASSERT(pvd->vdev_top == tvd); 4540 ASSERT(tvd->vdev_parent == rvd); 4541 4542 vdev_config_dirty(tvd); 4543 4544 /* 4545 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 4546 * for any dmu_sync-ed blocks. It will propagate upward when 4547 * spa_vdev_exit() calls vdev_dtl_reassess(). 4548 */ 4549 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4550 4551 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 4552 dtl_max_txg - TXG_INITIAL); 4553 4554 if (newvd->vdev_isspare) { 4555 spa_spare_activate(newvd); 4556 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 4557 } 4558 4559 oldvdpath = spa_strdup(oldvd->vdev_path); 4560 newvdpath = spa_strdup(newvd->vdev_path); 4561 newvd_isspare = newvd->vdev_isspare; 4562 4563 /* 4564 * Mark newvd's DTL dirty in this txg. 4565 */ 4566 vdev_dirty(tvd, VDD_DTL, newvd, txg); 4567 4568 /* 4569 * Schedule the resilver to restart in the future. We do this to 4570 * ensure that dmu_sync-ed blocks have been stitched into the 4571 * respective datasets. 4572 */ 4573 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4574 4575 /* 4576 * Commit the config 4577 */ 4578 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 4579 4580 spa_history_log_internal(spa, "vdev attach", NULL, 4581 "%s vdev=%s %s vdev=%s", 4582 replacing && newvd_isspare ? "spare in" : 4583 replacing ? "replace" : "attach", newvdpath, 4584 replacing ? "for" : "to", oldvdpath); 4585 4586 spa_strfree(oldvdpath); 4587 spa_strfree(newvdpath); 4588 4589 if (spa->spa_bootfs) 4590 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 4591 4592 return (0); 4593 } 4594 4595 /* 4596 * Detach a device from a mirror or replacing vdev. 4597 * 4598 * If 'replace_done' is specified, only detach if the parent 4599 * is a replacing vdev. 
4600 */ 4601 int 4602 spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4603 { 4604 uint64_t txg; 4605 int error; 4606 vdev_t *rvd = spa->spa_root_vdev; 4607 vdev_t *vd, *pvd, *cvd, *tvd; 4608 boolean_t unspare = B_FALSE; 4609 uint64_t unspare_guid = 0; 4610 char *vdpath; 4611 4612 ASSERT(spa_writeable(spa)); 4613 4614 txg = spa_vdev_enter(spa); 4615 4616 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4617 4618 if (vd == NULL) 4619 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4620 4621 if (!vd->vdev_ops->vdev_op_leaf) 4622 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4623 4624 pvd = vd->vdev_parent; 4625 4626 /* 4627 * If the parent/child relationship is not as expected, don't do it. 4628 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 4629 * vdev that's replacing B with C. The user's intent in replacing 4630 * is to go from M(A,B) to M(A,C). If the user decides to cancel 4631 * the replace by detaching C, the expected behavior is to end up 4632 * M(A,B). But suppose that right after deciding to detach C, 4633 * the replacement of B completes. We would have M(A,C), and then 4634 * ask to detach C, which would leave us with just A -- not what 4635 * the user wanted. To prevent this, we make sure that the 4636 * parent/child relationship hasn't changed -- in this example, 4637 * that C's parent is still the replacing vdev R. 4638 */ 4639 if (pvd->vdev_guid != pguid && pguid != 0) 4640 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4641 4642 /* 4643 * Only 'replacing' or 'spare' vdevs can be replaced. 4644 */ 4645 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4646 pvd->vdev_ops != &vdev_spare_ops) 4647 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4648 4649 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4650 spa_version(spa) >= SPA_VERSION_SPARES); 4651 4652 /* 4653 * Only mirror, replacing, and spare vdevs support detach. 4654 */ 4655 if (pvd->vdev_ops != &vdev_replacing_ops && 4656 pvd->vdev_ops != &vdev_mirror_ops && 4657 pvd->vdev_ops != &vdev_spare_ops) 4658 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4659 4660 /* 4661 * If this device has the only valid copy of some data, 4662 * we cannot safely detach it. 4663 */ 4664 if (vdev_dtl_required(vd)) 4665 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4666 4667 ASSERT(pvd->vdev_children >= 2); 4668 4669 /* 4670 * If we are detaching the second disk from a replacing vdev, then 4671 * check to see if we changed the original vdev's path to have "/old" 4672 * at the end in spa_vdev_attach(). If so, undo that change now. 4673 */ 4674 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 4675 vd->vdev_path != NULL) { 4676 size_t len = strlen(vd->vdev_path); 4677 4678 for (int c = 0; c < pvd->vdev_children; c++) { 4679 cvd = pvd->vdev_child[c]; 4680 4681 if (cvd == vd || cvd->vdev_path == NULL) 4682 continue; 4683 4684 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 4685 strcmp(cvd->vdev_path + len, "/old") == 0) { 4686 spa_strfree(cvd->vdev_path); 4687 cvd->vdev_path = spa_strdup(vd->vdev_path); 4688 break; 4689 } 4690 } 4691 } 4692 4693 /* 4694 * If we are detaching the original disk from a spare, then it implies 4695 * that the spare should become a real disk, and be removed from the 4696 * active spare list for the pool. 4697 */ 4698 if (pvd->vdev_ops == &vdev_spare_ops && 4699 vd->vdev_id == 0 && 4700 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 4701 unspare = B_TRUE; 4702 4703 /* 4704 * Erase the disk labels so the disk can be used for other things. 
4705 * This must be done after all other error cases are handled, 4706 * but before we disembowel vd (so we can still do I/O to it). 4707 * But if we can't do it, don't treat the error as fatal -- 4708 * it may be that the unwritability of the disk is the reason 4709 * it's being detached! 4710 */ 4711 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 4712 4713 /* 4714 * Remove vd from its parent and compact the parent's children. 4715 */ 4716 vdev_remove_child(pvd, vd); 4717 vdev_compact_children(pvd); 4718 4719 /* 4720 * Remember one of the remaining children so we can get tvd below. 4721 */ 4722 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 4723 4724 /* 4725 * If we need to remove the remaining child from the list of hot spares, 4726 * do it now, marking the vdev as no longer a spare in the process. 4727 * We must do this before vdev_remove_parent(), because that can 4728 * change the GUID if it creates a new toplevel GUID. For a similar 4729 * reason, we must remove the spare now, in the same txg as the detach; 4730 * otherwise someone could attach a new sibling, change the GUID, and 4731 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 4732 */ 4733 if (unspare) { 4734 ASSERT(cvd->vdev_isspare); 4735 spa_spare_remove(cvd); 4736 unspare_guid = cvd->vdev_guid; 4737 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 4738 cvd->vdev_unspare = B_TRUE; 4739 } 4740 4741 /* 4742 * If the parent mirror/replacing vdev only has one child, 4743 * the parent is no longer needed. Remove it from the tree. 4744 */ 4745 if (pvd->vdev_children == 1) { 4746 if (pvd->vdev_ops == &vdev_spare_ops) 4747 cvd->vdev_unspare = B_FALSE; 4748 vdev_remove_parent(cvd); 4749 } 4750 4751 4752 /* 4753 * We don't set tvd until now because the parent we just removed 4754 * may have been the previous top-level vdev. 4755 */ 4756 tvd = cvd->vdev_top; 4757 ASSERT(tvd->vdev_parent == rvd); 4758 4759 /* 4760 * Reevaluate the parent vdev state. 4761 */ 4762 vdev_propagate_state(cvd); 4763 4764 /* 4765 * If the 'autoexpand' property is set on the pool then automatically 4766 * try to expand the size of the pool. For example if the device we 4767 * just detached was smaller than the others, it may be possible to 4768 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 4769 * first so that we can obtain the updated sizes of the leaf vdevs. 4770 */ 4771 if (spa->spa_autoexpand) { 4772 vdev_reopen(tvd); 4773 vdev_expand(tvd, txg); 4774 } 4775 4776 vdev_config_dirty(tvd); 4777 4778 /* 4779 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 4780 * vd->vdev_detached is set and free vd's DTL object in syncing context. 4781 * But first make sure we're not on any *other* txg's DTL list, to 4782 * prevent vd from being accessed after it's freed. 4783 */ 4784 vdpath = spa_strdup(vd->vdev_path); 4785 for (int t = 0; t < TXG_SIZE; t++) 4786 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 4787 vd->vdev_detached = B_TRUE; 4788 vdev_dirty(tvd, VDD_DTL, vd, txg); 4789 4790 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 4791 4792 /* hang on to the spa before we release the lock */ 4793 spa_open_ref(spa, FTAG); 4794 4795 error = spa_vdev_exit(spa, vd, txg, 0); 4796 4797 spa_history_log_internal(spa, "detach", NULL, 4798 "vdev=%s", vdpath); 4799 spa_strfree(vdpath); 4800 4801 /* 4802 * If this was the removal of the original device in a hot spare vdev, 4803 * then we want to go through and remove the device from the hot spare 4804 * list of every other pool. 
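 * (A single spare device may be configured in several pools at
 * once; once it has been promoted to a full member of this pool,
 * it must no longer be offered to the others.)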
4805 */ 4806 if (unspare) { 4807 spa_t *altspa = NULL; 4808 4809 mutex_enter(&spa_namespace_lock); 4810 while ((altspa = spa_next(altspa)) != NULL) { 4811 if (altspa->spa_state != POOL_STATE_ACTIVE || 4812 altspa == spa) 4813 continue; 4814 4815 spa_open_ref(altspa, FTAG); 4816 mutex_exit(&spa_namespace_lock); 4817 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 4818 mutex_enter(&spa_namespace_lock); 4819 spa_close(altspa, FTAG); 4820 } 4821 mutex_exit(&spa_namespace_lock); 4822 4823 /* search the rest of the vdevs for spares to remove */ 4824 spa_vdev_resilver_done(spa); 4825 } 4826 4827 /* all done with the spa; OK to release */ 4828 mutex_enter(&spa_namespace_lock); 4829 spa_close(spa, FTAG); 4830 mutex_exit(&spa_namespace_lock); 4831 4832 return (error); 4833 } 4834 4835 /* 4836 * Split a set of devices from their mirrors, and create a new pool from them. 4837 */ 4838 int 4839 spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 4840 nvlist_t *props, boolean_t exp) 4841 { 4842 int error = 0; 4843 uint64_t txg, *glist; 4844 spa_t *newspa; 4845 uint_t c, children, lastlog; 4846 nvlist_t **child, *nvl, *tmp; 4847 dmu_tx_t *tx; 4848 char *altroot = NULL; 4849 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 4850 boolean_t activate_slog; 4851 4852 ASSERT(spa_writeable(spa)); 4853 4854 txg = spa_vdev_enter(spa); 4855 4856 /* clear the log and flush everything up to now */ 4857 activate_slog = spa_passivate_log(spa); 4858 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 4859 error = spa_offline_log(spa); 4860 txg = spa_vdev_config_enter(spa); 4861 4862 if (activate_slog) 4863 spa_activate_log(spa); 4864 4865 if (error != 0) 4866 return (spa_vdev_exit(spa, NULL, txg, error)); 4867 4868 /* check new spa name before going any further */ 4869 if (spa_lookup(newname) != NULL) 4870 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 4871 4872 /* 4873 * scan through all the children to ensure they're all mirrors 4874 */ 4875 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 4876 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 4877 &children) != 0) 4878 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4879 4880 /* first, check to ensure we've got the right child count */ 4881 rvd = spa->spa_root_vdev; 4882 lastlog = 0; 4883 for (c = 0; c < rvd->vdev_children; c++) { 4884 vdev_t *vd = rvd->vdev_child[c]; 4885 4886 /* don't count the holes & logs as children */ 4887 if (vd->vdev_islog || vd->vdev_ishole) { 4888 if (lastlog == 0) 4889 lastlog = c; 4890 continue; 4891 } 4892 4893 lastlog = 0; 4894 } 4895 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 4896 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4897 4898 /* next, ensure no spare or cache devices are part of the split */ 4899 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 4900 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 4901 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4902 4903 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 4904 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 4905 4906 /* then, loop over each vdev and validate it */ 4907 for (c = 0; c < children; c++) { 4908 uint64_t is_hole = 0; 4909 4910 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 4911 &is_hole); 4912 4913 if (is_hole != 0) { 4914 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 4915 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 4916 continue; 4917 } else { 4918 error = SET_ERROR(EINVAL); 4919 break; 4920 } 4921 } 4922 4923 /* which disk is going to be split? */ 4924 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 4925 &glist[c]) != 0) { 4926 error = SET_ERROR(EINVAL); 4927 break; 4928 } 4929 4930 /* look it up in the spa */ 4931 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 4932 if (vml[c] == NULL) { 4933 error = SET_ERROR(ENODEV); 4934 break; 4935 } 4936 4937 /* make sure there's nothing stopping the split */ 4938 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 4939 vml[c]->vdev_islog || 4940 vml[c]->vdev_ishole || 4941 vml[c]->vdev_isspare || 4942 vml[c]->vdev_isl2cache || 4943 !vdev_writeable(vml[c]) || 4944 vml[c]->vdev_children != 0 || 4945 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 4946 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 4947 error = SET_ERROR(EINVAL); 4948 break; 4949 } 4950 4951 if (vdev_dtl_required(vml[c])) { 4952 error = SET_ERROR(EBUSY); 4953 break; 4954 } 4955 4956 /* we need certain info from the top level */ 4957 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 4958 vml[c]->vdev_top->vdev_ms_array) == 0); 4959 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 4960 vml[c]->vdev_top->vdev_ms_shift) == 0); 4961 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 4962 vml[c]->vdev_top->vdev_asize) == 0); 4963 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 4964 vml[c]->vdev_top->vdev_ashift) == 0); 4965 } 4966 4967 if (error != 0) { 4968 kmem_free(vml, children * sizeof (vdev_t *)); 4969 kmem_free(glist, children * sizeof (uint64_t)); 4970 return (spa_vdev_exit(spa, NULL, txg, error)); 4971 } 4972 4973 /* stop writers from using the disks */ 4974 for (c = 0; c < children; c++) { 4975 if (vml[c] != NULL) 4976 vml[c]->vdev_offline = B_TRUE; 4977 } 4978 vdev_reopen(spa->spa_root_vdev); 4979 4980 /* 4981 * Temporarily record the splitting vdevs in the spa config. This 4982 * will disappear once the config is regenerated. 4983 */ 4984 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4985 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 4986 glist, children) == 0); 4987 kmem_free(glist, children * sizeof (uint64_t)); 4988 4989 mutex_enter(&spa->spa_props_lock); 4990 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 4991 nvl) == 0); 4992 mutex_exit(&spa->spa_props_lock); 4993 spa->spa_config_splitting = nvl; 4994 vdev_config_dirty(spa->spa_root_vdev); 4995 4996 /* configure and create the new pool */ 4997 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 4998 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4999 exp ? 
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 5000 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5001 spa_version(spa)) == 0); 5002 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 5003 spa->spa_config_txg) == 0); 5004 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5005 spa_generate_guid(NULL)) == 0); 5006 (void) nvlist_lookup_string(props, 5007 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5008 5009 /* add the new pool to the namespace */ 5010 newspa = spa_add(newname, config, altroot); 5011 newspa->spa_config_txg = spa->spa_config_txg; 5012 spa_set_log_state(newspa, SPA_LOG_CLEAR); 5013 5014 /* release the spa config lock, retaining the namespace lock */ 5015 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5016 5017 if (zio_injection_enabled) 5018 zio_handle_panic_injection(spa, FTAG, 1); 5019 5020 spa_activate(newspa, spa_mode_global); 5021 spa_async_suspend(newspa); 5022 5023 /* create the new pool from the disks of the original pool */ 5024 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 5025 if (error) 5026 goto out; 5027 5028 /* if that worked, generate a real config for the new pool */ 5029 if (newspa->spa_root_vdev != NULL) { 5030 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5031 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5032 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5033 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5034 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5035 B_TRUE)); 5036 } 5037 5038 /* set the props */ 5039 if (props != NULL) { 5040 spa_configfile_set(newspa, props, B_FALSE); 5041 error = spa_prop_set(newspa, props); 5042 if (error) 5043 goto out; 5044 } 5045 5046 /* flush everything */ 5047 txg = spa_vdev_config_enter(newspa); 5048 vdev_config_dirty(newspa->spa_root_vdev); 5049 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5050 5051 if (zio_injection_enabled) 5052 zio_handle_panic_injection(spa, FTAG, 2); 5053 5054 spa_async_resume(newspa); 5055 5056 /* finally, update the original pool's config */ 5057 txg = spa_vdev_config_enter(spa); 5058 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5059 error = dmu_tx_assign(tx, TXG_WAIT); 5060 if (error != 0) 5061 dmu_tx_abort(tx); 5062 for (c = 0; c < children; c++) { 5063 if (vml[c] != NULL) { 5064 vdev_split(vml[c]); 5065 if (error == 0) 5066 spa_history_log_internal(spa, "detach", tx, 5067 "vdev=%s", vml[c]->vdev_path); 5068 vdev_free(vml[c]); 5069 } 5070 } 5071 vdev_config_dirty(spa->spa_root_vdev); 5072 spa->spa_config_splitting = NULL; 5073 nvlist_free(nvl); 5074 if (error == 0) 5075 dmu_tx_commit(tx); 5076 (void) spa_vdev_exit(spa, NULL, txg, 0); 5077 5078 if (zio_injection_enabled) 5079 zio_handle_panic_injection(spa, FTAG, 3); 5080 5081 /* split is complete; log a history record */ 5082 spa_history_log_internal(newspa, "split", NULL, 5083 "from pool %s", spa_name(spa)); 5084 5085 kmem_free(vml, children * sizeof (vdev_t *)); 5086 5087 /* if we're not going to mount the filesystems in userland, export */ 5088 if (exp) 5089 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5090 B_FALSE, B_FALSE); 5091 5092 return (error); 5093 5094 out: 5095 spa_unload(newspa); 5096 spa_deactivate(newspa); 5097 spa_remove(newspa); 5098 5099 txg = spa_vdev_config_enter(spa); 5100 5101 /* re-online all offlined disks */ 5102 for (c = 0; c < children; c++) { 5103 if (vml[c] != NULL) 5104 vml[c]->vdev_offline = B_FALSE; 5105 } 5106 vdev_reopen(spa->spa_root_vdev); 5107 5108 nvlist_free(spa->spa_config_splitting); 5109 
spa->spa_config_splitting = NULL; 5110 (void) spa_vdev_exit(spa, NULL, txg, error); 5111 5112 kmem_free(vml, children * sizeof (vdev_t *)); 5113 return (error); 5114 } 5115 5116 static nvlist_t * 5117 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5118 { 5119 for (int i = 0; i < count; i++) { 5120 uint64_t guid; 5121 5122 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5123 &guid) == 0); 5124 5125 if (guid == target_guid) 5126 return (nvpp[i]); 5127 } 5128 5129 return (NULL); 5130 } 5131 5132 static void 5133 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5134 nvlist_t *dev_to_remove) 5135 { 5136 nvlist_t **newdev = NULL; 5137 5138 if (count > 1) 5139 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5140 5141 for (int i = 0, j = 0; i < count; i++) { 5142 if (dev[i] == dev_to_remove) 5143 continue; 5144 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5145 } 5146 5147 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5148 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5149 5150 for (int i = 0; i < count - 1; i++) 5151 nvlist_free(newdev[i]); 5152 5153 if (count > 1) 5154 kmem_free(newdev, (count - 1) * sizeof (void *)); 5155 } 5156 5157 /* 5158 * Evacuate the device. 5159 */ 5160 static int 5161 spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5162 { 5163 uint64_t txg; 5164 int error = 0; 5165 5166 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5167 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5168 ASSERT(vd == vd->vdev_top); 5169 5170 /* 5171 * Evacuate the device. We don't hold the config lock as writer 5172 * since we need to do I/O but we do keep the 5173 * spa_namespace_lock held. Once this completes the device 5174 * should no longer have any blocks allocated on it. 5175 */ 5176 if (vd->vdev_islog) { 5177 if (vd->vdev_stat.vs_alloc != 0) 5178 error = spa_offline_log(spa); 5179 } else { 5180 error = SET_ERROR(ENOTSUP); 5181 } 5182 5183 if (error) 5184 return (error); 5185 5186 /* 5187 * The evacuation succeeded. Remove any remaining MOS metadata 5188 * associated with this vdev, and wait for these changes to sync. 5189 */ 5190 ASSERT0(vd->vdev_stat.vs_alloc); 5191 txg = spa_vdev_config_enter(spa); 5192 vd->vdev_removing = B_TRUE; 5193 vdev_dirty_leaves(vd, VDD_DTL, txg); 5194 vdev_config_dirty(vd); 5195 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5196 5197 return (0); 5198 } 5199 5200 /* 5201 * Complete the removal by cleaning up the namespace. 5202 */ 5203 static void 5204 spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5205 { 5206 vdev_t *rvd = spa->spa_root_vdev; 5207 uint64_t id = vd->vdev_id; 5208 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5209 5210 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5211 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5212 ASSERT(vd == vd->vdev_top); 5213 5214 /* 5215 * Only remove any devices which are empty. 5216 */ 5217 if (vd->vdev_stat.vs_alloc != 0) 5218 return; 5219 5220 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5221 5222 if (list_link_active(&vd->vdev_state_dirty_node)) 5223 vdev_state_clean(vd); 5224 if (list_link_active(&vd->vdev_config_dirty_node)) 5225 vdev_config_clean(vd); 5226 5227 vdev_free(vd); 5228 5229 if (last_vdev) { 5230 vdev_compact_children(rvd); 5231 } else { 5232 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5233 vdev_add_child(rvd, vd); 5234 } 5235 vdev_config_dirty(rvd); 5236 5237 /* 5238 * Reassess the health of our root vdev. 
5239 */ 5240 vdev_reopen(rvd); 5241 } 5242 5243 /* 5244 * Remove a device from the pool - 5245 * 5246 * Removing a device from the vdev namespace requires several steps 5247 * and can take a significant amount of time. As a result we use 5248 * the spa_vdev_config_[enter/exit] functions which allow us to 5249 * grab and release the spa_config_lock while still holding the namespace 5250 * lock. During each step the configuration is synced out. 5251 * 5252 * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5253 * devices. 5254 */ 5255 int 5256 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5257 { 5258 vdev_t *vd; 5259 metaslab_group_t *mg; 5260 nvlist_t **spares, **l2cache, *nv; 5261 uint64_t txg = 0; 5262 uint_t nspares, nl2cache; 5263 int error = 0; 5264 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5265 5266 ASSERT(spa_writeable(spa)); 5267 5268 if (!locked) 5269 txg = spa_vdev_enter(spa); 5270 5271 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5272 5273 if (spa->spa_spares.sav_vdevs != NULL && 5274 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5275 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5276 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5277 /* 5278 * Only remove the hot spare if it's not currently in use 5279 * in this pool. 5280 */ 5281 if (vd == NULL || unspare) { 5282 spa_vdev_remove_aux(spa->spa_spares.sav_config, 5283 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5284 spa_load_spares(spa); 5285 spa->spa_spares.sav_sync = B_TRUE; 5286 } else { 5287 error = SET_ERROR(EBUSY); 5288 } 5289 } else if (spa->spa_l2cache.sav_vdevs != NULL && 5290 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5291 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5292 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5293 /* 5294 * Cache devices can always be removed. 5295 */ 5296 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5297 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5298 spa_load_l2cache(spa); 5299 spa->spa_l2cache.sav_sync = B_TRUE; 5300 } else if (vd != NULL && vd->vdev_islog) { 5301 ASSERT(!locked); 5302 ASSERT(vd == vd->vdev_top); 5303 5304 mg = vd->vdev_mg; 5305 5306 /* 5307 * Stop allocating from this vdev. 5308 */ 5309 metaslab_group_passivate(mg); 5310 5311 /* 5312 * Wait for the youngest allocations and frees to sync, 5313 * and then wait for the deferral of those frees to finish. 5314 */ 5315 spa_vdev_config_exit(spa, NULL, 5316 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5317 5318 /* 5319 * Attempt to evacuate the vdev. 5320 */ 5321 error = spa_vdev_remove_evacuate(spa, vd); 5322 5323 txg = spa_vdev_config_enter(spa); 5324 5325 /* 5326 * If we couldn't evacuate the vdev, unwind. 5327 */ 5328 if (error) { 5329 metaslab_group_activate(mg); 5330 return (spa_vdev_exit(spa, NULL, txg, error)); 5331 } 5332 5333 /* 5334 * Clean up the vdev namespace. 5335 */ 5336 spa_vdev_remove_from_namespace(spa, vd); 5337 5338 } else if (vd != NULL) { 5339 /* 5340 * Normal vdevs cannot be removed (yet). 5341 */ 5342 error = SET_ERROR(ENOTSUP); 5343 } else { 5344 /* 5345 * There is no vdev of any kind with the specified guid. 5346 */ 5347 error = SET_ERROR(ENOENT); 5348 } 5349 5350 if (!locked) 5351 return (spa_vdev_exit(spa, NULL, txg, error)); 5352 5353 return (error); 5354 } 5355 5356 /* 5357 * Find any device that's done replacing, or a vdev marked 'unspare' that's 5358 * currently spared, so we can detach it. 
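 *
 * For example, given R(A,B) where B has finished resilvering (its
 * DTLs are empty) and A is no longer required, this returns A so
 * the caller can detach it and complete the replacement.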
5359 */ 5360 static vdev_t * 5361 spa_vdev_resilver_done_hunt(vdev_t *vd) 5362 { 5363 vdev_t *newvd, *oldvd; 5364 5365 for (int c = 0; c < vd->vdev_children; c++) { 5366 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5367 if (oldvd != NULL) 5368 return (oldvd); 5369 } 5370 5371 /* 5372 * Check for a completed replacement. We always consider the first 5373 * vdev in the list to be the oldest vdev, and the last one to be 5374 * the newest (see spa_vdev_attach() for how that works). In 5375 * the case where the newest vdev is faulted, we will not automatically 5376 * remove it after a resilver completes. This is OK as it will require 5377 * user intervention to determine which disk the admin wishes to keep. 5378 */ 5379 if (vd->vdev_ops == &vdev_replacing_ops) { 5380 ASSERT(vd->vdev_children > 1); 5381 5382 newvd = vd->vdev_child[vd->vdev_children - 1]; 5383 oldvd = vd->vdev_child[0]; 5384 5385 if (vdev_dtl_empty(newvd, DTL_MISSING) && 5386 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5387 !vdev_dtl_required(oldvd)) 5388 return (oldvd); 5389 } 5390 5391 /* 5392 * Check for a completed resilver with the 'unspare' flag set. 5393 */ 5394 if (vd->vdev_ops == &vdev_spare_ops) { 5395 vdev_t *first = vd->vdev_child[0]; 5396 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5397 5398 if (last->vdev_unspare) { 5399 oldvd = first; 5400 newvd = last; 5401 } else if (first->vdev_unspare) { 5402 oldvd = last; 5403 newvd = first; 5404 } else { 5405 oldvd = NULL; 5406 } 5407 5408 if (oldvd != NULL && 5409 vdev_dtl_empty(newvd, DTL_MISSING) && 5410 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5411 !vdev_dtl_required(oldvd)) 5412 return (oldvd); 5413 5414 /* 5415 * If there are more than two spares attached to a disk, 5416 * and those spares are not required, then we want to 5417 * attempt to free them up now so that they can be used 5418 * by other pools. Once we're back down to a single 5419 * disk+spare, we stop removing them. 5420 */ 5421 if (vd->vdev_children > 2) { 5422 newvd = vd->vdev_child[1]; 5423 5424 if (newvd->vdev_isspare && last->vdev_isspare && 5425 vdev_dtl_empty(last, DTL_MISSING) && 5426 vdev_dtl_empty(last, DTL_OUTAGE) && 5427 !vdev_dtl_required(newvd)) 5428 return (newvd); 5429 } 5430 } 5431 5432 return (NULL); 5433 } 5434 5435 static void 5436 spa_vdev_resilver_done(spa_t *spa) 5437 { 5438 vdev_t *vd, *pvd, *ppvd; 5439 uint64_t guid, sguid, pguid, ppguid; 5440 5441 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5442 5443 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5444 pvd = vd->vdev_parent; 5445 ppvd = pvd->vdev_parent; 5446 guid = vd->vdev_guid; 5447 pguid = pvd->vdev_guid; 5448 ppguid = ppvd->vdev_guid; 5449 sguid = 0; 5450 /* 5451 * If we have just finished replacing a hot spared device, then 5452 * we need to detach the parent's first child (the original hot 5453 * spare) as well. 5454 */ 5455 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5456 ppvd->vdev_children == 2) { 5457 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5458 sguid = ppvd->vdev_child[1]->vdev_guid; 5459 } 5460 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5461 5462 spa_config_exit(spa, SCL_ALL, FTAG); 5463 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5464 return; 5465 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5466 return; 5467 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5468 } 5469 5470 spa_config_exit(spa, SCL_ALL, FTAG); 5471 } 5472 5473 /* 5474 * Update the stored path or FRU for this vdev. 
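 * 'value' is either the new device path or the new FRU
 * (field-replaceable unit) identifier, selected by 'ispath'.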
5475 */ 5476 int 5477 spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5478 boolean_t ispath) 5479 { 5480 vdev_t *vd; 5481 boolean_t sync = B_FALSE; 5482 5483 ASSERT(spa_writeable(spa)); 5484 5485 spa_vdev_state_enter(spa, SCL_ALL); 5486 5487 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 5488 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 5489 5490 if (!vd->vdev_ops->vdev_op_leaf) 5491 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 5492 5493 if (ispath) { 5494 if (strcmp(value, vd->vdev_path) != 0) { 5495 spa_strfree(vd->vdev_path); 5496 vd->vdev_path = spa_strdup(value); 5497 sync = B_TRUE; 5498 } 5499 } else { 5500 if (vd->vdev_fru == NULL) { 5501 vd->vdev_fru = spa_strdup(value); 5502 sync = B_TRUE; 5503 } else if (strcmp(value, vd->vdev_fru) != 0) { 5504 spa_strfree(vd->vdev_fru); 5505 vd->vdev_fru = spa_strdup(value); 5506 sync = B_TRUE; 5507 } 5508 } 5509 5510 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 5511 } 5512 5513 int 5514 spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 5515 { 5516 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 5517 } 5518 5519 int 5520 spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 5521 { 5522 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 5523 } 5524 5525 /* 5526 * ========================================================================== 5527 * SPA Scanning 5528 * ========================================================================== 5529 */ 5530 5531 int 5532 spa_scan_stop(spa_t *spa) 5533 { 5534 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5535 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 5536 return (SET_ERROR(EBUSY)); 5537 return (dsl_scan_cancel(spa->spa_dsl_pool)); 5538 } 5539 5540 int 5541 spa_scan(spa_t *spa, pool_scan_func_t func) 5542 { 5543 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5544 5545 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 5546 return (SET_ERROR(ENOTSUP)); 5547 5548 /* 5549 * If a resilver was requested, but there is no DTL on a 5550 * writeable leaf device, we have nothing to do. 5551 */ 5552 if (func == POOL_SCAN_RESILVER && 5553 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5554 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 5555 return (0); 5556 } 5557 5558 return (dsl_scan(spa->spa_dsl_pool, func)); 5559 } 5560 5561 /* 5562 * ========================================================================== 5563 * SPA async task processing 5564 * ========================================================================== 5565 */ 5566 5567 static void 5568 spa_async_remove(spa_t *spa, vdev_t *vd) 5569 { 5570 if (vd->vdev_remove_wanted) { 5571 vd->vdev_remove_wanted = B_FALSE; 5572 vd->vdev_delayed_close = B_FALSE; 5573 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 5574 5575 /* 5576 * We want to clear the stats, but we don't want to do a full 5577 * vdev_clear() as that will cause us to throw away 5578 * degraded/faulted state as well as attempt to reopen the 5579 * device, all of which is a waste. 
5580 */ 5581 vd->vdev_stat.vs_read_errors = 0; 5582 vd->vdev_stat.vs_write_errors = 0; 5583 vd->vdev_stat.vs_checksum_errors = 0; 5584 5585 vdev_state_dirty(vd->vdev_top); 5586 } 5587 5588 for (int c = 0; c < vd->vdev_children; c++) 5589 spa_async_remove(spa, vd->vdev_child[c]); 5590 } 5591 5592 static void 5593 spa_async_probe(spa_t *spa, vdev_t *vd) 5594 { 5595 if (vd->vdev_probe_wanted) { 5596 vd->vdev_probe_wanted = B_FALSE; 5597 vdev_reopen(vd); /* vdev_open() does the actual probe */ 5598 } 5599 5600 for (int c = 0; c < vd->vdev_children; c++) 5601 spa_async_probe(spa, vd->vdev_child[c]); 5602 } 5603 5604 static void 5605 spa_async_autoexpand(spa_t *spa, vdev_t *vd) 5606 { 5607 sysevent_id_t eid; 5608 nvlist_t *attr; 5609 char *physpath; 5610 5611 if (!spa->spa_autoexpand) 5612 return; 5613 5614 for (int c = 0; c < vd->vdev_children; c++) { 5615 vdev_t *cvd = vd->vdev_child[c]; 5616 spa_async_autoexpand(spa, cvd); 5617 } 5618 5619 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 5620 return; 5621 5622 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5623 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 5624 5625 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5626 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 5627 5628 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 5629 ESC_DEV_DLE, attr, &eid, DDI_SLEEP); 5630 5631 nvlist_free(attr); 5632 kmem_free(physpath, MAXPATHLEN); 5633 } 5634 5635 static void 5636 spa_async_thread(spa_t *spa) 5637 { 5638 int tasks; 5639 5640 ASSERT(spa->spa_sync_on); 5641 5642 mutex_enter(&spa->spa_async_lock); 5643 tasks = spa->spa_async_tasks; 5644 spa->spa_async_tasks = 0; 5645 mutex_exit(&spa->spa_async_lock); 5646 5647 /* 5648 * See if the config needs to be updated. 5649 */ 5650 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 5651 uint64_t old_space, new_space; 5652 5653 mutex_enter(&spa_namespace_lock); 5654 old_space = metaslab_class_get_space(spa_normal_class(spa)); 5655 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5656 new_space = metaslab_class_get_space(spa_normal_class(spa)); 5657 mutex_exit(&spa_namespace_lock); 5658 5659 /* 5660 * If the pool grew as a result of the config update, 5661 * then log an internal history event. 5662 */ 5663 if (new_space != old_space) { 5664 spa_history_log_internal(spa, "vdev online", NULL, 5665 "pool '%s' size: %llu(+%llu)", 5666 spa_name(spa), new_space, new_space - old_space); 5667 } 5668 } 5669 5670 /* 5671 * See if any devices need to be marked REMOVED. 5672 */ 5673 if (tasks & SPA_ASYNC_REMOVE) { 5674 spa_vdev_state_enter(spa, SCL_NONE); 5675 spa_async_remove(spa, spa->spa_root_vdev); 5676 for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 5677 spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 5678 for (int i = 0; i < spa->spa_spares.sav_count; i++) 5679 spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 5680 (void) spa_vdev_state_exit(spa, NULL, 0); 5681 } 5682 5683 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 5684 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5685 spa_async_autoexpand(spa, spa->spa_root_vdev); 5686 spa_config_exit(spa, SCL_CONFIG, FTAG); 5687 } 5688 5689 /* 5690 * See if any devices need to be probed. 5691 */ 5692 if (tasks & SPA_ASYNC_PROBE) { 5693 spa_vdev_state_enter(spa, SCL_NONE); 5694 spa_async_probe(spa, spa->spa_root_vdev); 5695 (void) spa_vdev_state_exit(spa, NULL, 0); 5696 } 5697 5698 /* 5699 * If any devices are done replacing, detach them. 
5700 */ 5701 if (tasks & SPA_ASYNC_RESILVER_DONE) 5702 spa_vdev_resilver_done(spa); 5703 5704 /* 5705 * Kick off a resilver. 5706 */ 5707 if (tasks & SPA_ASYNC_RESILVER) 5708 dsl_resilver_restart(spa->spa_dsl_pool, 0); 5709 5710 /* 5711 * Let the world know that we're done. 5712 */ 5713 mutex_enter(&spa->spa_async_lock); 5714 spa->spa_async_thread = NULL; 5715 cv_broadcast(&spa->spa_async_cv); 5716 mutex_exit(&spa->spa_async_lock); 5717 thread_exit(); 5718 } 5719 5720 void 5721 spa_async_suspend(spa_t *spa) 5722 { 5723 mutex_enter(&spa->spa_async_lock); 5724 spa->spa_async_suspended++; 5725 while (spa->spa_async_thread != NULL) 5726 cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 5727 mutex_exit(&spa->spa_async_lock); 5728 } 5729 5730 void 5731 spa_async_resume(spa_t *spa) 5732 { 5733 mutex_enter(&spa->spa_async_lock); 5734 ASSERT(spa->spa_async_suspended != 0); 5735 spa->spa_async_suspended--; 5736 mutex_exit(&spa->spa_async_lock); 5737 } 5738 5739 static boolean_t 5740 spa_async_tasks_pending(spa_t *spa) 5741 { 5742 uint_t non_config_tasks; 5743 uint_t config_task; 5744 boolean_t config_task_suspended; 5745 5746 non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE; 5747 config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 5748 if (spa->spa_ccw_fail_time == 0) { 5749 config_task_suspended = B_FALSE; 5750 } else { 5751 config_task_suspended = 5752 (gethrtime() - spa->spa_ccw_fail_time) < 5753 (zfs_ccw_retry_interval * NANOSEC); 5754 } 5755 5756 return (non_config_tasks || (config_task && !config_task_suspended)); 5757 } 5758 5759 static void 5760 spa_async_dispatch(spa_t *spa) 5761 { 5762 mutex_enter(&spa->spa_async_lock); 5763 if (spa_async_tasks_pending(spa) && 5764 !spa->spa_async_suspended && 5765 spa->spa_async_thread == NULL && 5766 rootdir != NULL) 5767 spa->spa_async_thread = thread_create(NULL, 0, 5768 spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 5769 mutex_exit(&spa->spa_async_lock); 5770 } 5771 5772 void 5773 spa_async_request(spa_t *spa, int task) 5774 { 5775 zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 5776 mutex_enter(&spa->spa_async_lock); 5777 spa->spa_async_tasks |= task; 5778 mutex_exit(&spa->spa_async_lock); 5779 } 5780 5781 /* 5782 * ========================================================================== 5783 * SPA syncing routines 5784 * ========================================================================== 5785 */ 5786 5787 static int 5788 bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 5789 { 5790 bpobj_t *bpo = arg; 5791 bpobj_enqueue(bpo, bp, tx); 5792 return (0); 5793 } 5794 5795 static int 5796 spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 5797 { 5798 zio_t *zio = arg; 5799 5800 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 5801 zio->io_flags)); 5802 return (0); 5803 } 5804 5805 /* 5806 * Note: this simple function is not inlined to make it easier to dtrace the 5807 * amount of time spent syncing frees. 5808 */ 5809 static void 5810 spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx) 5811 { 5812 zio_t *zio = zio_root(spa, NULL, NULL, 0); 5813 bplist_iterate(bpl, spa_free_sync_cb, zio, tx); 5814 VERIFY(zio_wait(zio) == 0); 5815 } 5816 5817 /* 5818 * Note: this simple function is not inlined to make it easier to dtrace the 5819 * amount of time spent syncing deferred frees. 
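 *
 * For example, one plausible (untested) way to measure it:
 *
 *	dtrace -n '
 *	    fbt::spa_sync_deferred_frees:entry { self->ts = timestamp; }
 *	    fbt::spa_sync_deferred_frees:return /self->ts/ {
 *		@ = quantize(timestamp - self->ts); self->ts = 0; }'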
5820 */ 5821 static void 5822 spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx) 5823 { 5824 zio_t *zio = zio_root(spa, NULL, NULL, 0); 5825 VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj, 5826 spa_free_sync_cb, zio, tx), ==, 0); 5827 VERIFY0(zio_wait(zio)); 5828 } 5829 5830 5831 static void 5832 spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 5833 { 5834 char *packed = NULL; 5835 size_t bufsize; 5836 size_t nvsize = 0; 5837 dmu_buf_t *db; 5838 5839 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 5840 5841 /* 5842 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 5843 * information. This avoids the dmu_buf_will_dirty() path and 5844 * saves us a pre-read to get data we don't actually care about. 5845 */ 5846 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 5847 packed = kmem_alloc(bufsize, KM_SLEEP); 5848 5849 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 5850 KM_SLEEP) == 0); 5851 bzero(packed + nvsize, bufsize - nvsize); 5852 5853 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 5854 5855 kmem_free(packed, bufsize); 5856 5857 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 5858 dmu_buf_will_dirty(db, tx); 5859 *(uint64_t *)db->db_data = nvsize; 5860 dmu_buf_rele(db, FTAG); 5861 } 5862 5863 static void 5864 spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 5865 const char *config, const char *entry) 5866 { 5867 nvlist_t *nvroot; 5868 nvlist_t **list; 5869 int i; 5870 5871 if (!sav->sav_sync) 5872 return; 5873 5874 /* 5875 * Update the MOS nvlist describing the list of available devices. 5876 * spa_validate_aux() will have already made sure this nvlist is 5877 * valid and the vdevs are labeled appropriately. 5878 */ 5879 if (sav->sav_object == 0) { 5880 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 5881 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 5882 sizeof (uint64_t), tx); 5883 VERIFY(zap_update(spa->spa_meta_objset, 5884 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 5885 &sav->sav_object, tx) == 0); 5886 } 5887 5888 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5889 if (sav->sav_count == 0) { 5890 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 5891 } else { 5892 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 5893 for (i = 0; i < sav->sav_count; i++) 5894 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 5895 B_FALSE, VDEV_CONFIG_L2CACHE); 5896 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 5897 sav->sav_count) == 0); 5898 for (i = 0; i < sav->sav_count; i++) 5899 nvlist_free(list[i]); 5900 kmem_free(list, sav->sav_count * sizeof (void *)); 5901 } 5902 5903 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 5904 nvlist_free(nvroot); 5905 5906 sav->sav_sync = B_FALSE; 5907 } 5908 5909 static void 5910 spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 5911 { 5912 nvlist_t *config; 5913 5914 if (list_is_empty(&spa->spa_config_dirty_list)) 5915 return; 5916 5917 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 5918 5919 config = spa_config_generate(spa, spa->spa_root_vdev, 5920 dmu_tx_get_txg(tx), B_FALSE); 5921 5922 /* 5923 * If we're upgrading the spa version then make sure that 5924 * the config object gets updated with the correct version. 
5925 */ 5926 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 5927 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5928 spa->spa_uberblock.ub_version); 5929 5930 spa_config_exit(spa, SCL_STATE, FTAG); 5931 5932 if (spa->spa_config_syncing) 5933 nvlist_free(spa->spa_config_syncing); 5934 spa->spa_config_syncing = config; 5935 5936 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 5937 } 5938 5939 static void 5940 spa_sync_version(void *arg, dmu_tx_t *tx) 5941 { 5942 uint64_t *versionp = arg; 5943 uint64_t version = *versionp; 5944 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 5945 5946 /* 5947 * Setting the version is special cased when first creating the pool. 5948 */ 5949 ASSERT(tx->tx_txg != TXG_INITIAL); 5950 5951 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 5952 ASSERT(version >= spa_version(spa)); 5953 5954 spa->spa_uberblock.ub_version = version; 5955 vdev_config_dirty(spa->spa_root_vdev); 5956 spa_history_log_internal(spa, "set", tx, "version=%lld", version); 5957 } 5958 5959 /* 5960 * Set zpool properties. 5961 */ 5962 static void 5963 spa_sync_props(void *arg, dmu_tx_t *tx) 5964 { 5965 nvlist_t *nvp = arg; 5966 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 5967 objset_t *mos = spa->spa_meta_objset; 5968 nvpair_t *elem = NULL; 5969 5970 mutex_enter(&spa->spa_props_lock); 5971 5972 while ((elem = nvlist_next_nvpair(nvp, elem))) { 5973 uint64_t intval; 5974 char *strval, *fname; 5975 zpool_prop_t prop; 5976 const char *propname; 5977 zprop_type_t proptype; 5978 spa_feature_t fid; 5979 5980 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 5981 case ZPROP_INVAL: 5982 /* 5983 * We checked this earlier in spa_prop_validate(). 5984 */ 5985 ASSERT(zpool_prop_feature(nvpair_name(elem))); 5986 5987 fname = strchr(nvpair_name(elem), '@') + 1; 5988 VERIFY0(zfeature_lookup_name(fname, &fid)); 5989 5990 spa_feature_enable(spa, fid, tx); 5991 spa_history_log_internal(spa, "set", tx, 5992 "%s=enabled", nvpair_name(elem)); 5993 break; 5994 5995 case ZPOOL_PROP_VERSION: 5996 intval = fnvpair_value_uint64(elem); 5997 /* 5998 * The version is synced separately before other 5999 * properties and should be correct by now. 6000 */ 6001 ASSERT3U(spa_version(spa), >=, intval); 6002 break; 6003 6004 case ZPOOL_PROP_ALTROOT: 6005 /* 6006 * 'altroot' is a non-persistent property. It should 6007 * have been set temporarily at creation or import time. 6008 */ 6009 ASSERT(spa->spa_root != NULL); 6010 break; 6011 6012 case ZPOOL_PROP_READONLY: 6013 case ZPOOL_PROP_CACHEFILE: 6014 /* 6015 * 'readonly' and 'cachefile' are also non-persistent 6016 * properties. 6017 */ 6018 break; 6019 case ZPOOL_PROP_COMMENT: 6020 strval = fnvpair_value_string(elem); 6021 if (spa->spa_comment != NULL) 6022 spa_strfree(spa->spa_comment); 6023 spa->spa_comment = spa_strdup(strval); 6024 /* 6025 * We need to dirty the configuration on all the vdevs 6026 * so that their labels get updated. It's unnecessary 6027 * to do this for pool creation since the vdev's 6028 * configuration has already been dirtied. 6029 */ 6030 if (tx->tx_txg != TXG_INITIAL) 6031 vdev_config_dirty(spa->spa_root_vdev); 6032 spa_history_log_internal(spa, "set", tx, 6033 "%s=%s", nvpair_name(elem), strval); 6034 break; 6035 default: 6036 /* 6037 * Set pool property values in the poolprops mos object.
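 *
 * For context, a property nvlist typically reaches this sync task
 * from a caller shaped roughly like the following sketch (a
 * hypothetical call site; spa_prop_set() is the normal entry point):
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props,
 *	    zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
 *	    ZIO_FAILURE_MODE_CONTINUE);
 *	(void) spa_prop_set(spa, props);
 *	fnvlist_free(props);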
6038 */ 6039 if (spa->spa_pool_props_object == 0) { 6040 spa->spa_pool_props_object = 6041 zap_create_link(mos, DMU_OT_POOL_PROPS, 6042 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 6043 tx); 6044 } 6045 6046 /* normalize the property name */ 6047 propname = zpool_prop_to_name(prop); 6048 proptype = zpool_prop_get_type(prop); 6049 6050 if (nvpair_type(elem) == DATA_TYPE_STRING) { 6051 ASSERT(proptype == PROP_TYPE_STRING); 6052 strval = fnvpair_value_string(elem); 6053 VERIFY0(zap_update(mos, 6054 spa->spa_pool_props_object, propname, 6055 1, strlen(strval) + 1, strval, tx)); 6056 spa_history_log_internal(spa, "set", tx, 6057 "%s=%s", nvpair_name(elem), strval); 6058 } else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 6059 intval = fnvpair_value_uint64(elem); 6060 6061 if (proptype == PROP_TYPE_INDEX) { 6062 const char *unused; 6063 VERIFY0(zpool_prop_index_to_string( 6064 prop, intval, &unused)); 6065 } 6066 VERIFY0(zap_update(mos, 6067 spa->spa_pool_props_object, propname, 6068 8, 1, &intval, tx)); 6069 spa_history_log_internal(spa, "set", tx, 6070 "%s=%lld", nvpair_name(elem), intval); 6071 } else { 6072 ASSERT(0); /* not allowed */ 6073 } 6074 6075 switch (prop) { 6076 case ZPOOL_PROP_DELEGATION: 6077 spa->spa_delegation = intval; 6078 break; 6079 case ZPOOL_PROP_BOOTFS: 6080 spa->spa_bootfs = intval; 6081 break; 6082 case ZPOOL_PROP_FAILUREMODE: 6083 spa->spa_failmode = intval; 6084 break; 6085 case ZPOOL_PROP_AUTOEXPAND: 6086 spa->spa_autoexpand = intval; 6087 if (tx->tx_txg != TXG_INITIAL) 6088 spa_async_request(spa, 6089 SPA_ASYNC_AUTOEXPAND); 6090 break; 6091 case ZPOOL_PROP_DEDUPDITTO: 6092 spa->spa_dedup_ditto = intval; 6093 break; 6094 default: 6095 break; 6096 } 6097 } 6098 6099 } 6100 6101 mutex_exit(&spa->spa_props_lock); 6102 } 6103 6104 /* 6105 * Perform one-time upgrade on-disk changes. spa_version() does not 6106 * reflect the new version this txg, so there must be no changes this 6107 * txg to anything that the upgrade code depends on after it executes. 6108 * Therefore this must be called after dsl_pool_sync() does the sync 6109 * tasks. 
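 *
 * Each upgrade step below uses the same idiom, firing exactly once in
 * the txg whose sync carries the on-disk version across the relevant
 * threshold (SPA_VERSION_X is a placeholder, not a real constant):
 *
 *	if (spa->spa_ubsync.ub_version < SPA_VERSION_X &&
 *	    spa->spa_uberblock.ub_version >= SPA_VERSION_X)
 *		perform_the_one_time_upgrade(dp, tx);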
6110 */ 6111 static void 6112 spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) 6113 { 6114 dsl_pool_t *dp = spa->spa_dsl_pool; 6115 6116 ASSERT(spa->spa_sync_pass == 1); 6117 6118 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 6119 6120 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 6121 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 6122 dsl_pool_create_origin(dp, tx); 6123 6124 /* Keeping the origin open increases spa_minref */ 6125 spa->spa_minref += 3; 6126 } 6127 6128 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 6129 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 6130 dsl_pool_upgrade_clones(dp, tx); 6131 } 6132 6133 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES && 6134 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) { 6135 dsl_pool_upgrade_dir_clones(dp, tx); 6136 6137 /* Keeping the freedir open increases spa_minref */ 6138 spa->spa_minref += 3; 6139 } 6140 6141 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES && 6142 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 6143 spa_feature_create_zap_objects(spa, tx); 6144 } 6145 6146 /* 6147 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable 6148 * when the ability to use lz4 compression for metadata was added. 6149 * Old pools that have this feature enabled must be upgraded to have 6150 * this feature active. 6151 */ 6152 if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 6153 boolean_t lz4_en = spa_feature_is_enabled(spa, 6154 SPA_FEATURE_LZ4_COMPRESS); 6155 boolean_t lz4_ac = spa_feature_is_active(spa, 6156 SPA_FEATURE_LZ4_COMPRESS); 6157 6158 if (lz4_en && !lz4_ac) 6159 spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx); 6160 } 6161 rrw_exit(&dp->dp_config_rwlock, FTAG); 6162 } 6163 6164 /* 6165 * Sync the specified transaction group. New blocks may be dirtied as 6166 * part of the process, so we iterate until it converges. 6167 */ 6168 void 6169 spa_sync(spa_t *spa, uint64_t txg) 6170 { 6171 dsl_pool_t *dp = spa->spa_dsl_pool; 6172 objset_t *mos = spa->spa_meta_objset; 6173 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 6174 vdev_t *rvd = spa->spa_root_vdev; 6175 vdev_t *vd; 6176 dmu_tx_t *tx; 6177 int error; 6178 6179 VERIFY(spa_writeable(spa)); 6180 6181 /* 6182 * Lock out configuration changes. 6183 */ 6184 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6185 6186 spa->spa_syncing_txg = txg; 6187 spa->spa_sync_pass = 0; 6188 6189 /* 6190 * If there are any pending vdev state changes, convert them 6191 * into config changes that go out with this transaction group. 6192 */ 6193 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6194 while (list_head(&spa->spa_state_dirty_list) != NULL) { 6195 /* 6196 * We need the write lock here because, for aux vdevs, 6197 * calling vdev_config_dirty() modifies sav_config. 6198 * This is ugly and will become unnecessary when we 6199 * eliminate the aux vdev wart by integrating all vdevs 6200 * into the root vdev tree.
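 *
 * Note that the configuration locks cannot be upgraded in place;
 * that is why the code below drops SCL_CONFIG | SCL_STATE entirely
 * and immediately reacquires both as writer before draining the
 * state-dirty list.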
6201 */ 6202 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6203 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 6204 while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 6205 vdev_state_clean(vd); 6206 vdev_config_dirty(vd); 6207 } 6208 spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6209 spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 6210 } 6211 spa_config_exit(spa, SCL_STATE, FTAG); 6212 6213 tx = dmu_tx_create_assigned(dp, txg); 6214 6215 spa->spa_sync_starttime = gethrtime(); 6216 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 6217 spa->spa_sync_starttime + spa->spa_deadman_synctime)); 6218 6219 /* 6220 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 6221 * set spa_deflate if we have no raid-z vdevs. 6222 */ 6223 if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 6224 spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 6225 int i; 6226 6227 for (i = 0; i < rvd->vdev_children; i++) { 6228 vd = rvd->vdev_child[i]; 6229 if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 6230 break; 6231 } 6232 if (i == rvd->vdev_children) { 6233 spa->spa_deflate = TRUE; 6234 VERIFY(0 == zap_add(spa->spa_meta_objset, 6235 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6236 sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 6237 } 6238 } 6239 6240 /* 6241 * If anything has changed in this txg, or if someone is waiting 6242 * for this txg to sync (eg, spa_vdev_remove()), push the 6243 * deferred frees from the previous txg. If not, leave them 6244 * alone so that we don't generate work on an otherwise idle 6245 * system. 6246 */ 6247 if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 6248 !txg_list_empty(&dp->dp_dirty_dirs, txg) || 6249 !txg_list_empty(&dp->dp_sync_tasks, txg) || 6250 ((dsl_scan_active(dp->dp_scan) || 6251 txg_sync_waiting(dp)) && !spa_shutting_down(spa))) { 6252 spa_sync_deferred_frees(spa, tx); 6253 } 6254 6255 /* 6256 * Iterate to convergence. 6257 */ 6258 do { 6259 int pass = ++spa->spa_sync_pass; 6260 6261 spa_sync_config_object(spa, tx); 6262 spa_sync_aux_dev(spa, &spa->spa_spares, tx, 6263 ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 6264 spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 6265 ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 6266 spa_errlog_sync(spa, txg); 6267 dsl_pool_sync(dp, txg); 6268 6269 if (pass < zfs_sync_pass_deferred_free) { 6270 spa_sync_frees(spa, free_bpl, tx); 6271 } else { 6272 bplist_iterate(free_bpl, bpobj_enqueue_cb, 6273 &spa->spa_deferred_bpobj, tx); 6274 } 6275 6276 ddt_sync(spa, txg); 6277 dsl_scan_sync(dp, tx); 6278 6279 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) 6280 vdev_sync(vd, txg); 6281 6282 if (pass == 1) 6283 spa_sync_upgrades(spa, tx); 6284 6285 } while (dmu_objset_is_dirty(mos, txg)); 6286 6287 /* 6288 * Rewrite the vdev configuration (which includes the uberblock) 6289 * to commit the transaction group. 6290 * 6291 * If there are no dirty vdevs, we sync the uberblock to a few 6292 * random top-level vdevs that are known to be visible in the 6293 * config cache (see spa_vdev_add() for a complete description). 6294 * If there *are* dirty vdevs, sync the uberblock to all vdevs. 6295 */ 6296 for (;;) { 6297 /* 6298 * We hold SCL_STATE to prevent vdev open/close/etc. 6299 * while we're attempting to write the vdev labels. 
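 *
 * If the label writes fail, we retry once with the final argument to
 * vdev_config_sync() set to B_TRUE, asking for a more aggressive
 * attempt; if that fails as well, we suspend the pool, wait for it to
 * be resumed, and start the loop over.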
6300 */ 6301 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6302 6303 if (list_is_empty(&spa->spa_config_dirty_list)) { 6304 vdev_t *svd[SPA_DVAS_PER_BP]; 6305 int svdcount = 0; 6306 int children = rvd->vdev_children; 6307 int c0 = spa_get_random(children); 6308 6309 for (int c = 0; c < children; c++) { 6310 vd = rvd->vdev_child[(c0 + c) % children]; 6311 if (vd->vdev_ms_array == 0 || vd->vdev_islog) 6312 continue; 6313 svd[svdcount++] = vd; 6314 if (svdcount == SPA_DVAS_PER_BP) 6315 break; 6316 } 6317 error = vdev_config_sync(svd, svdcount, txg, B_FALSE); 6318 if (error != 0) 6319 error = vdev_config_sync(svd, svdcount, txg, 6320 B_TRUE); 6321 } else { 6322 error = vdev_config_sync(rvd->vdev_child, 6323 rvd->vdev_children, txg, B_FALSE); 6324 if (error != 0) 6325 error = vdev_config_sync(rvd->vdev_child, 6326 rvd->vdev_children, txg, B_TRUE); 6327 } 6328 6329 if (error == 0) 6330 spa->spa_last_synced_guid = rvd->vdev_guid; 6331 6332 spa_config_exit(spa, SCL_STATE, FTAG); 6333 6334 if (error == 0) 6335 break; 6336 zio_suspend(spa, NULL); 6337 zio_resume_wait(spa); 6338 } 6339 dmu_tx_commit(tx); 6340 6341 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 6342 6343 /* 6344 * Clear the dirty config list. 6345 */ 6346 while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 6347 vdev_config_clean(vd); 6348 6349 /* 6350 * Now that the new config has synced transactionally, 6351 * let it become visible to the config cache. 6352 */ 6353 if (spa->spa_config_syncing != NULL) { 6354 spa_config_set(spa, spa->spa_config_syncing); 6355 spa->spa_config_txg = txg; 6356 spa->spa_config_syncing = NULL; 6357 } 6358 6359 spa->spa_ubsync = spa->spa_uberblock; 6360 6361 dsl_pool_sync_done(dp, txg); 6362 6363 /* 6364 * Update usable space statistics. 6365 */ 6366 while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) 6367 vdev_sync_done(vd, txg); 6368 6369 spa_update_dspace(spa); 6370 6371 /* 6372 * It had better be the case that we didn't dirty anything 6373 * since vdev_config_sync(). 6374 */ 6375 ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 6376 ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 6377 ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 6378 6379 spa->spa_sync_pass = 0; 6380 6381 spa_config_exit(spa, SCL_CONFIG, FTAG); 6382 6383 spa_handle_ignored_writes(spa); 6384 6385 /* 6386 * If any async tasks have been requested, kick them off. 6387 */ 6388 spa_async_dispatch(spa); 6389 } 6390 6391 /* 6392 * Sync all pools. We don't want to hold the namespace lock across these 6393 * operations, so we take a reference on the spa_t and drop the lock during the 6394 * sync. 6395 */ 6396 void 6397 spa_sync_allpools(void) 6398 { 6399 spa_t *spa = NULL; 6400 mutex_enter(&spa_namespace_lock); 6401 while ((spa = spa_next(spa)) != NULL) { 6402 if (spa_state(spa) != POOL_STATE_ACTIVE || 6403 !spa_writeable(spa) || spa_suspended(spa)) 6404 continue; 6405 spa_open_ref(spa, FTAG); 6406 mutex_exit(&spa_namespace_lock); 6407 txg_wait_synced(spa_get_dsl(spa), 0); 6408 mutex_enter(&spa_namespace_lock); 6409 spa_close(spa, FTAG); 6410 } 6411 mutex_exit(&spa_namespace_lock); 6412 } 6413 6414 /* 6415 * ========================================================================== 6416 * Miscellaneous routines 6417 * ========================================================================== 6418 */ 6419 6420 /* 6421 * Remove all pools in the system. 6422 */ 6423 void 6424 spa_evict_all(void) 6425 { 6426 spa_t *spa; 6427 6428 /* 6429 * Remove all cached state. 
All pools should be closed now, 6430 * so every spa in the AVL tree should be unreferenced. 6431 */ 6432 mutex_enter(&spa_namespace_lock); 6433 while ((spa = spa_next(NULL)) != NULL) { 6434 /* 6435 * Stop async tasks. The async thread may need to detach 6436 * a device that's been replaced, which requires grabbing 6437 * spa_namespace_lock, so we must drop it here. 6438 */ 6439 spa_open_ref(spa, FTAG); 6440 mutex_exit(&spa_namespace_lock); 6441 spa_async_suspend(spa); 6442 mutex_enter(&spa_namespace_lock); 6443 spa_close(spa, FTAG); 6444 6445 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6446 spa_unload(spa); 6447 spa_deactivate(spa); 6448 } 6449 spa_remove(spa); 6450 } 6451 mutex_exit(&spa_namespace_lock); 6452 } 6453 6454 vdev_t * 6455 spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 6456 { 6457 vdev_t *vd; 6458 int i; 6459 6460 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 6461 return (vd); 6462 6463 if (aux) { 6464 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 6465 vd = spa->spa_l2cache.sav_vdevs[i]; 6466 if (vd->vdev_guid == guid) 6467 return (vd); 6468 } 6469 6470 for (i = 0; i < spa->spa_spares.sav_count; i++) { 6471 vd = spa->spa_spares.sav_vdevs[i]; 6472 if (vd->vdev_guid == guid) 6473 return (vd); 6474 } 6475 } 6476 6477 return (NULL); 6478 } 6479 6480 void 6481 spa_upgrade(spa_t *spa, uint64_t version) 6482 { 6483 ASSERT(spa_writeable(spa)); 6484 6485 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6486 6487 /* 6488 * This should only be called for a non-faulted pool, and since a 6489 * future version would result in an unopenable pool, this shouldn't be 6490 * possible. 6491 */ 6492 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 6493 ASSERT3U(version, >=, spa->spa_uberblock.ub_version); 6494 6495 spa->spa_uberblock.ub_version = version; 6496 vdev_config_dirty(spa->spa_root_vdev); 6497 6498 spa_config_exit(spa, SCL_ALL, FTAG); 6499 6500 txg_wait_synced(spa_get_dsl(spa), 0); 6501 } 6502 6503 boolean_t 6504 spa_has_spare(spa_t *spa, uint64_t guid) 6505 { 6506 int i; 6507 uint64_t spareguid; 6508 spa_aux_vdev_t *sav = &spa->spa_spares; 6509 6510 for (i = 0; i < sav->sav_count; i++) 6511 if (sav->sav_vdevs[i]->vdev_guid == guid) 6512 return (B_TRUE); 6513 6514 for (i = 0; i < sav->sav_npending; i++) { 6515 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 6516 &spareguid) == 0 && spareguid == guid) 6517 return (B_TRUE); 6518 } 6519 6520 return (B_FALSE); 6521 } 6522 6523 /* 6524 * Check if a pool has an active shared spare device. 6525 * Note: the reference count of an active spare is 2: as a spare and as a replacement. 6526 */ 6527 static boolean_t 6528 spa_has_active_shared_spare(spa_t *spa) 6529 { 6530 int i, refcnt; 6531 uint64_t pool; 6532 spa_aux_vdev_t *sav = &spa->spa_spares; 6533 6534 for (i = 0; i < sav->sav_count; i++) { 6535 if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 6536 &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 6537 refcnt > 2) 6538 return (B_TRUE); 6539 } 6540 6541 return (B_FALSE); 6542 } 6543 6544 /* 6545 * Post a sysevent corresponding to the given event. The 'name' must be one of 6546 * the event definitions in sys/sysevent/eventdefs.h. The payload will be 6547 * filled in from the spa and (optionally) the vdev. This doesn't do anything 6548 * in the userland libzpool, as we don't want consumers to misinterpret ztest 6549 * or zdb as real changes.
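 *
 * A typical (hypothetical) call site announcing, say, the completion
 * of a resilver on the pool as a whole, with no per-vdev payload,
 * would look like:
 *
 *	spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_FINISH);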
6550 */ 6551 void 6552 spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 6553 { 6554 #ifdef _KERNEL 6555 sysevent_t *ev; 6556 sysevent_attr_list_t *attr = NULL; 6557 sysevent_value_t value; 6558 sysevent_id_t eid; 6559 6560 ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 6561 SE_SLEEP); 6562 6563 value.value_type = SE_DATA_TYPE_STRING; 6564 value.value.sv_string = spa_name(spa); 6565 if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 6566 goto done; 6567 6568 value.value_type = SE_DATA_TYPE_UINT64; 6569 value.value.sv_uint64 = spa_guid(spa); 6570 if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 6571 goto done; 6572 6573 if (vd) { 6574 value.value_type = SE_DATA_TYPE_UINT64; 6575 value.value.sv_uint64 = vd->vdev_guid; 6576 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 6577 SE_SLEEP) != 0) 6578 goto done; 6579 6580 if (vd->vdev_path) { 6581 value.value_type = SE_DATA_TYPE_STRING; 6582 value.value.sv_string = vd->vdev_path; 6583 if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 6584 &value, SE_SLEEP) != 0) 6585 goto done; 6586 } 6587 } 6588 6589 if (sysevent_attach_attributes(ev, attr) != 0) 6590 goto done; 6591 attr = NULL; 6592 6593 (void) log_sysevent(ev, SE_SLEEP, &eid); 6594 6595 done: 6596 if (attr) 6597 sysevent_free_attr(attr); 6598 sysevent_free(ev); 6599 #endif 6600 }