Bug

[В начало]

Ошибка # 101

Показать/спрятать трассу ошибок
Error trace
Function bodies
Blocks
  • Others...
    Function bodies without model function calls
    Initialization function calls
    Initialization function bodies
    Entry point
    Entry point body
    Function calls
    Skipped function calls
    Formal parameter names
    Declarations
    Assumes
    Assume conditions
    Returns
    Return values
    DEG initialization
    DEG function calls
    Model function calls
    Model function bodies
    Model asserts
    Model state changes
    Model function function calls
    Model function function bodies
    Model returns
    Model others
    Indentation
    Line numbers
    Expand signs
-entry_point
{
1966 ldv_s_acpi_nfit_driver_acpi_driver = 0;
1946 LDV_IN_INTERRUPT = 1;
1955 ldv_initialize() { /* Function call is skipped due to function is undefined */}
1961 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
1962 +nfit_init()
1962 assume(!(tmp != 0));
1970 goto ldv_34702;
1970 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
1970 assume(tmp___1 != 0);
1973 goto ldv_34701;
1971 ldv_34701:;
1974 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1974 switch (tmp___0)
1975 assume(!(tmp___0 == 0));
1993 assume(!(tmp___0 == 1));
2010 assume(!(tmp___0 == 2));
2027 assume(tmp___0 == 3);
2035 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
2036 -acpi_nfit_notify(var_group3, var_acpi_nfit_notify_62_p1)
{
1779 +dev_get_drvdata((const struct device *)(&(adev->dev)))
1779 acpi_desc = (struct acpi_nfit_desc *)tmp;
1780 buf.length = 18446744073709551615ULL;
1780 buf.pointer = (void *)0;
1782 dev = &(adev->dev);
1786 descriptor.modname = "nfit";
1786 descriptor.function = "acpi_nfit_notify";
1786 descriptor.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c";
1786 descriptor.format = "%s: event: %d\n";
1786 descriptor.lineno = 1786U;
1786 descriptor.flags = 0U;
1786 +__builtin_expect(((long)(descriptor.flags)) & 1L, 0L)
1786 assume(!(tmp___0 != 0L));
1788 -device_lock(dev)
{
957 -ldv_mutex_lock_7(&(dev->mutex))
{
128 -ldv_mutex_lock_mutex_of_device(ldv_func_arg1)
{
624 assume(!(ldv_mutex_mutex_of_device != 1));
626 ldv_mutex_mutex_of_device = 2;
627 return ;;
}
130 mutex_lock(ldv_func_arg1) { /* Function call is skipped due to function is undefined */}
131 return ;;
}
958 return ;;
}
1789 unsigned long __CPAchecker_TMP_0 = (unsigned long)(dev->driver);
1789 assume(__CPAchecker_TMP_0 == ((unsigned long)((struct device_driver *)0)));
1791 descriptor___0.modname = "nfit";
1791 descriptor___0.function = "acpi_nfit_notify";
1791 descriptor___0.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c";
1791 descriptor___0.format = "%s: no driver found for dev\n";
1791 descriptor___0.lineno = 1791U;
1791 descriptor___0.flags = 0U;
1791 +__builtin_expect(((long)(descriptor___0.flags)) & 1L, 0L)
1791 assume(!(tmp___1 != 0L));
1792 return ;;
}
2043 goto ldv_34696;
2046 ldv_34696:;
2047 ldv_34702:;
1970 tmp___1 = nondet_int() { /* Function call is skipped due to function is undefined */}
1970 assume(tmp___1 != 0);
1973 goto ldv_34701;
1971 ldv_34701:;
1974 tmp___0 = nondet_int() { /* Function call is skipped due to function is undefined */}
1974 switch (tmp___0)
1975 assume(!(tmp___0 == 0));
1993 assume(!(tmp___0 == 1));
2010 assume(!(tmp___0 == 2));
2027 assume(tmp___0 == 3);
2035 ldv_handler_precall() { /* Function call is skipped due to function is undefined */}
2036 -acpi_nfit_notify(var_group3, var_acpi_nfit_notify_62_p1)
{
1779 +dev_get_drvdata((const struct device *)(&(adev->dev)))
1779 acpi_desc = (struct acpi_nfit_desc *)tmp;
1780 buf.length = 18446744073709551615ULL;
1780 buf.pointer = (void *)0;
1782 dev = &(adev->dev);
1786 descriptor.modname = "nfit";
1786 descriptor.function = "acpi_nfit_notify";
1786 descriptor.filename = "/home/ldvuser/ref_launch/work/current--X--drivers--X--defaultlinux-4.4-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.4-rc1.tar.xz/csd_deg_dscv/698/dscv_tempdir/dscv/ri/32_7a/drivers/acpi/nfit.c";
1786 descriptor.format = "%s: event: %d\n";
1786 descriptor.lineno = 1786U;
1786 descriptor.flags = 0U;
1786 +__builtin_expect(((long)(descriptor.flags)) & 1L, 0L)
1786 assume(!(tmp___0 != 0L));
1788 -device_lock(dev)
{
957 -ldv_mutex_lock_7(&(dev->mutex))
{
128 -ldv_mutex_lock_mutex_of_device(ldv_func_arg1)
{
624 assume(ldv_mutex_mutex_of_device != 1);
624 +ldv_error()
}
}
}
}
}
Source code
1 2 /* 3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of version 2 of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, but 10 * WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 * General Public License for more details. 13 */ 14 #include <linux/list_sort.h> 15 #include <linux/libnvdimm.h> 16 #include <linux/module.h> 17 #include <linux/mutex.h> 18 #include <linux/ndctl.h> 19 #include <linux/list.h> 20 #include <linux/acpi.h> 21 #include <linux/sort.h> 22 #include <linux/pmem.h> 23 #include <linux/io.h> 24 #include <asm/cacheflush.h> 25 #include "nfit.h" 26 27 /* 28 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 29 * irrelevant. 30 */ 31 #include <linux/io-64-nonatomic-hi-lo.h> 32 33 static bool force_enable_dimms; 34 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); 35 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); 36 37 struct nfit_table_prev { 38 struct list_head spas; 39 struct list_head memdevs; 40 struct list_head dcrs; 41 struct list_head bdws; 42 struct list_head idts; 43 struct list_head flushes; 44 }; 45 46 static u8 nfit_uuid[NFIT_UUID_MAX][16]; 47 48 const u8 *to_nfit_uuid(enum nfit_uuids id) 49 { 50 return nfit_uuid[id]; 51 } 52 EXPORT_SYMBOL(to_nfit_uuid); 53 54 static struct acpi_nfit_desc *to_acpi_nfit_desc( 55 struct nvdimm_bus_descriptor *nd_desc) 56 { 57 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 58 } 59 60 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 61 { 62 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 63 64 /* 65 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct 66 * acpi_device. 
67 */ 68 if (!nd_desc->provider_name 69 || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) 70 return NULL; 71 72 return to_acpi_device(acpi_desc->dev); 73 } 74 75 static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, 76 struct nvdimm *nvdimm, unsigned int cmd, void *buf, 77 unsigned int buf_len) 78 { 79 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 80 const struct nd_cmd_desc *desc = NULL; 81 union acpi_object in_obj, in_buf, *out_obj; 82 struct device *dev = acpi_desc->dev; 83 const char *cmd_name, *dimm_name; 84 unsigned long dsm_mask; 85 acpi_handle handle; 86 const u8 *uuid; 87 u32 offset; 88 int rc, i; 89 90 if (nvdimm) { 91 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 92 struct acpi_device *adev = nfit_mem->adev; 93 94 if (!adev) 95 return -ENOTTY; 96 dimm_name = nvdimm_name(nvdimm); 97 cmd_name = nvdimm_cmd_name(cmd); 98 dsm_mask = nfit_mem->dsm_mask; 99 desc = nd_cmd_dimm_desc(cmd); 100 uuid = to_nfit_uuid(NFIT_DEV_DIMM); 101 handle = adev->handle; 102 } else { 103 struct acpi_device *adev = to_acpi_dev(acpi_desc); 104 105 cmd_name = nvdimm_bus_cmd_name(cmd); 106 dsm_mask = nd_desc->dsm_mask; 107 desc = nd_cmd_bus_desc(cmd); 108 uuid = to_nfit_uuid(NFIT_DEV_BUS); 109 handle = adev->handle; 110 dimm_name = "bus"; 111 } 112 113 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 114 return -ENOTTY; 115 116 if (!test_bit(cmd, &dsm_mask)) 117 return -ENOTTY; 118 119 in_obj.type = ACPI_TYPE_PACKAGE; 120 in_obj.package.count = 1; 121 in_obj.package.elements = &in_buf; 122 in_buf.type = ACPI_TYPE_BUFFER; 123 in_buf.buffer.pointer = buf; 124 in_buf.buffer.length = 0; 125 126 /* libnvdimm has already validated the input envelope */ 127 for (i = 0; i < desc->in_num; i++) 128 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, 129 i, buf); 130 131 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { 132 dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__, 133 dimm_name, cmd_name, in_buf.buffer.length); 134 
print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 135 4, in_buf.buffer.pointer, min_t(u32, 128, 136 in_buf.buffer.length), true); 137 } 138 139 out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj); 140 if (!out_obj) { 141 dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, 142 cmd_name); 143 return -EINVAL; 144 } 145 146 if (out_obj->package.type != ACPI_TYPE_BUFFER) { 147 dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", 148 __func__, dimm_name, cmd_name, out_obj->type); 149 rc = -EINVAL; 150 goto out; 151 } 152 153 if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { 154 dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, 155 dimm_name, cmd_name, out_obj->buffer.length); 156 print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 157 4, out_obj->buffer.pointer, min_t(u32, 128, 158 out_obj->buffer.length), true); 159 } 160 161 for (i = 0, offset = 0; i < desc->out_num; i++) { 162 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, 163 (u32 *) out_obj->buffer.pointer); 164 165 if (offset + out_size > out_obj->buffer.length) { 166 dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", 167 __func__, dimm_name, cmd_name, i); 168 break; 169 } 170 171 if (in_buf.buffer.length + offset + out_size > buf_len) { 172 dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", 173 __func__, dimm_name, cmd_name, i); 174 rc = -ENXIO; 175 goto out; 176 } 177 memcpy(buf + in_buf.buffer.length + offset, 178 out_obj->buffer.pointer + offset, out_size); 179 offset += out_size; 180 } 181 if (offset + in_buf.buffer.length < buf_len) { 182 if (i >= 1) { 183 /* 184 * status valid, return the number of bytes left 185 * unfilled in the output buffer 186 */ 187 rc = buf_len - offset - in_buf.buffer.length; 188 } else { 189 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 190 __func__, dimm_name, cmd_name, buf_len, 191 offset); 192 rc = -ENXIO; 193 } 194 } else 195 rc = 0; 196 197 out: 198 ACPI_FREE(out_obj); 199 200 
return rc; 201 } 202 203 static const char *spa_type_name(u16 type) 204 { 205 static const char *to_name[] = { 206 [NFIT_SPA_VOLATILE] = "volatile", 207 [NFIT_SPA_PM] = "pmem", 208 [NFIT_SPA_DCR] = "dimm-control-region", 209 [NFIT_SPA_BDW] = "block-data-window", 210 [NFIT_SPA_VDISK] = "volatile-disk", 211 [NFIT_SPA_VCD] = "volatile-cd", 212 [NFIT_SPA_PDISK] = "persistent-disk", 213 [NFIT_SPA_PCD] = "persistent-cd", 214 215 }; 216 217 if (type > NFIT_SPA_PCD) 218 return "unknown"; 219 220 return to_name[type]; 221 } 222 223 static int nfit_spa_type(struct acpi_nfit_system_address *spa) 224 { 225 int i; 226 227 for (i = 0; i < NFIT_UUID_MAX; i++) 228 if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) 229 return i; 230 return -1; 231 } 232 233 static bool add_spa(struct acpi_nfit_desc *acpi_desc, 234 struct nfit_table_prev *prev, 235 struct acpi_nfit_system_address *spa) 236 { 237 struct device *dev = acpi_desc->dev; 238 struct nfit_spa *nfit_spa; 239 240 list_for_each_entry(nfit_spa, &prev->spas, list) { 241 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { 242 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 243 return true; 244 } 245 } 246 247 nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL); 248 if (!nfit_spa) 249 return false; 250 INIT_LIST_HEAD(&nfit_spa->list); 251 nfit_spa->spa = spa; 252 list_add_tail(&nfit_spa->list, &acpi_desc->spas); 253 dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, 254 spa->range_index, 255 spa_type_name(nfit_spa_type(spa))); 256 return true; 257 } 258 259 static bool add_memdev(struct acpi_nfit_desc *acpi_desc, 260 struct nfit_table_prev *prev, 261 struct acpi_nfit_memory_map *memdev) 262 { 263 struct device *dev = acpi_desc->dev; 264 struct nfit_memdev *nfit_memdev; 265 266 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 267 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 268 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 269 return true; 270 } 271 272 nfit_memdev = 
devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL); 273 if (!nfit_memdev) 274 return false; 275 INIT_LIST_HEAD(&nfit_memdev->list); 276 nfit_memdev->memdev = memdev; 277 list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); 278 dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", 279 __func__, memdev->device_handle, memdev->range_index, 280 memdev->region_index); 281 return true; 282 } 283 284 static bool add_dcr(struct acpi_nfit_desc *acpi_desc, 285 struct nfit_table_prev *prev, 286 struct acpi_nfit_control_region *dcr) 287 { 288 struct device *dev = acpi_desc->dev; 289 struct nfit_dcr *nfit_dcr; 290 291 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 292 if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) { 293 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 294 return true; 295 } 296 297 nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL); 298 if (!nfit_dcr) 299 return false; 300 INIT_LIST_HEAD(&nfit_dcr->list); 301 nfit_dcr->dcr = dcr; 302 list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); 303 dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, 304 dcr->region_index, dcr->windows); 305 return true; 306 } 307 308 static bool add_bdw(struct acpi_nfit_desc *acpi_desc, 309 struct nfit_table_prev *prev, 310 struct acpi_nfit_data_region *bdw) 311 { 312 struct device *dev = acpi_desc->dev; 313 struct nfit_bdw *nfit_bdw; 314 315 list_for_each_entry(nfit_bdw, &prev->bdws, list) 316 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 317 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 318 return true; 319 } 320 321 nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL); 322 if (!nfit_bdw) 323 return false; 324 INIT_LIST_HEAD(&nfit_bdw->list); 325 nfit_bdw->bdw = bdw; 326 list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); 327 dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, 328 bdw->region_index, bdw->windows); 329 return true; 330 } 331 332 static bool add_idt(struct acpi_nfit_desc *acpi_desc, 333 struct nfit_table_prev *prev, 334 
struct acpi_nfit_interleave *idt) 335 { 336 struct device *dev = acpi_desc->dev; 337 struct nfit_idt *nfit_idt; 338 339 list_for_each_entry(nfit_idt, &prev->idts, list) 340 if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) { 341 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 342 return true; 343 } 344 345 nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL); 346 if (!nfit_idt) 347 return false; 348 INIT_LIST_HEAD(&nfit_idt->list); 349 nfit_idt->idt = idt; 350 list_add_tail(&nfit_idt->list, &acpi_desc->idts); 351 dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, 352 idt->interleave_index, idt->line_count); 353 return true; 354 } 355 356 static bool add_flush(struct acpi_nfit_desc *acpi_desc, 357 struct nfit_table_prev *prev, 358 struct acpi_nfit_flush_address *flush) 359 { 360 struct device *dev = acpi_desc->dev; 361 struct nfit_flush *nfit_flush; 362 363 list_for_each_entry(nfit_flush, &prev->flushes, list) 364 if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) { 365 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 366 return true; 367 } 368 369 nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL); 370 if (!nfit_flush) 371 return false; 372 INIT_LIST_HEAD(&nfit_flush->list); 373 nfit_flush->flush = flush; 374 list_add_tail(&nfit_flush->list, &acpi_desc->flushes); 375 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, 376 flush->device_handle, flush->hint_count); 377 return true; 378 } 379 380 static void *add_table(struct acpi_nfit_desc *acpi_desc, 381 struct nfit_table_prev *prev, void *table, const void *end) 382 { 383 struct device *dev = acpi_desc->dev; 384 struct acpi_nfit_header *hdr; 385 void *err = ERR_PTR(-ENOMEM); 386 387 if (table >= end) 388 return NULL; 389 390 hdr = table; 391 if (!hdr->length) { 392 dev_warn(dev, "found a zero length table '%d' parsing nfit\n", 393 hdr->type); 394 return NULL; 395 } 396 397 switch (hdr->type) { 398 case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: 399 if 
(!add_spa(acpi_desc, prev, table)) 400 return err; 401 break; 402 case ACPI_NFIT_TYPE_MEMORY_MAP: 403 if (!add_memdev(acpi_desc, prev, table)) 404 return err; 405 break; 406 case ACPI_NFIT_TYPE_CONTROL_REGION: 407 if (!add_dcr(acpi_desc, prev, table)) 408 return err; 409 break; 410 case ACPI_NFIT_TYPE_DATA_REGION: 411 if (!add_bdw(acpi_desc, prev, table)) 412 return err; 413 break; 414 case ACPI_NFIT_TYPE_INTERLEAVE: 415 if (!add_idt(acpi_desc, prev, table)) 416 return err; 417 break; 418 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 419 if (!add_flush(acpi_desc, prev, table)) 420 return err; 421 break; 422 case ACPI_NFIT_TYPE_SMBIOS: 423 dev_dbg(dev, "%s: smbios\n", __func__); 424 break; 425 default: 426 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); 427 break; 428 } 429 430 return table + hdr->length; 431 } 432 433 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, 434 struct nfit_mem *nfit_mem) 435 { 436 u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 437 u16 dcr = nfit_mem->dcr->region_index; 438 struct nfit_spa *nfit_spa; 439 440 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 441 u16 range_index = nfit_spa->spa->range_index; 442 int type = nfit_spa_type(nfit_spa->spa); 443 struct nfit_memdev *nfit_memdev; 444 445 if (type != NFIT_SPA_BDW) 446 continue; 447 448 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 449 if (nfit_memdev->memdev->range_index != range_index) 450 continue; 451 if (nfit_memdev->memdev->device_handle != device_handle) 452 continue; 453 if (nfit_memdev->memdev->region_index != dcr) 454 continue; 455 456 nfit_mem->spa_bdw = nfit_spa->spa; 457 return; 458 } 459 } 460 461 dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", 462 nfit_mem->spa_dcr->range_index); 463 nfit_mem->bdw = NULL; 464 } 465 466 static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, 467 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 468 { 469 u16 dcr = 
__to_nfit_memdev(nfit_mem)->region_index; 470 struct nfit_memdev *nfit_memdev; 471 struct nfit_flush *nfit_flush; 472 struct nfit_dcr *nfit_dcr; 473 struct nfit_bdw *nfit_bdw; 474 struct nfit_idt *nfit_idt; 475 u16 idt_idx, range_index; 476 477 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { 478 if (nfit_dcr->dcr->region_index != dcr) 479 continue; 480 nfit_mem->dcr = nfit_dcr->dcr; 481 break; 482 } 483 484 if (!nfit_mem->dcr) { 485 dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n", 486 spa->range_index, __to_nfit_memdev(nfit_mem) 487 ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR"); 488 return -ENODEV; 489 } 490 491 /* 492 * We've found enough to create an nvdimm, optionally 493 * find an associated BDW 494 */ 495 list_add(&nfit_mem->list, &acpi_desc->dimms); 496 497 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 498 if (nfit_bdw->bdw->region_index != dcr) 499 continue; 500 nfit_mem->bdw = nfit_bdw->bdw; 501 break; 502 } 503 504 if (!nfit_mem->bdw) 505 return 0; 506 507 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 508 509 if (!nfit_mem->spa_bdw) 510 return 0; 511 512 range_index = nfit_mem->spa_bdw->range_index; 513 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 514 if (nfit_memdev->memdev->range_index != range_index || 515 nfit_memdev->memdev->region_index != dcr) 516 continue; 517 nfit_mem->memdev_bdw = nfit_memdev->memdev; 518 idt_idx = nfit_memdev->memdev->interleave_index; 519 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 520 if (nfit_idt->idt->interleave_index != idt_idx) 521 continue; 522 nfit_mem->idt_bdw = nfit_idt->idt; 523 break; 524 } 525 526 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { 527 if (nfit_flush->flush->device_handle != 528 nfit_memdev->memdev->device_handle) 529 continue; 530 nfit_mem->nfit_flush = nfit_flush; 531 break; 532 } 533 break; 534 } 535 536 return 0; 537 } 538 539 static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, 540 struct acpi_nfit_system_address *spa) 541 { 542 
struct nfit_mem *nfit_mem, *found; 543 struct nfit_memdev *nfit_memdev; 544 int type = nfit_spa_type(spa); 545 u16 dcr; 546 547 switch (type) { 548 case NFIT_SPA_DCR: 549 case NFIT_SPA_PM: 550 break; 551 default: 552 return 0; 553 } 554 555 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 556 int rc; 557 558 if (nfit_memdev->memdev->range_index != spa->range_index) 559 continue; 560 found = NULL; 561 dcr = nfit_memdev->memdev->region_index; 562 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 563 if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { 564 found = nfit_mem; 565 break; 566 } 567 568 if (found) 569 nfit_mem = found; 570 else { 571 nfit_mem = devm_kzalloc(acpi_desc->dev, 572 sizeof(*nfit_mem), GFP_KERNEL); 573 if (!nfit_mem) 574 return -ENOMEM; 575 INIT_LIST_HEAD(&nfit_mem->list); 576 } 577 578 if (type == NFIT_SPA_DCR) { 579 struct nfit_idt *nfit_idt; 580 u16 idt_idx; 581 582 /* multiple dimms may share a SPA when interleaved */ 583 nfit_mem->spa_dcr = spa; 584 nfit_mem->memdev_dcr = nfit_memdev->memdev; 585 idt_idx = nfit_memdev->memdev->interleave_index; 586 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { 587 if (nfit_idt->idt->interleave_index != idt_idx) 588 continue; 589 nfit_mem->idt_dcr = nfit_idt->idt; 590 break; 591 } 592 } else { 593 /* 594 * A single dimm may belong to multiple SPA-PM 595 * ranges, record at least one in addition to 596 * any SPA-DCR range. 
597 */ 598 nfit_mem->memdev_pmem = nfit_memdev->memdev; 599 } 600 601 if (found) 602 continue; 603 604 rc = nfit_mem_add(acpi_desc, nfit_mem, spa); 605 if (rc) 606 return rc; 607 } 608 609 return 0; 610 } 611 612 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) 613 { 614 struct nfit_mem *a = container_of(_a, typeof(*a), list); 615 struct nfit_mem *b = container_of(_b, typeof(*b), list); 616 u32 handleA, handleB; 617 618 handleA = __to_nfit_memdev(a)->device_handle; 619 handleB = __to_nfit_memdev(b)->device_handle; 620 if (handleA < handleB) 621 return -1; 622 else if (handleA > handleB) 623 return 1; 624 return 0; 625 } 626 627 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) 628 { 629 struct nfit_spa *nfit_spa; 630 631 /* 632 * For each SPA-DCR or SPA-PMEM address range find its 633 * corresponding MEMDEV(s). From each MEMDEV find the 634 * corresponding DCR. Then, if we're operating on a SPA-DCR, 635 * try to find a SPA-BDW and a corresponding BDW that references 636 * the DCR. Throw it all into an nfit_mem object. Note, that 637 * BDWs are optional. 
638 */ 639 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 640 int rc; 641 642 rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); 643 if (rc) 644 return rc; 645 } 646 647 list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); 648 649 return 0; 650 } 651 652 static ssize_t revision_show(struct device *dev, 653 struct device_attribute *attr, char *buf) 654 { 655 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); 656 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 657 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 658 659 return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); 660 } 661 static DEVICE_ATTR_RO(revision); 662 663 static struct attribute *acpi_nfit_attributes[] = { 664 &dev_attr_revision.attr, 665 NULL, 666 }; 667 668 static struct attribute_group acpi_nfit_attribute_group = { 669 .name = "nfit", 670 .attrs = acpi_nfit_attributes, 671 }; 672 673 const struct attribute_group *acpi_nfit_attribute_groups[] = { 674 &nvdimm_bus_attribute_group, 675 &acpi_nfit_attribute_group, 676 NULL, 677 }; 678 EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups); 679 680 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 681 { 682 struct nvdimm *nvdimm = to_nvdimm(dev); 683 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 684 685 return __to_nfit_memdev(nfit_mem); 686 } 687 688 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) 689 { 690 struct nvdimm *nvdimm = to_nvdimm(dev); 691 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 692 693 return nfit_mem->dcr; 694 } 695 696 static ssize_t handle_show(struct device *dev, 697 struct device_attribute *attr, char *buf) 698 { 699 struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); 700 701 return sprintf(buf, "%#x\n", memdev->device_handle); 702 } 703 static DEVICE_ATTR_RO(handle); 704 705 static ssize_t phys_id_show(struct device *dev, 706 struct device_attribute *attr, char *buf) 707 { 708 struct acpi_nfit_memory_map *memdev = 
to_nfit_memdev(dev); 709 710 return sprintf(buf, "%#x\n", memdev->physical_id); 711 } 712 static DEVICE_ATTR_RO(phys_id); 713 714 static ssize_t vendor_show(struct device *dev, 715 struct device_attribute *attr, char *buf) 716 { 717 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 718 719 return sprintf(buf, "%#x\n", dcr->vendor_id); 720 } 721 static DEVICE_ATTR_RO(vendor); 722 723 static ssize_t rev_id_show(struct device *dev, 724 struct device_attribute *attr, char *buf) 725 { 726 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 727 728 return sprintf(buf, "%#x\n", dcr->revision_id); 729 } 730 static DEVICE_ATTR_RO(rev_id); 731 732 static ssize_t device_show(struct device *dev, 733 struct device_attribute *attr, char *buf) 734 { 735 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 736 737 return sprintf(buf, "%#x\n", dcr->device_id); 738 } 739 static DEVICE_ATTR_RO(device); 740 741 static ssize_t format_show(struct device *dev, 742 struct device_attribute *attr, char *buf) 743 { 744 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 745 746 return sprintf(buf, "%#x\n", dcr->code); 747 } 748 static DEVICE_ATTR_RO(format); 749 750 static ssize_t serial_show(struct device *dev, 751 struct device_attribute *attr, char *buf) 752 { 753 struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); 754 755 return sprintf(buf, "%#x\n", dcr->serial_number); 756 } 757 static DEVICE_ATTR_RO(serial); 758 759 static ssize_t flags_show(struct device *dev, 760 struct device_attribute *attr, char *buf) 761 { 762 u16 flags = to_nfit_memdev(dev)->flags; 763 764 return sprintf(buf, "%s%s%s%s%s\n", 765 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", 766 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", 767 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", 768 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", 769 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? 
"smart_event " : ""); 770 } 771 static DEVICE_ATTR_RO(flags); 772 773 static struct attribute *acpi_nfit_dimm_attributes[] = { 774 &dev_attr_handle.attr, 775 &dev_attr_phys_id.attr, 776 &dev_attr_vendor.attr, 777 &dev_attr_device.attr, 778 &dev_attr_format.attr, 779 &dev_attr_serial.attr, 780 &dev_attr_rev_id.attr, 781 &dev_attr_flags.attr, 782 NULL, 783 }; 784 785 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, 786 struct attribute *a, int n) 787 { 788 struct device *dev = container_of(kobj, struct device, kobj); 789 790 if (to_nfit_dcr(dev)) 791 return a->mode; 792 else 793 return 0; 794 } 795 796 static struct attribute_group acpi_nfit_dimm_attribute_group = { 797 .name = "nfit", 798 .attrs = acpi_nfit_dimm_attributes, 799 .is_visible = acpi_nfit_dimm_attr_visible, 800 }; 801 802 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { 803 &nvdimm_attribute_group, 804 &nd_device_attribute_group, 805 &acpi_nfit_dimm_attribute_group, 806 NULL, 807 }; 808 809 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, 810 u32 device_handle) 811 { 812 struct nfit_mem *nfit_mem; 813 814 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 815 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) 816 return nfit_mem->nvdimm; 817 818 return NULL; 819 } 820 821 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, 822 struct nfit_mem *nfit_mem, u32 device_handle) 823 { 824 struct acpi_device *adev, *adev_dimm; 825 struct device *dev = acpi_desc->dev; 826 const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM); 827 int i; 828 829 nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en; 830 adev = to_acpi_dev(acpi_desc); 831 if (!adev) 832 return 0; 833 834 adev_dimm = acpi_find_child_device(adev, device_handle, false); 835 nfit_mem->adev = adev_dimm; 836 if (!adev_dimm) { 837 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", 838 device_handle); 839 return force_enable_dimms ? 
0 : -ENODEV; 840 } 841 842 for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++) 843 if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) 844 set_bit(i, &nfit_mem->dsm_mask); 845 846 return 0; 847 } 848 849 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) 850 { 851 struct nfit_mem *nfit_mem; 852 int dimm_count = 0; 853 854 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 855 struct nvdimm *nvdimm; 856 unsigned long flags = 0; 857 u32 device_handle; 858 u16 mem_flags; 859 int rc; 860 861 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; 862 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); 863 if (nvdimm) { 864 dimm_count++; 865 continue; 866 } 867 868 if (nfit_mem->bdw && nfit_mem->memdev_pmem) 869 flags |= NDD_ALIASING; 870 871 mem_flags = __to_nfit_memdev(nfit_mem)->flags; 872 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) 873 flags |= NDD_UNARMED; 874 875 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); 876 if (rc) 877 continue; 878 879 nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, 880 acpi_nfit_dimm_attribute_groups, 881 flags, &nfit_mem->dsm_mask); 882 if (!nvdimm) 883 return -ENOMEM; 884 885 nfit_mem->nvdimm = nvdimm; 886 dimm_count++; 887 888 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) 889 continue; 890 891 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", 892 nvdimm_name(nvdimm), 893 mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", 894 mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", 895 mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", 896 mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? 
" not_armed" : ""); 897 898 } 899 900 return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); 901 } 902 903 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) 904 { 905 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 906 const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); 907 struct acpi_device *adev; 908 int i; 909 910 nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en; 911 adev = to_acpi_dev(acpi_desc); 912 if (!adev) 913 return; 914 915 for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++) 916 if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) 917 set_bit(i, &nd_desc->dsm_mask); 918 } 919 920 static ssize_t range_index_show(struct device *dev, 921 struct device_attribute *attr, char *buf) 922 { 923 struct nd_region *nd_region = to_nd_region(dev); 924 struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); 925 926 return sprintf(buf, "%d\n", nfit_spa->spa->range_index); 927 } 928 static DEVICE_ATTR_RO(range_index); 929 930 static struct attribute *acpi_nfit_region_attributes[] = { 931 &dev_attr_range_index.attr, 932 NULL, 933 }; 934 935 static struct attribute_group acpi_nfit_region_attribute_group = { 936 .name = "nfit", 937 .attrs = acpi_nfit_region_attributes, 938 }; 939 940 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { 941 &nd_region_attribute_group, 942 &nd_mapping_attribute_group, 943 &nd_device_attribute_group, 944 &nd_numa_attribute_group, 945 &acpi_nfit_region_attribute_group, 946 NULL, 947 }; 948 949 /* enough info to uniquely specify an interleave set */ 950 struct nfit_set_info { 951 struct nfit_set_info_map { 952 u64 region_offset; 953 u32 serial_number; 954 u32 pad; 955 } mapping[0]; 956 }; 957 958 static size_t sizeof_nfit_set_info(int num_mappings) 959 { 960 return sizeof(struct nfit_set_info) 961 + num_mappings * sizeof(struct nfit_set_info_map); 962 } 963 964 static int cmp_map(const void *m0, const void *m1) 965 { 966 const struct nfit_set_info_map *map0 = m0; 967 const 
struct nfit_set_info_map *map1 = m1; 968 969 return memcmp(&map0->region_offset, &map1->region_offset, 970 sizeof(u64)); 971 } 972 973 /* Retrieve the nth entry referencing this spa */ 974 static struct acpi_nfit_memory_map *memdev_from_spa( 975 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) 976 { 977 struct nfit_memdev *nfit_memdev; 978 979 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) 980 if (nfit_memdev->memdev->range_index == range_index) 981 if (n-- == 0) 982 return nfit_memdev->memdev; 983 return NULL; 984 } 985 986 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, 987 struct nd_region_desc *ndr_desc, 988 struct acpi_nfit_system_address *spa) 989 { 990 int i, spa_type = nfit_spa_type(spa); 991 struct device *dev = acpi_desc->dev; 992 struct nd_interleave_set *nd_set; 993 u16 nr = ndr_desc->num_mappings; 994 struct nfit_set_info *info; 995 996 if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) 997 /* pass */; 998 else 999 return 0; 1000 1001 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 1002 if (!nd_set) 1003 return -ENOMEM; 1004 1005 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 1006 if (!info) 1007 return -ENOMEM; 1008 for (i = 0; i < nr; i++) { 1009 struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; 1010 struct nfit_set_info_map *map = &info->mapping[i]; 1011 struct nvdimm *nvdimm = nd_mapping->nvdimm; 1012 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 1013 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, 1014 spa->range_index, i); 1015 1016 if (!memdev || !nfit_mem->dcr) { 1017 dev_err(dev, "%s: failed to find DCR\n", __func__); 1018 return -ENODEV; 1019 } 1020 1021 map->region_offset = memdev->region_offset; 1022 map->serial_number = nfit_mem->dcr->serial_number; 1023 } 1024 1025 sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), 1026 cmp_map, NULL); 1027 nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); 
1028 ndr_desc->nd_set = nd_set; 1029 devm_kfree(dev, info); 1030 1031 return 0; 1032 } 1033 1034 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio) 1035 { 1036 struct acpi_nfit_interleave *idt = mmio->idt; 1037 u32 sub_line_offset, line_index, line_offset; 1038 u64 line_no, table_skip_count, table_offset; 1039 1040 line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); 1041 table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); 1042 line_offset = idt->line_offset[line_index] 1043 * mmio->line_size; 1044 table_offset = table_skip_count * mmio->table_size; 1045 1046 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 1047 } 1048 1049 static void wmb_blk(struct nfit_blk *nfit_blk) 1050 { 1051 1052 if (nfit_blk->nvdimm_flush) { 1053 /* 1054 * The first wmb() is needed to 'sfence' all previous writes 1055 * such that they are architecturally visible for the platform 1056 * buffer flush. Note that we've already arranged for pmem 1057 * writes to avoid the cache via arch_memcpy_to_pmem(). The 1058 * final wmb() ensures ordering for the NVDIMM flush write. 
1059 */ 1060 wmb(); 1061 writeq(1, nfit_blk->nvdimm_flush); 1062 wmb(); 1063 } else 1064 wmb_pmem(); 1065 } 1066 1067 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 1068 { 1069 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1070 u64 offset = nfit_blk->stat_offset + mmio->size * bw; 1071 1072 if (mmio->num_lines) 1073 offset = to_interleave_offset(offset, mmio); 1074 1075 return readl(mmio->addr.base + offset); 1076 } 1077 1078 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, 1079 resource_size_t dpa, unsigned int len, unsigned int write) 1080 { 1081 u64 cmd, offset; 1082 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1083 1084 enum { 1085 BCW_OFFSET_MASK = (1ULL << 48)-1, 1086 BCW_LEN_SHIFT = 48, 1087 BCW_LEN_MASK = (1ULL << 8) - 1, 1088 BCW_CMD_SHIFT = 56, 1089 }; 1090 1091 cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; 1092 len = len >> L1_CACHE_SHIFT; 1093 cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; 1094 cmd |= ((u64) write) << BCW_CMD_SHIFT; 1095 1096 offset = nfit_blk->cmd_offset + mmio->size * bw; 1097 if (mmio->num_lines) 1098 offset = to_interleave_offset(offset, mmio); 1099 1100 writeq(cmd, mmio->addr.base + offset); 1101 wmb_blk(nfit_blk); 1102 1103 if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH) 1104 readq(mmio->addr.base + offset); 1105 } 1106 1107 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 1108 resource_size_t dpa, void *iobuf, size_t len, int rw, 1109 unsigned int lane) 1110 { 1111 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 1112 unsigned int copied = 0; 1113 u64 base_offset; 1114 int rc; 1115 1116 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 1117 + lane * mmio->size; 1118 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 1119 while (len) { 1120 unsigned int c; 1121 u64 offset; 1122 1123 if (mmio->num_lines) { 1124 u32 line_offset; 1125 1126 offset = to_interleave_offset(base_offset + copied, 1127 mmio); 1128 div_u64_rem(offset, mmio->line_size, &line_offset); 
1129 c = min_t(size_t, len, mmio->line_size - line_offset); 1130 } else { 1131 offset = base_offset + nfit_blk->bdw_offset; 1132 c = len; 1133 } 1134 1135 if (rw) 1136 memcpy_to_pmem(mmio->addr.aperture + offset, 1137 iobuf + copied, c); 1138 else { 1139 if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH) 1140 mmio_flush_range((void __force *) 1141 mmio->addr.aperture + offset, c); 1142 1143 memcpy_from_pmem(iobuf + copied, 1144 mmio->addr.aperture + offset, c); 1145 } 1146 1147 copied += c; 1148 len -= c; 1149 } 1150 1151 if (rw) 1152 wmb_blk(nfit_blk); 1153 1154 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 1155 return rc; 1156 } 1157 1158 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, 1159 resource_size_t dpa, void *iobuf, u64 len, int rw) 1160 { 1161 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 1162 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 1163 struct nd_region *nd_region = nfit_blk->nd_region; 1164 unsigned int lane, copied = 0; 1165 int rc = 0; 1166 1167 lane = nd_region_acquire_lane(nd_region); 1168 while (len) { 1169 u64 c = min(len, mmio->size); 1170 1171 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, 1172 iobuf + copied, c, rw, lane); 1173 if (rc) 1174 break; 1175 1176 copied += c; 1177 len -= c; 1178 } 1179 nd_region_release_lane(nd_region, lane); 1180 1181 return rc; 1182 } 1183 1184 static void nfit_spa_mapping_release(struct kref *kref) 1185 { 1186 struct nfit_spa_mapping *spa_map = to_spa_map(kref); 1187 struct acpi_nfit_system_address *spa = spa_map->spa; 1188 struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc; 1189 1190 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); 1191 dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index); 1192 if (spa_map->type == SPA_MAP_APERTURE) 1193 memunmap((void __force *)spa_map->addr.aperture); 1194 else 1195 iounmap(spa_map->addr.base); 1196 release_mem_region(spa->address, spa->length); 1197 list_del(&spa_map->list); 1198 kfree(spa_map); 1199 } 1200 
1201 static struct nfit_spa_mapping *find_spa_mapping( 1202 struct acpi_nfit_desc *acpi_desc, 1203 struct acpi_nfit_system_address *spa) 1204 { 1205 struct nfit_spa_mapping *spa_map; 1206 1207 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); 1208 list_for_each_entry(spa_map, &acpi_desc->spa_maps, list) 1209 if (spa_map->spa == spa) 1210 return spa_map; 1211 1212 return NULL; 1213 } 1214 1215 static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc, 1216 struct acpi_nfit_system_address *spa) 1217 { 1218 struct nfit_spa_mapping *spa_map; 1219 1220 mutex_lock(&acpi_desc->spa_map_mutex); 1221 spa_map = find_spa_mapping(acpi_desc, spa); 1222 1223 if (spa_map) 1224 kref_put(&spa_map->kref, nfit_spa_mapping_release); 1225 mutex_unlock(&acpi_desc->spa_map_mutex); 1226 } 1227 1228 static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, 1229 struct acpi_nfit_system_address *spa, enum spa_map_type type) 1230 { 1231 resource_size_t start = spa->address; 1232 resource_size_t n = spa->length; 1233 struct nfit_spa_mapping *spa_map; 1234 struct resource *res; 1235 1236 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex)); 1237 1238 spa_map = find_spa_mapping(acpi_desc, spa); 1239 if (spa_map) { 1240 kref_get(&spa_map->kref); 1241 return spa_map->addr.base; 1242 } 1243 1244 spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL); 1245 if (!spa_map) 1246 return NULL; 1247 1248 INIT_LIST_HEAD(&spa_map->list); 1249 spa_map->spa = spa; 1250 kref_init(&spa_map->kref); 1251 spa_map->acpi_desc = acpi_desc; 1252 1253 res = request_mem_region(start, n, dev_name(acpi_desc->dev)); 1254 if (!res) 1255 goto err_mem; 1256 1257 spa_map->type = type; 1258 if (type == SPA_MAP_APERTURE) 1259 spa_map->addr.aperture = (void __pmem *)memremap(start, n, 1260 ARCH_MEMREMAP_PMEM); 1261 else 1262 spa_map->addr.base = ioremap_nocache(start, n); 1263 1264 1265 if (!spa_map->addr.base) 1266 goto err_map; 1267 1268 list_add_tail(&spa_map->list, &acpi_desc->spa_maps); 1269 return 
spa_map->addr.base; 1270 1271 err_map: 1272 release_mem_region(start, n); 1273 err_mem: 1274 kfree(spa_map); 1275 return NULL; 1276 } 1277 1278 /** 1279 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges 1280 * @nvdimm_bus: NFIT-bus that provided the spa table entry 1281 * @nfit_spa: spa table to map 1282 * @type: aperture or control region 1283 * 1284 * In the case where block-data-window apertures and 1285 * dimm-control-regions are interleaved they will end up sharing a 1286 * single request_mem_region() + ioremap() for the address range. In 1287 * the style of devm nfit_spa_map() mappings are automatically dropped 1288 * when all region devices referencing the same mapping are disabled / 1289 * unbound. 1290 */ 1291 static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc, 1292 struct acpi_nfit_system_address *spa, enum spa_map_type type) 1293 { 1294 void __iomem *iomem; 1295 1296 mutex_lock(&acpi_desc->spa_map_mutex); 1297 iomem = __nfit_spa_map(acpi_desc, spa, type); 1298 mutex_unlock(&acpi_desc->spa_map_mutex); 1299 1300 return iomem; 1301 } 1302 1303 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 1304 struct acpi_nfit_interleave *idt, u16 interleave_ways) 1305 { 1306 if (idt) { 1307 mmio->num_lines = idt->line_count; 1308 mmio->line_size = idt->line_size; 1309 if (interleave_ways == 0) 1310 return -ENXIO; 1311 mmio->table_size = mmio->num_lines * interleave_ways 1312 * mmio->line_size; 1313 } 1314 1315 return 0; 1316 } 1317 1318 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, 1319 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) 1320 { 1321 struct nd_cmd_dimm_flags flags; 1322 int rc; 1323 1324 memset(&flags, 0, sizeof(flags)); 1325 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, 1326 sizeof(flags)); 1327 1328 if (rc >= 0 && flags.status == 0) 1329 nfit_blk->dimm_flags = flags.flags; 1330 else if (rc == -ENOTTY) { 1331 /* fall back to a conservative 
default */ 1332 nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH; 1333 rc = 0; 1334 } else 1335 rc = -ENXIO; 1336 1337 return rc; 1338 } 1339 1340 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 1341 struct device *dev) 1342 { 1343 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1344 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1345 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1346 struct nfit_flush *nfit_flush; 1347 struct nfit_blk_mmio *mmio; 1348 struct nfit_blk *nfit_blk; 1349 struct nfit_mem *nfit_mem; 1350 struct nvdimm *nvdimm; 1351 int rc; 1352 1353 nvdimm = nd_blk_region_to_dimm(ndbr); 1354 nfit_mem = nvdimm_provider_data(nvdimm); 1355 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 1356 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 1357 nfit_mem ? "" : " nfit_mem", 1358 (nfit_mem && nfit_mem->dcr) ? "" : " dcr", 1359 (nfit_mem && nfit_mem->bdw) ? "" : " bdw"); 1360 return -ENXIO; 1361 } 1362 1363 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 1364 if (!nfit_blk) 1365 return -ENOMEM; 1366 nd_blk_region_set_provider_data(ndbr, nfit_blk); 1367 nfit_blk->nd_region = to_nd_region(dev); 1368 1369 /* map block aperture memory */ 1370 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1371 mmio = &nfit_blk->mmio[BDW]; 1372 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, 1373 SPA_MAP_APERTURE); 1374 if (!mmio->addr.base) { 1375 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1376 nvdimm_name(nvdimm)); 1377 return -ENOMEM; 1378 } 1379 mmio->size = nfit_mem->bdw->size; 1380 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 1381 mmio->idt = nfit_mem->idt_bdw; 1382 mmio->spa = nfit_mem->spa_bdw; 1383 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 1384 nfit_mem->memdev_bdw->interleave_ways); 1385 if (rc) { 1386 dev_dbg(dev, "%s: %s failed to init bdw interleave\n", 1387 __func__, nvdimm_name(nvdimm)); 1388 return rc; 1389 } 1390 1391 /* map block control 
memory */ 1392 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1393 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1394 mmio = &nfit_blk->mmio[DCR]; 1395 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, 1396 SPA_MAP_CONTROL); 1397 if (!mmio->addr.base) { 1398 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1399 nvdimm_name(nvdimm)); 1400 return -ENOMEM; 1401 } 1402 mmio->size = nfit_mem->dcr->window_size; 1403 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 1404 mmio->idt = nfit_mem->idt_dcr; 1405 mmio->spa = nfit_mem->spa_dcr; 1406 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 1407 nfit_mem->memdev_dcr->interleave_ways); 1408 if (rc) { 1409 dev_dbg(dev, "%s: %s failed to init dcr interleave\n", 1410 __func__, nvdimm_name(nvdimm)); 1411 return rc; 1412 } 1413 1414 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 1415 if (rc < 0) { 1416 dev_dbg(dev, "%s: %s failed get DIMM flags\n", 1417 __func__, nvdimm_name(nvdimm)); 1418 return rc; 1419 } 1420 1421 nfit_flush = nfit_mem->nfit_flush; 1422 if (nfit_flush && nfit_flush->flush->hint_count != 0) { 1423 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev, 1424 nfit_flush->flush->hint_address[0], 8); 1425 if (!nfit_blk->nvdimm_flush) 1426 return -ENOMEM; 1427 } 1428 1429 if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush) 1430 dev_warn(dev, "unable to guarantee persistence of writes\n"); 1431 1432 if (mmio->line_size == 0) 1433 return 0; 1434 1435 if ((u32) nfit_blk->cmd_offset % mmio->line_size 1436 + 8 > mmio->line_size) { 1437 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 1438 return -ENXIO; 1439 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 1440 + 8 > mmio->line_size) { 1441 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 1442 return -ENXIO; 1443 } 1444 1445 return 0; 1446 } 1447 1448 static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, 1449 struct device *dev) 1450 { 1451 struct nvdimm_bus_descriptor *nd_desc 
= to_nd_desc(nvdimm_bus); 1452 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1453 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1454 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 1455 int i; 1456 1457 if (!nfit_blk) 1458 return; /* never enabled */ 1459 1460 /* auto-free BLK spa mappings */ 1461 for (i = 0; i < 2; i++) { 1462 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; 1463 1464 if (mmio->addr.base) 1465 nfit_spa_unmap(acpi_desc, mmio->spa); 1466 } 1467 nd_blk_region_set_provider_data(ndbr, NULL); 1468 /* devm will free nfit_blk */ 1469 } 1470 1471 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, 1472 struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, 1473 struct acpi_nfit_memory_map *memdev, 1474 struct acpi_nfit_system_address *spa) 1475 { 1476 struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, 1477 memdev->device_handle); 1478 struct nd_blk_region_desc *ndbr_desc; 1479 struct nfit_mem *nfit_mem; 1480 int blk_valid = 0; 1481 1482 if (!nvdimm) { 1483 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", 1484 spa->range_index, memdev->device_handle); 1485 return -ENODEV; 1486 } 1487 1488 nd_mapping->nvdimm = nvdimm; 1489 switch (nfit_spa_type(spa)) { 1490 case NFIT_SPA_PM: 1491 case NFIT_SPA_VOLATILE: 1492 nd_mapping->start = memdev->address; 1493 nd_mapping->size = memdev->region_size; 1494 break; 1495 case NFIT_SPA_DCR: 1496 nfit_mem = nvdimm_provider_data(nvdimm); 1497 if (!nfit_mem || !nfit_mem->bdw) { 1498 dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", 1499 spa->range_index, nvdimm_name(nvdimm)); 1500 } else { 1501 nd_mapping->size = nfit_mem->bdw->capacity; 1502 nd_mapping->start = nfit_mem->bdw->start_address; 1503 ndr_desc->num_lanes = nfit_mem->bdw->windows; 1504 blk_valid = 1; 1505 } 1506 1507 ndr_desc->nd_mapping = nd_mapping; 1508 ndr_desc->num_mappings = blk_valid; 1509 ndbr_desc = to_blk_region_desc(ndr_desc); 1510 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1511 
ndbr_desc->disable = acpi_nfit_blk_region_disable; 1512 ndbr_desc->do_io = acpi_desc->blk_do_io; 1513 if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc)) 1514 return -ENOMEM; 1515 break; 1516 } 1517 1518 return 0; 1519 } 1520 1521 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, 1522 struct nfit_spa *nfit_spa) 1523 { 1524 static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; 1525 struct acpi_nfit_system_address *spa = nfit_spa->spa; 1526 struct nd_blk_region_desc ndbr_desc; 1527 struct nd_region_desc *ndr_desc; 1528 struct nfit_memdev *nfit_memdev; 1529 struct nvdimm_bus *nvdimm_bus; 1530 struct resource res; 1531 int count = 0, rc; 1532 1533 if (nfit_spa->is_registered) 1534 return 0; 1535 1536 if (spa->range_index == 0) { 1537 dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", 1538 __func__); 1539 return 0; 1540 } 1541 1542 memset(&res, 0, sizeof(res)); 1543 memset(&nd_mappings, 0, sizeof(nd_mappings)); 1544 memset(&ndbr_desc, 0, sizeof(ndbr_desc)); 1545 res.start = spa->address; 1546 res.end = res.start + spa->length - 1; 1547 ndr_desc = &ndbr_desc.ndr_desc; 1548 ndr_desc->res = &res; 1549 ndr_desc->provider_data = nfit_spa; 1550 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 1551 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 1552 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 1553 spa->proximity_domain); 1554 else 1555 ndr_desc->numa_node = NUMA_NO_NODE; 1556 1557 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 1558 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; 1559 struct nd_mapping *nd_mapping; 1560 1561 if (memdev->range_index != spa->range_index) 1562 continue; 1563 if (count >= ND_MAX_MAPPINGS) { 1564 dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", 1565 spa->range_index, ND_MAX_MAPPINGS); 1566 return -ENXIO; 1567 } 1568 nd_mapping = &nd_mappings[count++]; 1569 rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, 1570 memdev, spa); 1571 if (rc) 1572 
return rc; 1573 } 1574 1575 ndr_desc->nd_mapping = nd_mappings; 1576 ndr_desc->num_mappings = count; 1577 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); 1578 if (rc) 1579 return rc; 1580 1581 nvdimm_bus = acpi_desc->nvdimm_bus; 1582 if (nfit_spa_type(spa) == NFIT_SPA_PM) { 1583 if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc)) 1584 return -ENOMEM; 1585 } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { 1586 if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc)) 1587 return -ENOMEM; 1588 } 1589 1590 nfit_spa->is_registered = 1; 1591 return 0; 1592 } 1593 1594 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) 1595 { 1596 struct nfit_spa *nfit_spa; 1597 1598 list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { 1599 int rc = acpi_nfit_register_region(acpi_desc, nfit_spa); 1600 1601 if (rc) 1602 return rc; 1603 } 1604 return 0; 1605 } 1606 1607 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, 1608 struct nfit_table_prev *prev) 1609 { 1610 struct device *dev = acpi_desc->dev; 1611 1612 if (!list_empty(&prev->spas) || 1613 !list_empty(&prev->memdevs) || 1614 !list_empty(&prev->dcrs) || 1615 !list_empty(&prev->bdws) || 1616 !list_empty(&prev->idts) || 1617 !list_empty(&prev->flushes)) { 1618 dev_err(dev, "new nfit deletes entries (unsupported)\n"); 1619 return -ENXIO; 1620 } 1621 return 0; 1622 } 1623 1624 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) 1625 { 1626 struct device *dev = acpi_desc->dev; 1627 struct nfit_table_prev prev; 1628 const void *end; 1629 u8 *data; 1630 int rc; 1631 1632 mutex_lock(&acpi_desc->init_mutex); 1633 1634 INIT_LIST_HEAD(&prev.spas); 1635 INIT_LIST_HEAD(&prev.memdevs); 1636 INIT_LIST_HEAD(&prev.dcrs); 1637 INIT_LIST_HEAD(&prev.bdws); 1638 INIT_LIST_HEAD(&prev.idts); 1639 INIT_LIST_HEAD(&prev.flushes); 1640 1641 list_cut_position(&prev.spas, &acpi_desc->spas, 1642 acpi_desc->spas.prev); 1643 list_cut_position(&prev.memdevs, &acpi_desc->memdevs, 1644 
acpi_desc->memdevs.prev); 1645 list_cut_position(&prev.dcrs, &acpi_desc->dcrs, 1646 acpi_desc->dcrs.prev); 1647 list_cut_position(&prev.bdws, &acpi_desc->bdws, 1648 acpi_desc->bdws.prev); 1649 list_cut_position(&prev.idts, &acpi_desc->idts, 1650 acpi_desc->idts.prev); 1651 list_cut_position(&prev.flushes, &acpi_desc->flushes, 1652 acpi_desc->flushes.prev); 1653 1654 data = (u8 *) acpi_desc->nfit; 1655 end = data + sz; 1656 data += sizeof(struct acpi_table_nfit); 1657 while (!IS_ERR_OR_NULL(data)) 1658 data = add_table(acpi_desc, &prev, data, end); 1659 1660 if (IS_ERR(data)) { 1661 dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, 1662 PTR_ERR(data)); 1663 rc = PTR_ERR(data); 1664 goto out_unlock; 1665 } 1666 1667 rc = acpi_nfit_check_deletions(acpi_desc, &prev); 1668 if (rc) 1669 goto out_unlock; 1670 1671 if (nfit_mem_init(acpi_desc) != 0) { 1672 rc = -ENOMEM; 1673 goto out_unlock; 1674 } 1675 1676 acpi_nfit_init_dsms(acpi_desc); 1677 1678 rc = acpi_nfit_register_dimms(acpi_desc); 1679 if (rc) 1680 goto out_unlock; 1681 1682 rc = acpi_nfit_register_regions(acpi_desc); 1683 1684 out_unlock: 1685 mutex_unlock(&acpi_desc->init_mutex); 1686 return rc; 1687 } 1688 EXPORT_SYMBOL_GPL(acpi_nfit_init); 1689 1690 static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev) 1691 { 1692 struct nvdimm_bus_descriptor *nd_desc; 1693 struct acpi_nfit_desc *acpi_desc; 1694 struct device *dev = &adev->dev; 1695 1696 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); 1697 if (!acpi_desc) 1698 return ERR_PTR(-ENOMEM); 1699 1700 dev_set_drvdata(dev, acpi_desc); 1701 acpi_desc->dev = dev; 1702 acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 1703 nd_desc = &acpi_desc->nd_desc; 1704 nd_desc->provider_name = "ACPI.NFIT"; 1705 nd_desc->ndctl = acpi_nfit_ctl; 1706 nd_desc->attr_groups = acpi_nfit_attribute_groups; 1707 1708 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc); 1709 if (!acpi_desc->nvdimm_bus) { 1710 devm_kfree(dev, acpi_desc); 
1711 return ERR_PTR(-ENXIO); 1712 } 1713 1714 INIT_LIST_HEAD(&acpi_desc->spa_maps); 1715 INIT_LIST_HEAD(&acpi_desc->spas); 1716 INIT_LIST_HEAD(&acpi_desc->dcrs); 1717 INIT_LIST_HEAD(&acpi_desc->bdws); 1718 INIT_LIST_HEAD(&acpi_desc->idts); 1719 INIT_LIST_HEAD(&acpi_desc->flushes); 1720 INIT_LIST_HEAD(&acpi_desc->memdevs); 1721 INIT_LIST_HEAD(&acpi_desc->dimms); 1722 mutex_init(&acpi_desc->spa_map_mutex); 1723 mutex_init(&acpi_desc->init_mutex); 1724 1725 return acpi_desc; 1726 } 1727 1728 static int acpi_nfit_add(struct acpi_device *adev) 1729 { 1730 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1731 struct acpi_nfit_desc *acpi_desc; 1732 struct device *dev = &adev->dev; 1733 struct acpi_table_header *tbl; 1734 acpi_status status = AE_OK; 1735 acpi_size sz; 1736 int rc; 1737 1738 status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz); 1739 if (ACPI_FAILURE(status)) { 1740 /* This is ok, we could have an nvdimm hotplugged later */ 1741 dev_dbg(dev, "failed to find NFIT at startup\n"); 1742 return 0; 1743 } 1744 1745 acpi_desc = acpi_nfit_desc_init(adev); 1746 if (IS_ERR(acpi_desc)) { 1747 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 1748 __func__, PTR_ERR(acpi_desc)); 1749 return PTR_ERR(acpi_desc); 1750 } 1751 1752 acpi_desc->nfit = (struct acpi_table_nfit *) tbl; 1753 1754 /* Evaluate _FIT and override with that if present */ 1755 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1756 if (ACPI_SUCCESS(status) && buf.length > 0) { 1757 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1758 sz = buf.length; 1759 } 1760 1761 rc = acpi_nfit_init(acpi_desc, sz); 1762 if (rc) { 1763 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 1764 return rc; 1765 } 1766 return 0; 1767 } 1768 1769 static int acpi_nfit_remove(struct acpi_device *adev) 1770 { 1771 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1772 1773 nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 1774 return 0; 1775 } 1776 1777 static void 
acpi_nfit_notify(struct acpi_device *adev, u32 event) 1778 { 1779 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1780 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1781 struct acpi_table_nfit *nfit_saved; 1782 struct device *dev = &adev->dev; 1783 acpi_status status; 1784 int ret; 1785 1786 dev_dbg(dev, "%s: event: %d\n", __func__, event); 1787 1788 device_lock(dev); 1789 if (!dev->driver) { 1790 /* dev->driver may be null if we're being removed */ 1791 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 1792 return; 1793 } 1794 1795 if (!acpi_desc) { 1796 acpi_desc = acpi_nfit_desc_init(adev); 1797 if (IS_ERR(acpi_desc)) { 1798 dev_err(dev, "%s: error initializing acpi_desc: %ld\n", 1799 __func__, PTR_ERR(acpi_desc)); 1800 goto out_unlock; 1801 } 1802 } 1803 1804 /* Evaluate _FIT */ 1805 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1806 if (ACPI_FAILURE(status)) { 1807 dev_err(dev, "failed to evaluate _FIT\n"); 1808 goto out_unlock; 1809 } 1810 1811 nfit_saved = acpi_desc->nfit; 1812 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1813 ret = acpi_nfit_init(acpi_desc, buf.length); 1814 if (!ret) { 1815 /* Merge failed, restore old nfit, and exit */ 1816 acpi_desc->nfit = nfit_saved; 1817 dev_err(dev, "failed to merge updated NFIT\n"); 1818 } 1819 kfree(buf.pointer); 1820 1821 out_unlock: 1822 device_unlock(dev); 1823 } 1824 1825 static const struct acpi_device_id acpi_nfit_ids[] = { 1826 { "ACPI0012", 0 }, 1827 { "", 0 }, 1828 }; 1829 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); 1830 1831 static struct acpi_driver acpi_nfit_driver = { 1832 .name = KBUILD_MODNAME, 1833 .ids = acpi_nfit_ids, 1834 .ops = { 1835 .add = acpi_nfit_add, 1836 .remove = acpi_nfit_remove, 1837 .notify = acpi_nfit_notify, 1838 }, 1839 }; 1840 1841 static __init int nfit_init(void) 1842 { 1843 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 1844 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 1845 
BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 1846 BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); 1847 BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); 1848 BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); 1849 BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); 1850 1851 acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); 1852 acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); 1853 acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); 1854 acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); 1855 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); 1856 acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); 1857 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); 1858 acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); 1859 acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); 1860 acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); 1861 1862 return acpi_bus_register_driver(&acpi_nfit_driver); 1863 } 1864 1865 static __exit void nfit_exit(void) 1866 { 1867 acpi_bus_unregister_driver(&acpi_nfit_driver); 1868 } 1869 1870 module_init(nfit_init); 1871 module_exit(nfit_exit); 1872 MODULE_LICENSE("GPL v2"); 1873 MODULE_AUTHOR("Intel Corporation"); 1874 1875 1876 1877 1878 1879 /* LDV_COMMENT_BEGIN_MAIN */ 1880 #ifdef LDV_MAIN0_sequence_infinite_withcheck_stateful 1881 1882 /*###########################################################################*/ 1883 1884 /*############## Driver Environment Generator 0.2 output ####################*/ 1885 1886 /*###########################################################################*/ 1887 1888 1889 1890 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test if all kernel resources are correctly released by driver before driver will be unloaded. 
*/ 1891 void ldv_check_final_state(void); 1892 1893 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result. */ 1894 void ldv_check_return_value(int res); 1895 1896 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Test correct return result of probe() function. */ 1897 void ldv_check_return_value_probe(int res); 1898 1899 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Initializes the model. */ 1900 void ldv_initialize(void); 1901 1902 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Reinitializes the model between distinct model function calls. */ 1903 void ldv_handler_precall(void); 1904 1905 /* LDV_COMMENT_FUNCTION_DECLARE_LDV Special function for LDV verifier. Returns arbitrary interger value. */ 1906 int nondet_int(void); 1907 1908 /* LDV_COMMENT_VAR_DECLARE_LDV Special variable for LDV verifier. */ 1909 int LDV_IN_INTERRUPT; 1910 1911 /* LDV_COMMENT_FUNCTION_MAIN Main function for LDV verifier. 
*/ 1912 void ldv_main0_sequence_infinite_withcheck_stateful(void) { 1913 1914 1915 1916 /* LDV_COMMENT_BEGIN_VARIABLE_DECLARATION_PART */ 1917 /*============================= VARIABLE DECLARATION PART =============================*/ 1918 /** STRUCT: struct type: attribute_group, struct name: acpi_nfit_dimm_attribute_group **/ 1919 /* content: static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n)*/ 1920 /* LDV_COMMENT_END_PREP */ 1921 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 1922 struct kobject * var_group1; 1923 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 1924 struct attribute * var_group2; 1925 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_dimm_attr_visible" */ 1926 int var_acpi_nfit_dimm_attr_visible_29_p2; 1927 1928 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 1929 /* content: static int acpi_nfit_add(struct acpi_device *adev)*/ 1930 /* LDV_COMMENT_END_PREP */ 1931 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_add" */ 1932 struct acpi_device * var_group3; 1933 /* content: static int acpi_nfit_remove(struct acpi_device *adev)*/ 1934 /* LDV_COMMENT_END_PREP */ 1935 /* content: static void acpi_nfit_notify(struct acpi_device *adev, u32 event)*/ 1936 /* LDV_COMMENT_END_PREP */ 1937 /* LDV_COMMENT_VAR_DECLARE Variable declaration for function "acpi_nfit_notify" */ 1938 u32 var_acpi_nfit_notify_62_p1; 1939 1940 1941 1942 1943 /* LDV_COMMENT_END_VARIABLE_DECLARATION_PART */ 1944 /* LDV_COMMENT_BEGIN_VARIABLE_INITIALIZING_PART */ 1945 /*============================= VARIABLE INITIALIZING PART =============================*/ 1946 LDV_IN_INTERRUPT=1; 1947 1948 1949 1950 1951 /* LDV_COMMENT_END_VARIABLE_INITIALIZING_PART */ 1952 /* LDV_COMMENT_BEGIN_FUNCTION_CALL_SECTION */ 1953 /*============================= FUNCTION CALL SECTION 
=============================*/ 1954 /* LDV_COMMENT_FUNCTION_CALL Initialize LDV model. */ 1955 ldv_initialize(); 1956 1957 /** INIT: init_type: ST_MODULE_INIT **/ 1958 /* content: static __init int nfit_init(void)*/ 1959 /* LDV_COMMENT_END_PREP */ 1960 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver init function after driver loading to kernel. This function declared as "MODULE_INIT(function name)". */ 1961 ldv_handler_precall(); 1962 if(nfit_init()) 1963 goto ldv_final; 1964 1965 1966 int ldv_s_acpi_nfit_driver_acpi_driver = 0; 1967 1968 1969 1970 while( nondet_int() 1971 || !(ldv_s_acpi_nfit_driver_acpi_driver == 0) 1972 ) { 1973 1974 switch(nondet_int()) { 1975 1976 case 0: { 1977 1978 /** STRUCT: struct type: attribute_group, struct name: acpi_nfit_dimm_attribute_group **/ 1979 1980 1981 /* content: static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n)*/ 1982 /* LDV_COMMENT_END_PREP */ 1983 /* LDV_COMMENT_FUNCTION_CALL Function from field "is_visible" from driver structure with callbacks "acpi_nfit_dimm_attribute_group" */ 1984 ldv_handler_precall(); 1985 acpi_nfit_dimm_attr_visible( var_group1, var_group2, var_acpi_nfit_dimm_attr_visible_29_p2); 1986 1987 1988 1989 1990 } 1991 1992 break; 1993 case 1: { 1994 1995 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 1996 if(ldv_s_acpi_nfit_driver_acpi_driver==0) { 1997 1998 /* content: static int acpi_nfit_remove(struct acpi_device *adev)*/ 1999 /* LDV_COMMENT_END_PREP */ 2000 /* LDV_COMMENT_FUNCTION_CALL Function from field "remove" from driver structure with callbacks "acpi_nfit_driver" */ 2001 ldv_handler_precall(); 2002 acpi_nfit_remove( var_group3); 2003 ldv_s_acpi_nfit_driver_acpi_driver=0; 2004 2005 } 2006 2007 } 2008 2009 break; 2010 case 2: { 2011 2012 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2013 2014 2015 /* content: static int acpi_nfit_add(struct acpi_device *adev)*/ 2016 /* LDV_COMMENT_END_PREP */ 2017 /* 
LDV_COMMENT_FUNCTION_CALL Function from field "add" from driver structure with callbacks "acpi_nfit_driver" */ 2018 ldv_handler_precall(); 2019 acpi_nfit_add( var_group3); 2020 2021 2022 2023 2024 } 2025 2026 break; 2027 case 3: { 2028 2029 /** STRUCT: struct type: acpi_driver, struct name: acpi_nfit_driver **/ 2030 2031 2032 /* content: static void acpi_nfit_notify(struct acpi_device *adev, u32 event)*/ 2033 /* LDV_COMMENT_END_PREP */ 2034 /* LDV_COMMENT_FUNCTION_CALL Function from field "notify" from driver structure with callbacks "acpi_nfit_driver" */ 2035 ldv_handler_precall(); 2036 acpi_nfit_notify( var_group3, var_acpi_nfit_notify_62_p1); 2037 2038 2039 2040 2041 } 2042 2043 break; 2044 default: break; 2045 2046 } 2047 2048 } 2049 2050 ldv_module_exit: 2051 2052 /** INIT: init_type: ST_MODULE_EXIT **/ 2053 /* content: static __exit void nfit_exit(void)*/ 2054 /* LDV_COMMENT_END_PREP */ 2055 /* LDV_COMMENT_FUNCTION_CALL Kernel calls driver release function before driver will be uploaded from kernel. This function declared as "MODULE_EXIT(function name)". */ 2056 ldv_handler_precall(); 2057 nfit_exit(); 2058 2059 /* LDV_COMMENT_FUNCTION_CALL Checks that all resources and locks are correctly released before the driver will be unloaded. */ 2060 ldv_final: ldv_check_final_state(); 2061 2062 /* LDV_COMMENT_END_FUNCTION_CALL_SECTION */ 2063 return; 2064 2065 } 2066 #endif 2067 2068 /* LDV_COMMENT_END_MAIN */

Here is an explanation of a rule violation arisen while checking your driver against a corresponding kernel.

Note that it may be a false positive, i.e. there may not be a real error at all. Please analyze the given error trace and the related source code to determine whether there is an error in your driver.

The Error trace column contains the path on which the given rule is violated. You can expand/collapse certain entity classes by clicking the corresponding checkboxes in the main menu or in the advanced Others menu. You can also expand/collapse each particular entity by clicking on +/-. When hovering over some entities you can see tips. The error trace is also linked to the related source code. Line numbers may be shown as links on the left; you can click on them to open the corresponding lines in the source code.

The Source code column contains the content of the files related to the error trace. It includes the source code of your driver (note that there are some LDV modifications at the end), kernel headers, and the rule model. Tabs show the currently opened file and the other available files. When hovering over them you can see the full file names. Clicking a tab shows the corresponding file's content.

Ядро Модуль Правило Верификатор Вердикт Статус Время создания Описание проблемы
linux-4.4-rc1.tar.xz drivers/acpi/nfit.ko 32_7a CPAchecker Bug Fixed 2015-12-11 23:29:51 L0212

Комментарий

Reported: 11 Dec 2015

[В начало]