]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/edac/i7core_edac.c
Net: ceph: Makefile: remove deprecated kbuild goal definitions
[net-next-2.6.git] / drivers / edac / i7core_edac.c
CommitLineData
52707f91
MCC
1/* Intel i7 core/Nehalem Memory Controller kernel module
2 *
3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
a0c36a1f
MCC
7 *
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
10 *
52707f91 11 * Copyright (c) 2009-2010 by:
a0c36a1f
MCC
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
13 *
14 * Red Hat Inc. http://www.redhat.com
15 *
16 * Forked and adapted from the i5400_edac driver
17 *
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24 * also available at:
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26 */
27
a0c36a1f
MCC
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/pci_ids.h>
32#include <linux/slab.h>
3b918c12 33#include <linux/delay.h>
a0c36a1f
MCC
34#include <linux/edac.h>
35#include <linux/mmzone.h>
d5381642 36#include <linux/edac_mce.h>
f4742949 37#include <linux/smp.h>
14d2c083 38#include <asm/processor.h>
a0c36a1f
MCC
39
40#include "edac_core.h"
41
18c29002
MCC
42/* Static vars */
43static LIST_HEAD(i7core_edac_list);
44static DEFINE_MUTEX(i7core_edac_lock);
45static int probed;
46
54a08ab1
MCC
47static int use_pci_fixup;
48module_param(use_pci_fixup, int, 0444);
49MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
f4742949
MCC
50/*
51 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
52 * registers start at bus 255, and are not reported by BIOS.
53 * We currently find devices with only 2 sockets. In order to support more QPI
54 * Quick Path Interconnect, just increment this number.
55 */
56#define MAX_SOCKET_BUSES 2
57
58
a0c36a1f
MCC
59/*
60 * Alter this version for the module when modifications are made
61 */
62#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
63#define EDAC_MOD_STR "i7core_edac"
64
a0c36a1f
MCC
65/*
66 * Debug macros
67 */
68#define i7core_printk(level, fmt, arg...) \
69 edac_printk(level, "i7core", fmt, ##arg)
70
71#define i7core_mc_printk(mci, level, fmt, arg...) \
72 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
73
74/*
75 * i7core Memory Controller Registers
76 */
77
e9bd2e73
MCC
78 /* OFFSETS for Device 0 Function 0 */
79
80#define MC_CFG_CONTROL 0x90
81
a0c36a1f
MCC
82 /* OFFSETS for Device 3 Function 0 */
83
84#define MC_CONTROL 0x48
85#define MC_STATUS 0x4c
86#define MC_MAX_DOD 0x64
87
442305b1
MCC
88/*
89 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
90 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
91 */
92
93#define MC_TEST_ERR_RCV1 0x60
94 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
95
96#define MC_TEST_ERR_RCV0 0x64
97 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
98 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
99
b4e8f0b6
MCC
100/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
101#define MC_COR_ECC_CNT_0 0x80
102#define MC_COR_ECC_CNT_1 0x84
103#define MC_COR_ECC_CNT_2 0x88
104#define MC_COR_ECC_CNT_3 0x8c
105#define MC_COR_ECC_CNT_4 0x90
106#define MC_COR_ECC_CNT_5 0x94
107
108#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
109#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
110
111
a0c36a1f
MCC
112 /* OFFSETS for Devices 4,5 and 6 Function 0 */
113
0b2b7b7e
MCC
114#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
115 #define THREE_DIMMS_PRESENT (1 << 24)
116 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
117 #define QUAD_RANK_PRESENT (1 << 22)
118 #define REGISTERED_DIMM (1 << 15)
119
f122a892
MCC
120#define MC_CHANNEL_MAPPER 0x60
121 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
122 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
123
0b2b7b7e
MCC
124#define MC_CHANNEL_RANK_PRESENT 0x7c
125 #define RANK_PRESENT_MASK 0xffff
126
a0c36a1f 127#define MC_CHANNEL_ADDR_MATCH 0xf0
194a40fe
MCC
128#define MC_CHANNEL_ERROR_MASK 0xf8
129#define MC_CHANNEL_ERROR_INJECT 0xfc
130 #define INJECT_ADDR_PARITY 0x10
131 #define INJECT_ECC 0x08
132 #define MASK_CACHELINE 0x06
133 #define MASK_FULL_CACHELINE 0x06
134 #define MASK_MSB32_CACHELINE 0x04
135 #define MASK_LSB32_CACHELINE 0x02
136 #define NO_MASK_CACHELINE 0x00
137 #define REPEAT_EN 0x01
a0c36a1f 138
0b2b7b7e 139 /* OFFSETS for Devices 4,5 and 6 Function 1 */
b990538a 140
0b2b7b7e
MCC
141#define MC_DOD_CH_DIMM0 0x48
142#define MC_DOD_CH_DIMM1 0x4c
143#define MC_DOD_CH_DIMM2 0x50
144 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
145 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
146 #define DIMM_PRESENT_MASK (1 << 9)
147 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
854d3349
MCC
148 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
149 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
150 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
151 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
41fcb7fe 152 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
5566cb7c 153 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
854d3349
MCC
154 #define MC_DOD_NUMCOL_MASK 3
155 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
0b2b7b7e 156
f122a892
MCC
157#define MC_RANK_PRESENT 0x7c
158
0b2b7b7e
MCC
159#define MC_SAG_CH_0 0x80
160#define MC_SAG_CH_1 0x84
161#define MC_SAG_CH_2 0x88
162#define MC_SAG_CH_3 0x8c
163#define MC_SAG_CH_4 0x90
164#define MC_SAG_CH_5 0x94
165#define MC_SAG_CH_6 0x98
166#define MC_SAG_CH_7 0x9c
167
168#define MC_RIR_LIMIT_CH_0 0x40
169#define MC_RIR_LIMIT_CH_1 0x44
170#define MC_RIR_LIMIT_CH_2 0x48
171#define MC_RIR_LIMIT_CH_3 0x4C
172#define MC_RIR_LIMIT_CH_4 0x50
173#define MC_RIR_LIMIT_CH_5 0x54
174#define MC_RIR_LIMIT_CH_6 0x58
175#define MC_RIR_LIMIT_CH_7 0x5C
176#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
177
178#define MC_RIR_WAY_CH 0x80
179 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
180 #define MC_RIR_WAY_RANK_MASK 0x7
181
a0c36a1f
MCC
182/*
183 * i7core structs
184 */
185
186#define NUM_CHANS 3
442305b1
MCC
187#define MAX_DIMMS 3 /* Max DIMMS per channel */
188#define MAX_MCR_FUNC 4
189#define MAX_CHAN_FUNC 3
a0c36a1f
MCC
190
191struct i7core_info {
192 u32 mc_control;
193 u32 mc_status;
194 u32 max_dod;
f122a892 195 u32 ch_map;
a0c36a1f
MCC
196};
197
194a40fe
MCC
198
199struct i7core_inject {
200 int enable;
201
202 u32 section;
203 u32 type;
204 u32 eccmask;
205
206 /* Error address mask */
207 int channel, dimm, rank, bank, page, col;
208};
209
0b2b7b7e 210struct i7core_channel {
442305b1
MCC
211 u32 ranks;
212 u32 dimms;
0b2b7b7e
MCC
213};
214
8f331907 215struct pci_id_descr {
66607706
MCC
216 int dev;
217 int func;
218 int dev_id;
de06eeef 219 int optional;
8f331907
MCC
220};
221
bd9e19ca 222struct pci_id_table {
1288c18f
MCC
223 const struct pci_id_descr *descr;
224 int n_devs;
bd9e19ca
VM
225};
226
f4742949
MCC
227struct i7core_dev {
228 struct list_head list;
229 u8 socket;
230 struct pci_dev **pdev;
de06eeef 231 int n_devs;
f4742949
MCC
232 struct mem_ctl_info *mci;
233};
234
a0c36a1f 235struct i7core_pvt {
f4742949
MCC
236 struct pci_dev *pci_noncore;
237 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
238 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
239
240 struct i7core_dev *i7core_dev;
67166af4 241
a0c36a1f 242 struct i7core_info info;
194a40fe 243 struct i7core_inject inject;
f4742949 244 struct i7core_channel channel[NUM_CHANS];
67166af4 245
f4742949
MCC
246 int ce_count_available;
247 int csrow_map[NUM_CHANS][MAX_DIMMS];
b4e8f0b6
MCC
248
249 /* ECC corrected errors counts per udimm */
f4742949
MCC
250 unsigned long udimm_ce_count[MAX_DIMMS];
251 int udimm_last_ce_count[MAX_DIMMS];
b4e8f0b6 252 /* ECC corrected errors counts per rdimm */
f4742949
MCC
253 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
254 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
442305b1 255
f4742949 256 unsigned int is_registered;
14d2c083 257
d5381642
MCC
258 /* mcelog glue */
259 struct edac_mce edac_mce;
ca9c90ba
MCC
260
261 /* Fifo double buffers */
d5381642 262 struct mce mce_entry[MCE_LOG_LEN];
ca9c90ba
MCC
263 struct mce mce_outentry[MCE_LOG_LEN];
264
265 /* Fifo in/out counters */
266 unsigned mce_in, mce_out;
267
268 /* Count indicator to show errors not got */
269 unsigned mce_overrun;
939747bd
MCC
270
271 /* Struct to control EDAC polling */
272 struct edac_pci_ctl_info *i7core_pci;
a0c36a1f
MCC
273};
274
8f331907
MCC
275#define PCI_DESCR(device, function, device_id) \
276 .dev = (device), \
277 .func = (function), \
278 .dev_id = (device_id)
279
1288c18f 280static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
8f331907
MCC
281 /* Memory controller */
282 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
283 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
79daef20
MCC
284
285 /* Exists only for RDIMM */
de06eeef 286 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
8f331907
MCC
287 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
288
289 /* Channel 0 */
290 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
291 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
292 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
293 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
294
295 /* Channel 1 */
296 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
297 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
298 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
299 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
300
301 /* Channel 2 */
302 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
303 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
304 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
305 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
a0c36a1f 306};
8f331907 307
1288c18f 308static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
52a2e4fc
MCC
309 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
310 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
311 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
312
313 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
314 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
315 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
316 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
317
508fa179
MCC
318 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
319 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
320 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
321 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
52a2e4fc
MCC
322};
323
1288c18f 324static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
bd9e19ca
VM
325 /* Memory controller */
326 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
327 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
328 /* Exists only for RDIMM */
329 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
330 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
331
332 /* Channel 0 */
333 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
334 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
335 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
336 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
337
338 /* Channel 1 */
339 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
340 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
341 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
342 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
343
344 /* Channel 2 */
345 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
346 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
347 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
348 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
bd9e19ca
VM
349};
350
1288c18f
MCC
351#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
352static const struct pci_id_table pci_dev_table[] = {
bd9e19ca
VM
353 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
354 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
355 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
3c52cc57 356 {0,} /* 0 terminated list. */
bd9e19ca
VM
357};
358
8f331907
MCC
359/*
360 * pci_device_id table for which devices we are looking for
8f331907
MCC
361 */
362static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
d1fd4fb6 363 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
f05da2f7 364 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
8f331907
MCC
365 {0,} /* 0 terminated list. */
366};
367
a0c36a1f
MCC
368/****************************************************************************
369 Ancillary status routines
370 ****************************************************************************/
371
372 /* MC_CONTROL bits */
ef708b53
MCC
373#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
374#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
a0c36a1f
MCC
375
376 /* MC_STATUS bits */
61053fde 377#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
ef708b53 378#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
a0c36a1f
MCC
379
380 /* MC_MAX_DOD read functions */
854d3349 381static inline int numdimms(u32 dimms)
a0c36a1f 382{
854d3349 383 return (dimms & 0x3) + 1;
a0c36a1f
MCC
384}
385
854d3349 386static inline int numrank(u32 rank)
a0c36a1f
MCC
387{
388 static int ranks[4] = { 1, 2, 4, -EINVAL };
389
854d3349 390 return ranks[rank & 0x3];
a0c36a1f
MCC
391}
392
854d3349 393static inline int numbank(u32 bank)
a0c36a1f
MCC
394{
395 static int banks[4] = { 4, 8, 16, -EINVAL };
396
854d3349 397 return banks[bank & 0x3];
a0c36a1f
MCC
398}
399
854d3349 400static inline int numrow(u32 row)
a0c36a1f
MCC
401{
402 static int rows[8] = {
403 1 << 12, 1 << 13, 1 << 14, 1 << 15,
404 1 << 16, -EINVAL, -EINVAL, -EINVAL,
405 };
406
854d3349 407 return rows[row & 0x7];
a0c36a1f
MCC
408}
409
854d3349 410static inline int numcol(u32 col)
a0c36a1f
MCC
411{
412 static int cols[8] = {
413 1 << 10, 1 << 11, 1 << 12, -EINVAL,
414 };
854d3349 415 return cols[col & 0x3];
a0c36a1f
MCC
416}
417
f4742949 418static struct i7core_dev *get_i7core_dev(u8 socket)
66607706
MCC
419{
420 struct i7core_dev *i7core_dev;
421
422 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
423 if (i7core_dev->socket == socket)
424 return i7core_dev;
425 }
426
427 return NULL;
428}
429
848b2f7e
HS
430static struct i7core_dev *alloc_i7core_dev(u8 socket,
431 const struct pci_id_table *table)
432{
433 struct i7core_dev *i7core_dev;
434
435 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
436 if (!i7core_dev)
437 return NULL;
438
439 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
440 GFP_KERNEL);
441 if (!i7core_dev->pdev) {
442 kfree(i7core_dev);
443 return NULL;
444 }
445
446 i7core_dev->socket = socket;
447 i7core_dev->n_devs = table->n_devs;
448 list_add_tail(&i7core_dev->list, &i7core_edac_list);
449
450 return i7core_dev;
451}
452
2aa9be44
HS
453static void free_i7core_dev(struct i7core_dev *i7core_dev)
454{
455 list_del(&i7core_dev->list);
456 kfree(i7core_dev->pdev);
457 kfree(i7core_dev);
458}
459
a0c36a1f
MCC
460/****************************************************************************
461 Memory check routines
462 ****************************************************************************/
67166af4
MCC
463static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
464 unsigned func)
ef708b53 465{
66607706 466 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
ef708b53 467 int i;
ef708b53 468
66607706
MCC
469 if (!i7core_dev)
470 return NULL;
471
de06eeef 472 for (i = 0; i < i7core_dev->n_devs; i++) {
66607706 473 if (!i7core_dev->pdev[i])
ef708b53
MCC
474 continue;
475
66607706
MCC
476 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
477 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
478 return i7core_dev->pdev[i];
ef708b53
MCC
479 }
480 }
481
eb94fc40
MCC
482 return NULL;
483}
484
ec6df24c
MCC
485/**
486 * i7core_get_active_channels() - gets the number of channels and csrows
487 * @socket: Quick Path Interconnect socket
488 * @channels: Number of channels that will be returned
489 * @csrows: Number of csrows found
490 *
491 * Since EDAC core needs to know in advance the number of available channels
492 * and csrows, in order to allocate memory for csrows/channels, it is needed
493 * to run two similar steps. At the first step, implemented on this function,
494 * it checks the number of csrows/channels present at one socket.
495 * this is used in order to properly allocate the size of mci components.
496 *
497 * It should be noticed that none of the current available datasheets explain
498 * or even mention how csrows are seen by the memory controller. So, we need
499 * to add a fake description for csrows.
500 * So, this driver is attributing one DIMM memory for one csrow.
501 */
1288c18f 502static int i7core_get_active_channels(const u8 socket, unsigned *channels,
67166af4 503 unsigned *csrows)
eb94fc40
MCC
504{
505 struct pci_dev *pdev = NULL;
506 int i, j;
507 u32 status, control;
508
509 *channels = 0;
510 *csrows = 0;
511
67166af4 512 pdev = get_pdev_slot_func(socket, 3, 0);
b7c76151 513 if (!pdev) {
67166af4
MCC
514 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
515 socket);
ef708b53 516 return -ENODEV;
b7c76151 517 }
ef708b53
MCC
518
519 /* Device 3 function 0 reads */
520 pci_read_config_dword(pdev, MC_STATUS, &status);
521 pci_read_config_dword(pdev, MC_CONTROL, &control);
522
523 for (i = 0; i < NUM_CHANS; i++) {
eb94fc40 524 u32 dimm_dod[3];
ef708b53
MCC
525 /* Check if the channel is active */
526 if (!(control & (1 << (8 + i))))
527 continue;
528
529 /* Check if the channel is disabled */
41fcb7fe 530 if (status & (1 << i))
ef708b53 531 continue;
ef708b53 532
67166af4 533 pdev = get_pdev_slot_func(socket, i + 4, 1);
eb94fc40 534 if (!pdev) {
67166af4
MCC
535 i7core_printk(KERN_ERR, "Couldn't find socket %d "
536 "fn %d.%d!!!\n",
537 socket, i + 4, 1);
eb94fc40
MCC
538 return -ENODEV;
539 }
540 /* Devices 4-6 function 1 */
541 pci_read_config_dword(pdev,
542 MC_DOD_CH_DIMM0, &dimm_dod[0]);
543 pci_read_config_dword(pdev,
544 MC_DOD_CH_DIMM1, &dimm_dod[1]);
545 pci_read_config_dword(pdev,
546 MC_DOD_CH_DIMM2, &dimm_dod[2]);
547
ef708b53 548 (*channels)++;
eb94fc40
MCC
549
550 for (j = 0; j < 3; j++) {
551 if (!DIMM_PRESENT(dimm_dod[j]))
552 continue;
553 (*csrows)++;
554 }
ef708b53
MCC
555 }
556
c77720b9 557 debugf0("Number of active channels on socket %d: %d\n",
67166af4 558 socket, *channels);
1c6fed80 559
ef708b53
MCC
560 return 0;
561}
562
2e5185f7 563static int get_dimm_config(const struct mem_ctl_info *mci)
a0c36a1f
MCC
564{
565 struct i7core_pvt *pvt = mci->pvt_info;
1c6fed80 566 struct csrow_info *csr;
854d3349 567 struct pci_dev *pdev;
ba6c5c62 568 int i, j;
2e5185f7 569 int csrow = 0;
5566cb7c 570 unsigned long last_page = 0;
1c6fed80 571 enum edac_type mode;
854d3349 572 enum mem_type mtype;
a0c36a1f 573
854d3349 574 /* Get data from the MC register, function 0 */
f4742949 575 pdev = pvt->pci_mcr[0];
7dd6953c 576 if (!pdev)
8f331907
MCC
577 return -ENODEV;
578
f122a892 579 /* Device 3 function 0 reads */
7dd6953c
MCC
580 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
581 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
582 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
583 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
f122a892 584
17cb7b0c 585 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
4af91889 586 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
f122a892 587 pvt->info.max_dod, pvt->info.ch_map);
a0c36a1f 588
1c6fed80 589 if (ECC_ENABLED(pvt)) {
41fcb7fe 590 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
1c6fed80
MCC
591 if (ECCx8(pvt))
592 mode = EDAC_S8ECD8ED;
593 else
594 mode = EDAC_S4ECD4ED;
595 } else {
a0c36a1f 596 debugf0("ECC disabled\n");
1c6fed80
MCC
597 mode = EDAC_NONE;
598 }
a0c36a1f
MCC
599
600 /* FIXME: need to handle the error codes */
17cb7b0c
MCC
601 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
602 "x%x x 0x%x\n",
854d3349
MCC
603 numdimms(pvt->info.max_dod),
604 numrank(pvt->info.max_dod >> 2),
276b824c 605 numbank(pvt->info.max_dod >> 4),
854d3349
MCC
606 numrow(pvt->info.max_dod >> 6),
607 numcol(pvt->info.max_dod >> 9));
a0c36a1f 608
0b2b7b7e 609 for (i = 0; i < NUM_CHANS; i++) {
854d3349 610 u32 data, dimm_dod[3], value[8];
0b2b7b7e 611
52a2e4fc
MCC
612 if (!pvt->pci_ch[i][0])
613 continue;
614
0b2b7b7e
MCC
615 if (!CH_ACTIVE(pvt, i)) {
616 debugf0("Channel %i is not active\n", i);
617 continue;
618 }
619 if (CH_DISABLED(pvt, i)) {
620 debugf0("Channel %i is disabled\n", i);
621 continue;
622 }
623
f122a892 624 /* Devices 4-6 function 0 */
f4742949 625 pci_read_config_dword(pvt->pci_ch[i][0],
0b2b7b7e
MCC
626 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
627
f4742949 628 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
67166af4 629 4 : 2;
0b2b7b7e 630
854d3349
MCC
631 if (data & REGISTERED_DIMM)
632 mtype = MEM_RDDR3;
14d2c083 633 else
854d3349
MCC
634 mtype = MEM_DDR3;
635#if 0
0b2b7b7e
MCC
636 if (data & THREE_DIMMS_PRESENT)
637 pvt->channel[i].dimms = 3;
638 else if (data & SINGLE_QUAD_RANK_PRESENT)
639 pvt->channel[i].dimms = 1;
640 else
641 pvt->channel[i].dimms = 2;
854d3349
MCC
642#endif
643
644 /* Devices 4-6 function 1 */
f4742949 645 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 646 MC_DOD_CH_DIMM0, &dimm_dod[0]);
f4742949 647 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 648 MC_DOD_CH_DIMM1, &dimm_dod[1]);
f4742949 649 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 650 MC_DOD_CH_DIMM2, &dimm_dod[2]);
0b2b7b7e 651
1c6fed80 652 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
854d3349 653 "%d ranks, %cDIMMs\n",
1c6fed80
MCC
654 i,
655 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
656 data,
f4742949 657 pvt->channel[i].ranks,
41fcb7fe 658 (data & REGISTERED_DIMM) ? 'R' : 'U');
854d3349
MCC
659
660 for (j = 0; j < 3; j++) {
661 u32 banks, ranks, rows, cols;
5566cb7c 662 u32 size, npages;
854d3349
MCC
663
664 if (!DIMM_PRESENT(dimm_dod[j]))
665 continue;
666
667 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
668 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
669 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
670 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
671
5566cb7c
MCC
672 /* DDR3 has 8 I/O banks */
673 size = (rows * cols * banks * ranks) >> (20 - 3);
674
f4742949 675 pvt->channel[i].dimms++;
854d3349 676
17cb7b0c
MCC
677 debugf0("\tdimm %d %d Mb offset: %x, "
678 "bank: %d, rank: %d, row: %#x, col: %#x\n",
679 j, size,
854d3349
MCC
680 RANKOFFSET(dimm_dod[j]),
681 banks, ranks, rows, cols);
682
e9144601 683 npages = MiB_TO_PAGES(size);
5566cb7c 684
2e5185f7 685 csr = &mci->csrows[csrow];
5566cb7c
MCC
686 csr->first_page = last_page + 1;
687 last_page += npages;
688 csr->last_page = last_page;
689 csr->nr_pages = npages;
690
854d3349 691 csr->page_mask = 0;
eb94fc40 692 csr->grain = 8;
2e5185f7 693 csr->csrow_idx = csrow;
eb94fc40
MCC
694 csr->nr_channels = 1;
695
696 csr->channels[0].chan_idx = i;
697 csr->channels[0].ce_count = 0;
854d3349 698
2e5185f7 699 pvt->csrow_map[i][j] = csrow;
b4e8f0b6 700
854d3349
MCC
701 switch (banks) {
702 case 4:
703 csr->dtype = DEV_X4;
704 break;
705 case 8:
706 csr->dtype = DEV_X8;
707 break;
708 case 16:
709 csr->dtype = DEV_X16;
710 break;
711 default:
712 csr->dtype = DEV_UNKNOWN;
713 }
714
715 csr->edac_mode = mode;
716 csr->mtype = mtype;
717
2e5185f7 718 csrow++;
854d3349 719 }
1c6fed80 720
854d3349
MCC
721 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
722 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
723 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
724 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
725 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
726 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
727 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
728 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
17cb7b0c 729 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
854d3349 730 for (j = 0; j < 8; j++)
17cb7b0c 731 debugf1("\t\t%#x\t%#x\t%#x\n",
854d3349
MCC
732 (value[j] >> 27) & 0x1,
733 (value[j] >> 24) & 0x7,
734 (value[j] && ((1 << 24) - 1)));
0b2b7b7e
MCC
735 }
736
a0c36a1f
MCC
737 return 0;
738}
739
194a40fe
MCC
740/****************************************************************************
741 Error insertion routines
742 ****************************************************************************/
743
744/* The i7core has independent error injection features per channel.
745 However, to have a simpler code, we don't allow enabling error injection
746 on more than one channel.
747 Also, since a change at an inject parameter will be applied only at enable,
748 we're disabling error injection on all write calls to the sysfs nodes that
749 controls the error code injection.
750 */
1288c18f 751static int disable_inject(const struct mem_ctl_info *mci)
194a40fe
MCC
752{
753 struct i7core_pvt *pvt = mci->pvt_info;
754
755 pvt->inject.enable = 0;
756
f4742949 757 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
758 return -ENODEV;
759
f4742949 760 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 761 MC_CHANNEL_ERROR_INJECT, 0);
8f331907
MCC
762
763 return 0;
194a40fe
MCC
764}
765
766/*
767 * i7core inject inject.section
768 *
769 * accept and store error injection inject.section value
770 * bit 0 - refers to the lower 32-byte half cacheline
771 * bit 1 - refers to the upper 32-byte half cacheline
772 */
773static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
774 const char *data, size_t count)
775{
776 struct i7core_pvt *pvt = mci->pvt_info;
777 unsigned long value;
778 int rc;
779
780 if (pvt->inject.enable)
41fcb7fe 781 disable_inject(mci);
194a40fe
MCC
782
783 rc = strict_strtoul(data, 10, &value);
784 if ((rc < 0) || (value > 3))
2068def5 785 return -EIO;
194a40fe
MCC
786
787 pvt->inject.section = (u32) value;
788 return count;
789}
790
791static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
792 char *data)
793{
794 struct i7core_pvt *pvt = mci->pvt_info;
795 return sprintf(data, "0x%08x\n", pvt->inject.section);
796}
797
798/*
799 * i7core inject.type
800 *
801 * accept and store error injection inject.section value
802 * bit 0 - repeat enable - Enable error repetition
803 * bit 1 - inject ECC error
804 * bit 2 - inject parity error
805 */
806static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
807 const char *data, size_t count)
808{
809 struct i7core_pvt *pvt = mci->pvt_info;
810 unsigned long value;
811 int rc;
812
813 if (pvt->inject.enable)
41fcb7fe 814 disable_inject(mci);
194a40fe
MCC
815
816 rc = strict_strtoul(data, 10, &value);
817 if ((rc < 0) || (value > 7))
2068def5 818 return -EIO;
194a40fe
MCC
819
820 pvt->inject.type = (u32) value;
821 return count;
822}
823
824static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
825 char *data)
826{
827 struct i7core_pvt *pvt = mci->pvt_info;
828 return sprintf(data, "0x%08x\n", pvt->inject.type);
829}
830
831/*
832 * i7core_inject_inject.eccmask_store
833 *
834 * The type of error (UE/CE) will depend on the inject.eccmask value:
835 * Any bits set to a 1 will flip the corresponding ECC bit
836 * Correctable errors can be injected by flipping 1 bit or the bits within
837 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
838 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
839 * uncorrectable error to be injected.
840 */
841static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
842 const char *data, size_t count)
843{
844 struct i7core_pvt *pvt = mci->pvt_info;
845 unsigned long value;
846 int rc;
847
848 if (pvt->inject.enable)
41fcb7fe 849 disable_inject(mci);
194a40fe
MCC
850
851 rc = strict_strtoul(data, 10, &value);
852 if (rc < 0)
2068def5 853 return -EIO;
194a40fe
MCC
854
855 pvt->inject.eccmask = (u32) value;
856 return count;
857}
858
859static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
860 char *data)
861{
862 struct i7core_pvt *pvt = mci->pvt_info;
863 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
864}
865
866/*
867 * i7core_addrmatch
868 *
869 * The type of error (UE/CE) will depend on the inject.eccmask value:
870 * Any bits set to a 1 will flip the corresponding ECC bit
871 * Correctable errors can be injected by flipping 1 bit or the bits within
872 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
873 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
874 * uncorrectable error to be injected.
875 */
194a40fe 876
a5538e53
MCC
877#define DECLARE_ADDR_MATCH(param, limit) \
878static ssize_t i7core_inject_store_##param( \
879 struct mem_ctl_info *mci, \
880 const char *data, size_t count) \
881{ \
cc301b3a 882 struct i7core_pvt *pvt; \
a5538e53
MCC
883 long value; \
884 int rc; \
885 \
cc301b3a
MCC
886 debugf1("%s()\n", __func__); \
887 pvt = mci->pvt_info; \
888 \
a5538e53
MCC
889 if (pvt->inject.enable) \
890 disable_inject(mci); \
891 \
4f87fad1 892 if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
a5538e53
MCC
893 value = -1; \
894 else { \
895 rc = strict_strtoul(data, 10, &value); \
896 if ((rc < 0) || (value >= limit)) \
897 return -EIO; \
898 } \
899 \
900 pvt->inject.param = value; \
901 \
902 return count; \
903} \
904 \
905static ssize_t i7core_inject_show_##param( \
906 struct mem_ctl_info *mci, \
907 char *data) \
908{ \
cc301b3a
MCC
909 struct i7core_pvt *pvt; \
910 \
911 pvt = mci->pvt_info; \
912 debugf1("%s() pvt=%p\n", __func__, pvt); \
a5538e53
MCC
913 if (pvt->inject.param < 0) \
914 return sprintf(data, "any\n"); \
915 else \
916 return sprintf(data, "%d\n", pvt->inject.param);\
194a40fe
MCC
917}
918
a5538e53
MCC
919#define ATTR_ADDR_MATCH(param) \
920 { \
921 .attr = { \
922 .name = #param, \
923 .mode = (S_IRUGO | S_IWUSR) \
924 }, \
925 .show = i7core_inject_show_##param, \
926 .store = i7core_inject_store_##param, \
927 }
194a40fe 928
a5538e53
MCC
929DECLARE_ADDR_MATCH(channel, 3);
930DECLARE_ADDR_MATCH(dimm, 3);
931DECLARE_ADDR_MATCH(rank, 4);
932DECLARE_ADDR_MATCH(bank, 32);
933DECLARE_ADDR_MATCH(page, 0x10000);
934DECLARE_ADDR_MATCH(col, 0x4000);
194a40fe 935
1288c18f 936static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
276b824c
MCC
937{
938 u32 read;
939 int count;
940
4157d9f5
MCC
941 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
942 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
943 where, val);
944
276b824c
MCC
945 for (count = 0; count < 10; count++) {
946 if (count)
b990538a 947 msleep(100);
276b824c
MCC
948 pci_write_config_dword(dev, where, val);
949 pci_read_config_dword(dev, where, &read);
950
951 if (read == val)
952 return 0;
953 }
954
4157d9f5
MCC
955 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
956 "write=%08x. Read=%08x\n",
957 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
958 where, val, read);
276b824c
MCC
959
960 return -EINVAL;
961}
962
194a40fe
MCC
963/*
964 * This routine prepares the Memory Controller for error injection.
965 * The error will be injected when some process tries to write to the
966 * memory that matches the given criteria.
967 * The criteria can be set in terms of a mask where dimm, rank, bank, page
968 * and col can be specified.
969 * A -1 value for any of the mask items will make the MCU to ignore
970 * that matching criteria for error injection.
971 *
972 * It should be noticed that the error will only happen after a write operation
973 * on a memory that matches the condition. if REPEAT_EN is not enabled at
974 * inject mask, then it will produce just one error. Otherwise, it will repeat
975 * until the injectmask would be cleaned.
976 *
977 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
978 * is reliable enough to check if the MC is using the
979 * three channels. However, this is not clear at the datasheet.
980 */
981static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
982 const char *data, size_t count)
983{
984 struct i7core_pvt *pvt = mci->pvt_info;
985 u32 injectmask;
986 u64 mask = 0;
987 int rc;
988 long enable;
989
f4742949 990 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
991 return 0;
992
194a40fe
MCC
993 rc = strict_strtoul(data, 10, &enable);
994 if ((rc < 0))
995 return 0;
996
997 if (enable) {
998 pvt->inject.enable = 1;
999 } else {
1000 disable_inject(mci);
1001 return count;
1002 }
1003
1004 /* Sets pvt->inject.dimm mask */
1005 if (pvt->inject.dimm < 0)
486dd09f 1006 mask |= 1LL << 41;
194a40fe 1007 else {
f4742949 1008 if (pvt->channel[pvt->inject.channel].dimms > 2)
486dd09f 1009 mask |= (pvt->inject.dimm & 0x3LL) << 35;
194a40fe 1010 else
486dd09f 1011 mask |= (pvt->inject.dimm & 0x1LL) << 36;
194a40fe
MCC
1012 }
1013
1014 /* Sets pvt->inject.rank mask */
1015 if (pvt->inject.rank < 0)
486dd09f 1016 mask |= 1LL << 40;
194a40fe 1017 else {
f4742949 1018 if (pvt->channel[pvt->inject.channel].dimms > 2)
486dd09f 1019 mask |= (pvt->inject.rank & 0x1LL) << 34;
194a40fe 1020 else
486dd09f 1021 mask |= (pvt->inject.rank & 0x3LL) << 34;
194a40fe
MCC
1022 }
1023
1024 /* Sets pvt->inject.bank mask */
1025 if (pvt->inject.bank < 0)
486dd09f 1026 mask |= 1LL << 39;
194a40fe 1027 else
486dd09f 1028 mask |= (pvt->inject.bank & 0x15LL) << 30;
194a40fe
MCC
1029
1030 /* Sets pvt->inject.page mask */
1031 if (pvt->inject.page < 0)
486dd09f 1032 mask |= 1LL << 38;
194a40fe 1033 else
486dd09f 1034 mask |= (pvt->inject.page & 0xffff) << 14;
194a40fe
MCC
1035
1036 /* Sets pvt->inject.column mask */
1037 if (pvt->inject.col < 0)
486dd09f 1038 mask |= 1LL << 37;
194a40fe 1039 else
486dd09f 1040 mask |= (pvt->inject.col & 0x3fff);
194a40fe 1041
276b824c
MCC
1042 /*
1043 * bit 0: REPEAT_EN
1044 * bits 1-2: MASK_HALF_CACHELINE
1045 * bit 3: INJECT_ECC
1046 * bit 4: INJECT_ADDR_PARITY
1047 */
1048
1049 injectmask = (pvt->inject.type & 1) |
1050 (pvt->inject.section & 0x3) << 1 |
1051 (pvt->inject.type & 0x6) << (3 - 1);
1052
1053 /* Unlock writes to registers - this register is write only */
f4742949 1054 pci_write_config_dword(pvt->pci_noncore,
67166af4 1055 MC_CFG_CONTROL, 0x2);
e9bd2e73 1056
f4742949 1057 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
194a40fe 1058 MC_CHANNEL_ADDR_MATCH, mask);
f4742949 1059 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
7b029d03 1060 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
7b029d03 1061
f4742949 1062 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
194a40fe
MCC
1063 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1064
f4742949 1065 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 1066 MC_CHANNEL_ERROR_INJECT, injectmask);
276b824c 1067
194a40fe 1068 /*
276b824c
MCC
1069 * This is something undocumented, based on my tests
1070 * Without writing 8 to this register, errors aren't injected. Not sure
1071 * why.
194a40fe 1072 */
f4742949 1073 pci_write_config_dword(pvt->pci_noncore,
276b824c 1074 MC_CFG_CONTROL, 8);
194a40fe 1075
41fcb7fe
MCC
1076 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1077 " inject 0x%08x\n",
194a40fe
MCC
1078 mask, pvt->inject.eccmask, injectmask);
1079
7b029d03 1080
194a40fe
MCC
1081 return count;
1082}
1083
/*
 * Sysfs "inject_enable" show handler: reads back the channel's
 * MC_CHANNEL_ERROR_INJECT register and reports whether injection is on.
 */
static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
					char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	/* No device bound for the selected channel: nothing to report */
	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	debugf0("Inject error read: 0x%018x\n", injectmask);

	/* Bits 2-3 set (INJECT_ECC/INJECT_ADDR_PARITY) imply injection armed */
	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}
1103
/*
 * Generates the sysfs show handler for one unbuffered-DIMM corrected-error
 * counter. Counters are only meaningful for UDIMMs; for registered DIMMs
 * (is_registered) the data comes from other registers, so report
 * "data unavailable".
 */
#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
		struct mem_ctl_info *mci,			\
		char *data)					\
{								\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	debugf1("%s() \n", __func__);				\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}

/* Read-only sysfs attribute entry named "udimm<param>" */
#define ATTR_COUNTER(param)					\
	{							\
		.attr = {					\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show = i7core_show_counter_##param		\
	}

/* One counter per DIMM slot (0..2) of a channel */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);
194a40fe
MCC
1131/*
1132 * Sysfs struct
1133 */
a5538e53 1134
1288c18f 1135static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
a5538e53
MCC
1136 ATTR_ADDR_MATCH(channel),
1137 ATTR_ADDR_MATCH(dimm),
1138 ATTR_ADDR_MATCH(rank),
1139 ATTR_ADDR_MATCH(bank),
1140 ATTR_ADDR_MATCH(page),
1141 ATTR_ADDR_MATCH(col),
1288c18f 1142 { } /* End of list */
a5538e53
MCC
1143};
1144
1288c18f 1145static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
a5538e53
MCC
1146 .name = "inject_addrmatch",
1147 .mcidev_attr = i7core_addrmatch_attrs,
1148};
1149
1288c18f 1150static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
f338d736
MCC
1151 ATTR_COUNTER(0),
1152 ATTR_COUNTER(1),
1153 ATTR_COUNTER(2),
64aab720 1154 { .attr = { .name = NULL } }
f338d736
MCC
1155};
1156
1288c18f 1157static const struct mcidev_sysfs_group i7core_udimm_counters = {
f338d736
MCC
1158 .name = "all_channel_counts",
1159 .mcidev_attr = i7core_udimm_counters_attrs,
1160};
1161
/*
 * Sysfs attributes for registered-DIMM controllers. RDIMMs get their CE
 * counters from the rdimm registers, so no extra counter group is attached.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ } /* End of list */
};

/*
 * Sysfs attributes for unbuffered-DIMM controllers: same injection knobs
 * as the rdimm table, plus the all_channel_counts group of CE counters.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ } /* End of list */
};
1233
/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 * i7core_put_devices	'put' all the devices that we have
 *			reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		/* Optional devices may never have been acquired */
		if (!pdev)
			continue;
		debugf0("Removing dev %02x:%02x.%d\n",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

/* Release every socket's devices and free the i7core_dev descriptors */
static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	/* _safe variant: free_i7core_dev() removes entries while iterating */
	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}
1267
/*
 * Force a legacy PCI bus scan for the hidden uncore buses when the first
 * device of each table entry cannot be found through normal enumeration.
 */
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	/*
	 * On Xeon 55xx, the Intel Quickpath Arch Generic Non-core pci buses
	 * aren't announced by acpi. So, we need to use a legacy scan probing
	 * to detect them
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			/* Probe the topmost buses, where the uncore lives */
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		/* pci_dev_put(NULL) is a no-op, so this is safe either way */
		pci_dev_put(pdev);
		table++;
	}
}
1288
bda14289
MCC
1289static unsigned i7core_pci_lastbus(void)
1290{
1291 int last_bus = 0, bus;
1292 struct pci_bus *b = NULL;
1293
1294 while ((b = pci_find_next_bus(b)) != NULL) {
1295 bus = b->number;
1296 debugf0("Found bus %d\n", bus);
1297 if (bus > last_bus)
1298 last_bus = bus;
1299 }
1300
1301 debugf0("Last bus %d\n", last_bus);
1302
1303 return last_bus;
1304}
1305
/*
 * i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 * Need to 'get' device 16 func 1 and func 2
 */

/*
 * Acquire one PCI device described by table->descr[devno], attach it to the
 * per-socket i7core_dev, and advance *prev so repeated calls iterate over
 * all instances (one per socket) of the same device id.
 *
 * Returns 0 when done (or device optional/absent), negative errno on error.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	if (!pdev) {
		/* No more instances: end the *prev iteration cleanly */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* devno 0 missing just means this table doesn't apply */
		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* Uncore buses are allocated top-down from last_bus, one per socket */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		      PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
a0c36a1f 1410
/*
 * Walk every pci_dev_table entry and acquire all instances (all sockets)
 * of each listed device. On any hard failure, release everything already
 * acquired. Returns 0 on success, -ENODEV on failure.
 */
static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					/*
					 * Failure on device 0 means this
					 * table doesn't match the hardware:
					 * skip to the next table.
					 */
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev); /* once per socket instance */
		}
		table++;
	}

	return 0;
}
1440
/*
 * Map the acquired PCI devices into the private structure by slot/function:
 * slot 3 -> pci_mcr[func], slots 4..4+NUM_CHANS-1 -> pci_ch[chan][func],
 * slot 0 func 0 -> pci_noncore. Also detects registered DIMMs (dev 3.2
 * present). Returns 0 on success, -EINVAL on an unexpected slot/function.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;

	pvt->is_registered = 0;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func)
			pvt->pci_noncore = pdev;
		else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		/* Device 3 function 2 only exists on registered DIMMs */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = 1;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}
1486
/****************************************************************************
			Error check routines
 ****************************************************************************/

/*
 * Report @add newly observed corrected errors for (chan, dimm) to the EDAC
 * core, one edac_mc_handle_fbd_ce() call per error.
 */
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
				      const int chan,
				      const int dimm,
				      const int add)
{
	char *msg;
	struct i7core_pvt *pvt = mci->pvt_info;
	int row = pvt->csrow_map[chan][dimm], i;

	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);

		edac_mc_handle_fbd_ce(mci, row, 0, msg);
		kfree (msg);
	}
}
1508
/*
 * Compare the freshly-read per-dimm CE counters (new0..new2) for @chan
 * against the last snapshot, accumulate the deltas and forward them to
 * the EDAC core. Hardware counters are 15-bit, hence the 0x7fff wrap
 * correction on negative deltas.
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */

		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

		/* Correct for 15-bit hardware counter wrap-around */
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/*updated the edac core */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);

}
1553
/*
 * Poll the registered-DIMM corrected-error count registers (dev 3 func 2)
 * for all three channels and feed the per-dimm counts to
 * i7core_rdimm_update_ce_count().
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			/* 2-dimm layout: each dimm spans a whole register */
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
					DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
					DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}
/* This function is based on the device 3 function 4 registers as described on:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		/* Correct for 15-bit hardware counter wrap-around */
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}
1652
8a2f118e
MCC
1653/*
1654 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1655 * Architectures Software Developer’s Manual Volume 3B.
f237fcf2
MCC
1656 * Nehalem are defined as family 0x06, model 0x1a
1657 *
1658 * The MCA registers used here are the following ones:
8a2f118e 1659 * struct mce field MCA Register
f237fcf2
MCC
1660 * m->status MSR_IA32_MC8_STATUS
1661 * m->addr MSR_IA32_MC8_ADDR
1662 * m->misc MSR_IA32_MC8_MISC
8a2f118e
MCC
1663 * In the case of Nehalem, the error information is masked at .status and .misc
1664 * fields
1665 */
d5381642 1666static void i7core_mce_output_error(struct mem_ctl_info *mci,
1288c18f 1667 const struct mce *m)
d5381642 1668{
b4e8f0b6 1669 struct i7core_pvt *pvt = mci->pvt_info;
a639539f 1670 char *type, *optype, *err, *msg;
8a2f118e 1671 unsigned long error = m->status & 0x1ff0000l;
a639539f 1672 u32 optypenum = (m->status >> 4) & 0x07;
8a2f118e
MCC
1673 u32 core_err_cnt = (m->status >> 38) && 0x7fff;
1674 u32 dimm = (m->misc >> 16) & 0x3;
1675 u32 channel = (m->misc >> 18) & 0x3;
1676 u32 syndrome = m->misc >> 32;
1677 u32 errnum = find_first_bit(&error, 32);
b4e8f0b6 1678 int csrow;
8a2f118e 1679
c5d34528
MCC
1680 if (m->mcgstatus & 1)
1681 type = "FATAL";
1682 else
1683 type = "NON_FATAL";
1684
a639539f 1685 switch (optypenum) {
b990538a
MCC
1686 case 0:
1687 optype = "generic undef request";
1688 break;
1689 case 1:
1690 optype = "read error";
1691 break;
1692 case 2:
1693 optype = "write error";
1694 break;
1695 case 3:
1696 optype = "addr/cmd error";
1697 break;
1698 case 4:
1699 optype = "scrubbing error";
1700 break;
1701 default:
1702 optype = "reserved";
1703 break;
a639539f
MCC
1704 }
1705
8a2f118e
MCC
1706 switch (errnum) {
1707 case 16:
1708 err = "read ECC error";
1709 break;
1710 case 17:
1711 err = "RAS ECC error";
1712 break;
1713 case 18:
1714 err = "write parity error";
1715 break;
1716 case 19:
1717 err = "redundacy loss";
1718 break;
1719 case 20:
1720 err = "reserved";
1721 break;
1722 case 21:
1723 err = "memory range error";
1724 break;
1725 case 22:
1726 err = "RTID out of range";
1727 break;
1728 case 23:
1729 err = "address parity error";
1730 break;
1731 case 24:
1732 err = "byte enable parity error";
1733 break;
1734 default:
1735 err = "unknown";
d5381642 1736 }
d5381642 1737
f237fcf2 1738 /* FIXME: should convert addr into bank and rank information */
8a2f118e 1739 msg = kasprintf(GFP_ATOMIC,
f4742949 1740 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
a639539f 1741 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
f4742949 1742 type, (long long) m->addr, m->cpu, dimm, channel,
a639539f
MCC
1743 syndrome, core_err_cnt, (long long)m->status,
1744 (long long)m->misc, optype, err);
8a2f118e
MCC
1745
1746 debugf0("%s", msg);
d5381642 1747
f4742949 1748 csrow = pvt->csrow_map[channel][dimm];
b4e8f0b6 1749
d5381642 1750 /* Call the helper to output message */
b4e8f0b6
MCC
1751 if (m->mcgstatus & 1)
1752 edac_mc_handle_fbd_ue(mci, csrow, 0,
1753 0 /* FIXME: should be channel here */, msg);
f4742949 1754 else if (!pvt->is_registered)
b4e8f0b6
MCC
1755 edac_mc_handle_fbd_ce(mci, csrow,
1756 0 /* FIXME: should be channel here */, msg);
8a2f118e
MCC
1757
1758 kfree(msg);
d5381642
MCC
1759}
1760
/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * losing an error.
	 */
	smp_rmb();
	/* Number of queued entries in the mce_entry ring */
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	/* Ring wraps: copy the tail segment first, then restart at 0 */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}
1820
/*
 * i7core_mce_check_error	Replicates mcelog routine to get errors
 *				This routine simply queues mcelog errors, and
 *				return. The error itself should be handled later
 *				by i7core_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
static int i7core_mce_check_error(void *priv, struct mce *mce)
{
	struct mem_ctl_info *mci = priv;
	struct i7core_pvt *pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
		return 0;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return 0;

#ifdef CONFIG_SMP
	/* Only handle if it is the right mc controller */
	if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
		return 0;
#endif

	smp_rmb();
	/* Ring full: count the loss, report it later from i7core_check_error */
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return 0;
	}

	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);

	/* Advice mcelog that the error were handled */
	return 1;
}
1870
/* Allocate the generic EDAC PCI control for this socket's first device */
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
{
	pvt->i7core_pci = edac_pci_create_generic_ctl(
						&pvt->i7core_dev->pdev[0]->dev,
						EDAC_MOD_STR);
	if (unlikely(!pvt->i7core_pci))
		pr_warn("Unable to setup PCI error report via EDAC\n");
}

/* Release the generic EDAC PCI control, tolerating a failed create */
static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
{
	if (likely(pvt->i7core_pci))
		edac_pci_release_generic_ctl(pvt->i7core_pci);
	else
		i7core_printk(KERN_ERR,
				"Couldn't find mem_ctl_info for socket %d\n",
				pvt->i7core_dev->socket);
	pvt->i7core_pci = NULL;
}
1890
/*
 * Tear down the MC instance for one socket: unhook the MCE NMI handler,
 * release the EDAC PCI control, remove the sysfs nodes and free the mci.
 */
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
			__func__, &i7core_dev->pdev[0]->dev);

		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	/* Disable MCE NMI handler */
	edac_mce_unregister(&pvt->edac_mce);

	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->dev);

	debugf1("%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
}
1923
aace4283 1924static int i7core_register_mci(struct i7core_dev *i7core_dev)
a0c36a1f
MCC
1925{
1926 struct mem_ctl_info *mci;
1927 struct i7core_pvt *pvt;
aace4283
HS
1928 int rc, channels, csrows;
1929
1930 /* Check the number of active and not disabled channels */
1931 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
1932 if (unlikely(rc < 0))
1933 return rc;
a0c36a1f 1934
a0c36a1f 1935 /* allocate a new MC control structure */
aace4283 1936 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
f4742949
MCC
1937 if (unlikely(!mci))
1938 return -ENOMEM;
a0c36a1f 1939
3cfd0146
MCC
1940 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
1941 __func__, mci, &i7core_dev->pdev[0]->dev);
a0c36a1f 1942
a0c36a1f 1943 pvt = mci->pvt_info;
ef708b53 1944 memset(pvt, 0, sizeof(*pvt));
67166af4 1945
6d37d240
MCC
1946 /* Associates i7core_dev and mci for future usage */
1947 pvt->i7core_dev = i7core_dev;
1948 i7core_dev->mci = mci;
1949
41fcb7fe
MCC
1950 /*
1951 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
1952 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
1953 * memory channels
1954 */
1955 mci->mtype_cap = MEM_FLAG_DDR3;
a0c36a1f
MCC
1956 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1957 mci->edac_cap = EDAC_FLAG_NONE;
1958 mci->mod_name = "i7core_edac.c";
1959 mci->mod_ver = I7CORE_REVISION;
f4742949
MCC
1960 mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
1961 i7core_dev->socket);
1962 mci->dev_name = pci_name(i7core_dev->pdev[0]);
a0c36a1f 1963 mci->ctl_page_to_phys = NULL;
1288c18f 1964
ef708b53 1965 /* Store pci devices at mci for faster access */
f4742949 1966 rc = mci_bind_devs(mci, i7core_dev);
41fcb7fe 1967 if (unlikely(rc < 0))
628c5ddf 1968 goto fail0;
ef708b53 1969
5939813b
HS
1970 if (pvt->is_registered)
1971 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
1972 else
1973 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
1974
ef708b53 1975 /* Get dimm basic config */
2e5185f7 1976 get_dimm_config(mci);
5939813b
HS
1977 /* record ptr to the generic device */
1978 mci->dev = &i7core_dev->pdev[0]->dev;
1979 /* Set the function pointer to an actual operation function */
1980 mci->edac_check = i7core_check_error;
ef708b53 1981
a0c36a1f 1982 /* add this new MC control structure to EDAC's list of MCs */
b7c76151 1983 if (unlikely(edac_mc_add_mc(mci))) {
a0c36a1f
MCC
1984 debugf0("MC: " __FILE__
1985 ": %s(): failed edac_mc_add_mc()\n", __func__);
1986 /* FIXME: perhaps some code should go here that disables error
1987 * reporting if we just enabled it
1988 */
b7c76151
MCC
1989
1990 rc = -EINVAL;
628c5ddf 1991 goto fail0;
a0c36a1f
MCC
1992 }
1993
194a40fe 1994 /* Default error mask is any memory */
ef708b53 1995 pvt->inject.channel = 0;
194a40fe
MCC
1996 pvt->inject.dimm = -1;
1997 pvt->inject.rank = -1;
1998 pvt->inject.bank = -1;
1999 pvt->inject.page = -1;
2000 pvt->inject.col = -1;
2001
a3aa0a4a
HS
2002 /* allocating generic PCI control info */
2003 i7core_pci_ctl_create(pvt);
2004
d5381642 2005 /* Registers on edac_mce in order to receive memory errors */
c5d34528 2006 pvt->edac_mce.priv = mci;
d5381642 2007 pvt->edac_mce.check_error = i7core_mce_check_error;
d5381642 2008 rc = edac_mce_register(&pvt->edac_mce);
b990538a 2009 if (unlikely(rc < 0)) {
d5381642
MCC
2010 debugf0("MC: " __FILE__
2011 ": %s(): failed edac_mce_register()\n", __func__);
628c5ddf 2012 goto fail1;
f4742949
MCC
2013 }
2014
628c5ddf
HS
2015 return 0;
2016
2017fail1:
2018 i7core_pci_ctl_release(pvt);
2019 edac_mc_del_mc(mci->dev);
2020fail0:
2021 kfree(mci->ctl_name);
2022 edac_mc_free(mci);
1c6edbbe 2023 i7core_dev->mci = NULL;
f4742949
MCC
2024 return rc;
2025}
2026
2027/*
2028 * i7core_probe Probe for ONE instance of device to see if it is
2029 * present.
2030 * return:
2031 * 0 for FOUND a device
2032 * < 0 for error code
2033 */
2d95d815 2034
f4742949
MCC
2035static int __devinit i7core_probe(struct pci_dev *pdev,
2036 const struct pci_device_id *id)
2037{
f4742949
MCC
2038 int rc;
2039 struct i7core_dev *i7core_dev;
2040
2d95d815
MCC
2041 /* get the pci devices we want to reserve for our use */
2042 mutex_lock(&i7core_edac_lock);
2043
f4742949 2044 /*
d4c27795 2045 * All memory controllers are allocated at the first pass.
f4742949 2046 */
2d95d815
MCC
2047 if (unlikely(probed >= 1)) {
2048 mutex_unlock(&i7core_edac_lock);
76a7bd81 2049 return -ENODEV;
2d95d815
MCC
2050 }
2051 probed++;
de06eeef 2052
64c10f6e 2053 rc = i7core_get_all_devices();
f4742949
MCC
2054 if (unlikely(rc < 0))
2055 goto fail0;
2056
2057 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
aace4283 2058 rc = i7core_register_mci(i7core_dev);
d4c27795
MCC
2059 if (unlikely(rc < 0))
2060 goto fail1;
d5381642
MCC
2061 }
2062
ef708b53 2063 i7core_printk(KERN_INFO, "Driver loaded.\n");
8f331907 2064
66607706 2065 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2066 return 0;
2067
66607706 2068fail1:
88ef5ea9
MCC
2069 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2070 i7core_unregister_mci(i7core_dev);
2071
13d6e9b6 2072 i7core_put_all_devices();
66607706
MCC
2073fail0:
2074 mutex_unlock(&i7core_edac_lock);
b7c76151 2075 return rc;
a0c36a1f
MCC
2076}
2077
2078/*
2079 * i7core_remove destructor for one instance of device
2080 *
2081 */
2082static void __devexit i7core_remove(struct pci_dev *pdev)
2083{
64c10f6e 2084 struct i7core_dev *i7core_dev;
a0c36a1f
MCC
2085
2086 debugf0(__FILE__ ": %s()\n", __func__);
2087
22e6bcbd
MCC
2088 /*
2089 * we have a trouble here: pdev value for removal will be wrong, since
2090 * it will point to the X58 register used to detect that the machine
2091 * is a Nehalem or upper design. However, due to the way several PCI
2092 * devices are grouped together to provide MC functionality, we need
2093 * to use a different method for releasing the devices
2094 */
87d1d272 2095
66607706 2096 mutex_lock(&i7core_edac_lock);
71fe0170
HS
2097
2098 if (unlikely(!probed)) {
2099 mutex_unlock(&i7core_edac_lock);
2100 return;
2101 }
2102
88ef5ea9
MCC
2103 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2104 i7core_unregister_mci(i7core_dev);
64c10f6e
HS
2105
2106 /* Release PCI resources */
2107 i7core_put_all_devices();
2108
2d95d815
MCC
2109 probed--;
2110
66607706 2111 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2112}
2113
a0c36a1f
MCC
/* Export the PCI ID table so the module autoloads on matching hardware */
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *
 */
static struct pci_driver i7core_driver = {
	.name = "i7core_edac",
	.probe = i7core_probe,
	.remove = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
};
2126
2127/*
2128 * i7core_init Module entry function
2129 * Try to initialize this module for its devices
2130 */
2131static int __init i7core_init(void)
2132{
2133 int pci_rc;
2134
2135 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2136
2137 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2138 opstate_init();
2139
54a08ab1
MCC
2140 if (use_pci_fixup)
2141 i7core_xeon_pci_fixup(pci_dev_table);
bc2d7245 2142
a0c36a1f
MCC
2143 pci_rc = pci_register_driver(&i7core_driver);
2144
3ef288a9
MCC
2145 if (pci_rc >= 0)
2146 return 0;
2147
2148 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2149 pci_rc);
2150
2151 return pci_rc;
a0c36a1f
MCC
2152}
2153
/*
 * i7core_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i7core_exit(void)
{
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	/* i7core_remove() does the per-device teardown */
	pci_unregister_driver(&i7core_driver);
}
2163
/* Module entry/exit hooks */
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

/* Read-only (0444) module parameter selecting the EDAC reporting mode */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");