drivers/crypto/hifn_795x.c
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/mod_devicetable.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/mm.h>
28#include <linux/highmem.h>
29#include <linux/crypto.h>
30
31#include <crypto/algapi.h>
32
33#include <asm/kmap_types.h>
34
35#undef dprintk
36
37#define HIFN_TEST
38//#define HIFN_DEBUG
39
40#ifdef HIFN_DEBUG
41#define dprintk(f, a...) printk(f, ##a)
42#else
43#define dprintk(f, a...) do {} while (0)
44#endif
45
46static atomic_t hifn_dev_number;
47
48#define ACRYPTO_OP_DECRYPT 0
49#define ACRYPTO_OP_ENCRYPT 1
50#define ACRYPTO_OP_HMAC 2
51#define ACRYPTO_OP_RNG 3
52
53#define ACRYPTO_MODE_ECB 0
54#define ACRYPTO_MODE_CBC 1
55#define ACRYPTO_MODE_CFB 2
56#define ACRYPTO_MODE_OFB 3
57
58#define ACRYPTO_TYPE_AES_128 0
59#define ACRYPTO_TYPE_AES_192 1
60#define ACRYPTO_TYPE_AES_256 2
61#define ACRYPTO_TYPE_3DES 3
62#define ACRYPTO_TYPE_DES 4
63
64#define PCI_VENDOR_ID_HIFN 0x13A3
65#define PCI_DEVICE_ID_HIFN_7955 0x0020
66#define PCI_DEVICE_ID_HIFN_7956 0x001d
67
68/* I/O region sizes */
69
70#define HIFN_BAR0_SIZE 0x1000
71#define HIFN_BAR1_SIZE 0x2000
72#define HIFN_BAR2_SIZE 0x8000
73
 74/* DMA registers */
75
76#define HIFN_DMA_CRA 0x0C /* DMA Command Ring Address */
77#define HIFN_DMA_SDRA 0x1C /* DMA Source Data Ring Address */
78#define HIFN_DMA_RRA 0x2C /* DMA Result Ring Address */
79#define HIFN_DMA_DDRA 0x3C /* DMA Destination Data Ring Address */
80#define HIFN_DMA_STCTL 0x40 /* DMA Status and Control */
81#define HIFN_DMA_INTREN 0x44 /* DMA Interrupt Enable */
82#define HIFN_DMA_CFG1 0x48 /* DMA Configuration #1 */
83#define HIFN_DMA_CFG2 0x6C /* DMA Configuration #2 */
84#define HIFN_CHIP_ID 0x98 /* Chip ID */
85
86/*
87 * Processing Unit Registers (offset from BASEREG0)
88 */
89#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
90#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
91#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
92#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
93#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
94#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
95#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
96#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
97#define HIFN_0_SPACESIZE 0x20 /* Register space size */
98
99/* Processing Unit Control Register (HIFN_0_PUCTRL) */
100#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
101#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
102#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
103#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
104#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
105
106/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
107#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
108#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
109#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
110#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
111#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
112#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
113#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
114#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
115#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
116#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
117
118/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
119#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
120#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
121#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
122#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
123#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
124#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
125#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
126#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
127#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
128#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
129#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
130#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
131#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
132#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
133#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
134#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
135#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
136#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
137#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
138#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
139#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
140#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
141#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
142
143/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
144#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
145#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
146#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
147#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
148#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
149#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
150#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
151#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
152#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
153#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
154
155/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
156#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
157#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
158#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
159#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
160#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
161#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
162#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
163#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
164#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
165#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
166#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
167#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
168#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
169#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
170#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
171#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
172#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
173
174/* FIFO Status Register (HIFN_0_FIFOSTAT) */
175#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
176#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
177
178/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
179#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as 1 */
180
181/*
182 * DMA Interface Registers (offset from BASEREG1)
183 */
184#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
185#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
186#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
187#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
188#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
189#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
190#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
191#define HIFN_1_PLL 0x4c /* 795x: PLL config */
192#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
193#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
194#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
195#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
196#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
197#define HIFN_1_REVID 0x98 /* Revision ID */
198#define HIFN_1_UNLOCK_SECRET1 0xf4
199#define HIFN_1_UNLOCK_SECRET2 0xfc
200#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
201#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
202#define HIFN_1_PUB_OPLEN 0x304 /* Public Operand Length */
203#define HIFN_1_PUB_OP 0x308 /* Public Operand */
204#define HIFN_1_PUB_STATUS 0x30c /* Public Status */
205#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
206#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
207#define HIFN_1_RNG_DATA 0x318 /* RNG data */
208#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
209#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
210
211/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
 212#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
 213#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
 214#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
 215#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
 216#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
 217#define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */
 218#define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */
 219#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */
 220#define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */
221#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
222#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
223#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
224#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
225#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
226#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
227#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
228#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
229#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
230#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
231#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
232#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
233#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
234#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
235#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
236#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
237#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
238#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
239#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
240#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
241#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
242#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
243#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
244#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
245#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
246#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
247#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
248#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
249#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
250
251/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
 252#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
253#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
254#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
255#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
256#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
257#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
258#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
259#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
260#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
261#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
262#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
263#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
264#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
265#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
266#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
267#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
268#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
269#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
270#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
271#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
272#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
273#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
274
275/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
276#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
277#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
278#define HIFN_DMACNFG_UNLOCK 0x00000800
279#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
280#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
281#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
282#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
283#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
284
285#define HIFN_PLL_7956 0x00001d18 /* 7956 PLL config value */
286
287/* Public key reset register (HIFN_1_PUB_RESET) */
288#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
289
290/* Public base address register (HIFN_1_PUB_BASE) */
291#define HIFN_PUBBASE_ADDR 0x00003fff /* base address */
292
293/* Public operand length register (HIFN_1_PUB_OPLEN) */
294#define HIFN_PUBOPLEN_MOD_M 0x0000007f /* modulus length mask */
295#define HIFN_PUBOPLEN_MOD_S 0 /* modulus length shift */
296#define HIFN_PUBOPLEN_EXP_M 0x0003ff80 /* exponent length mask */
 297#define HIFN_PUBOPLEN_EXP_S 7 /* exponent length shift */
298#define HIFN_PUBOPLEN_RED_M 0x003c0000 /* reducend length mask */
299#define HIFN_PUBOPLEN_RED_S 18 /* reducend length shift */
300
301/* Public operation register (HIFN_1_PUB_OP) */
302#define HIFN_PUBOP_AOFFSET_M 0x0000007f /* A offset mask */
303#define HIFN_PUBOP_AOFFSET_S 0 /* A offset shift */
304#define HIFN_PUBOP_BOFFSET_M 0x00000f80 /* B offset mask */
305#define HIFN_PUBOP_BOFFSET_S 7 /* B offset shift */
306#define HIFN_PUBOP_MOFFSET_M 0x0003f000 /* M offset mask */
307#define HIFN_PUBOP_MOFFSET_S 12 /* M offset shift */
308#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
309#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
310#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
311#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
312#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
313#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
314#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
315#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
316#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
317#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
318#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
319#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
320#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular RED */
321#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular EXP */
322
323/* Public status register (HIFN_1_PUB_STATUS) */
324#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
325#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
326
327/* Public interrupt enable register (HIFN_1_PUB_IEN) */
328#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
329
330/* Random number generator config register (HIFN_1_RNG_CONFIG) */
331#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
332
333#define HIFN_NAMESIZE 32
334#define HIFN_MAX_RESULT_ORDER 5
335
 336#define HIFN_D_CMD_RSIZE (24 * 4)
 337#define HIFN_D_SRC_RSIZE (80 * 4)
 338#define HIFN_D_DST_RSIZE (80 * 4)
 339#define HIFN_D_RES_RSIZE (24 * 4)
 340
 341#define HIFN_QUEUE_LENGTH (HIFN_D_CMD_RSIZE - 5)
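/*
 * Ring geometry: the command and result rings hold 24*4 = 96 descriptors,
 * the source and destination rings hold 80*4 = 320.  HIFN_QUEUE_LENGTH
 * bounds the number of outstanding requests (see hifn_setup_session()),
 * presumably leaving a few command slots of headroom so the command ring
 * is never filled completely.
 */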
342
343#define AES_MIN_KEY_SIZE 16
344#define AES_MAX_KEY_SIZE 32
345
346#define HIFN_DES_KEY_LENGTH 8
347#define HIFN_3DES_KEY_LENGTH 24
348#define HIFN_MAX_CRYPT_KEY_LENGTH AES_MAX_KEY_SIZE
349#define HIFN_IV_LENGTH 8
350#define HIFN_AES_IV_LENGTH 16
351#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
352
353#define HIFN_MAC_KEY_LENGTH 64
354#define HIFN_MD5_LENGTH 16
355#define HIFN_SHA1_LENGTH 20
356#define HIFN_MAC_TRUNC_LENGTH 12
357
358#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
359#define HIFN_MAX_RESULT (8 + 4 + 4 + 20 + 4)
360#define HIFN_USED_RESULT 12
361
362struct hifn_desc
363{
364 volatile u32 l;
365 volatile u32 p;
366};
367
368struct hifn_dma {
369 struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
370 struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
371 struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
372 struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
373
374 u8 command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
375 u8 result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
376
377 u64 test_src, test_dst;
378
379 /*
380 * Our current positions for insertion and removal from the descriptor
381 * rings.
382 */
383 volatile int cmdi, srci, dsti, resi;
384 volatile int cmdu, srcu, dstu, resu;
385 int cmdk, srck, dstk, resk;
386};
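/*
 * Per-ring bookkeeping inside struct hifn_dma: "i" is the producer index
 * (next slot to fill), "u" is the number of descriptors currently handed
 * to the hardware and "k" is the consumer index advanced by
 * hifn_clear_rings().  Each ring is allocated with one extra slot; that
 * last slot is used to wrap the ring back to its start (see
 * hifn_init_dma()).
 */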
387
388#define HIFN_FLAG_CMD_BUSY (1<<0)
389#define HIFN_FLAG_SRC_BUSY (1<<1)
390#define HIFN_FLAG_DST_BUSY (1<<2)
391#define HIFN_FLAG_RES_BUSY (1<<3)
392#define HIFN_FLAG_OLD_KEY (1<<4)
393
394#define HIFN_DEFAULT_ACTIVE_NUM 5
395
396struct hifn_device
397{
398 char name[HIFN_NAMESIZE];
399
400 int irq;
401
402 struct pci_dev *pdev;
403 void __iomem *bar[3];
404
405 unsigned long result_mem;
406 dma_addr_t dst;
407
408 void *desc_virt;
409 dma_addr_t desc_dma;
410
411 u32 dmareg;
412
413 void *sa[HIFN_D_RES_RSIZE];
414
415 spinlock_t lock;
416
417 void *priv;
418
419 u32 flags;
420 int active, started;
421 struct delayed_work work;
422 unsigned long reset;
423 unsigned long success;
424 unsigned long prev_success;
425
426 u8 snum;
427
428 struct crypto_queue queue;
429 struct list_head alg_list;
430};
431
432#define HIFN_D_LENGTH 0x0000ffff
433#define HIFN_D_NOINVALID 0x01000000
434#define HIFN_D_MASKDONEIRQ 0x02000000
435#define HIFN_D_DESTOVER 0x04000000
436#define HIFN_D_OVER 0x08000000
437#define HIFN_D_LAST 0x20000000
438#define HIFN_D_JUMP 0x40000000
439#define HIFN_D_VALID 0x80000000
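/*
 * Descriptor control word (hifn_desc.l) layout: the low 16 bits hold the
 * buffer length, the bits above carry the flags defined here.  A data
 * descriptor is typically built as
 *
 *	d->l = __cpu_to_le32(size | HIFN_D_VALID | HIFN_D_LAST |
 *			HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID);
 *
 * while the sentinel entry at the end of a ring carries HIFN_D_JUMP so
 * the engine wraps back to the first descriptor.
 */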
440
441struct hifn_base_command
442{
443 volatile u16 masks;
444 volatile u16 session_num;
445 volatile u16 total_source_count;
446 volatile u16 total_dest_count;
447};
448
449#define HIFN_BASE_CMD_COMP 0x0100 /* enable compression engine */
450#define HIFN_BASE_CMD_PAD 0x0200 /* enable padding engine */
451#define HIFN_BASE_CMD_MAC 0x0400 /* enable MAC engine */
452#define HIFN_BASE_CMD_CRYPT 0x0800 /* enable crypt engine */
453#define HIFN_BASE_CMD_DECODE 0x2000
454#define HIFN_BASE_CMD_SRCLEN_M 0xc000
455#define HIFN_BASE_CMD_SRCLEN_S 14
456#define HIFN_BASE_CMD_DSTLEN_M 0x3000
457#define HIFN_BASE_CMD_DSTLEN_S 12
458#define HIFN_BASE_CMD_LENMASK_HI 0x30000
459#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
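/*
 * Source and destination byte counts are wider than 16 bits: bits 15:0 go
 * into total_source_count/total_dest_count, while bits 17:16 are folded
 * into the session_num word through the SRCLEN/DSTLEN fields above (see
 * hifn_setup_base_command()).
 */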
460
461/*
462 * Structure to help build up the command data structure.
463 */
464struct hifn_crypt_command
465{
466 volatile u16 masks;
467 volatile u16 header_skip;
468 volatile u16 source_count;
469 volatile u16 reserved;
470};
471
472#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
473#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
474#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
475#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
476#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
477#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
478#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
479#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
480#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
481#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
482#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
483#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
484#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
485#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
486#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
487#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
488#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
489#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
490#define HIFN_CRYPT_CMD_SRCLEN_S 14
491
492/*
493 * Structure to help build up the command data structure.
494 */
495struct hifn_mac_command
496{
497 volatile u16 masks;
498 volatile u16 header_skip;
499 volatile u16 source_count;
500 volatile u16 reserved;
501};
502
503#define HIFN_MAC_CMD_ALG_MASK 0x0001
504#define HIFN_MAC_CMD_ALG_SHA1 0x0000
505#define HIFN_MAC_CMD_ALG_MD5 0x0001
506#define HIFN_MAC_CMD_MODE_MASK 0x000c
507#define HIFN_MAC_CMD_MODE_HMAC 0x0000
508#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
509#define HIFN_MAC_CMD_MODE_HASH 0x0008
510#define HIFN_MAC_CMD_MODE_FULL 0x0004
511#define HIFN_MAC_CMD_TRUNC 0x0010
512#define HIFN_MAC_CMD_RESULT 0x0020
513#define HIFN_MAC_CMD_APPEND 0x0040
514#define HIFN_MAC_CMD_SRCLEN_M 0xc000
515#define HIFN_MAC_CMD_SRCLEN_S 14
516
517/*
518 * MAC POS IPsec initiates authentication after encryption on encodes
519 * and before decryption on decodes.
520 */
521#define HIFN_MAC_CMD_POS_IPSEC 0x0200
522#define HIFN_MAC_CMD_NEW_KEY 0x0800
523
524struct hifn_comp_command
525{
526 volatile u16 masks;
527 volatile u16 header_skip;
528 volatile u16 source_count;
529 volatile u16 reserved;
530};
531
532#define HIFN_COMP_CMD_SRCLEN_M 0xc000
533#define HIFN_COMP_CMD_SRCLEN_S 14
534#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */
535#define HIFN_COMP_CMD_CLEARHIST 0x0010 /* clear history */
536#define HIFN_COMP_CMD_UPDATEHIST 0x0008 /* update history */
537#define HIFN_COMP_CMD_LZS_STRIP0 0x0004 /* LZS: strip zero */
538#define HIFN_COMP_CMD_MPPC_RESTART 0x0004 /* MPPC: restart */
539#define HIFN_COMP_CMD_ALG_MASK 0x0001 /* compression mode: */
540#define HIFN_COMP_CMD_ALG_MPPC 0x0001 /* MPPC */
541#define HIFN_COMP_CMD_ALG_LZS 0x0000 /* LZS */
542
543struct hifn_base_result
544{
545 volatile u16 flags;
546 volatile u16 session;
547 volatile u16 src_cnt; /* 15:0 of source count */
548 volatile u16 dst_cnt; /* 15:0 of dest count */
549};
550
551#define HIFN_BASE_RES_DSTOVERRUN 0x0200 /* destination overrun */
552#define HIFN_BASE_RES_SRCLEN_M 0xc000 /* 17:16 of source count */
553#define HIFN_BASE_RES_SRCLEN_S 14
554#define HIFN_BASE_RES_DSTLEN_M 0x3000 /* 17:16 of dest count */
555#define HIFN_BASE_RES_DSTLEN_S 12
556
557struct hifn_comp_result
558{
559 volatile u16 flags;
560 volatile u16 crc;
561};
562
563#define HIFN_COMP_RES_LCB_M 0xff00 /* longitudinal check byte */
564#define HIFN_COMP_RES_LCB_S 8
565#define HIFN_COMP_RES_RESTART 0x0004 /* MPPC: restart */
566#define HIFN_COMP_RES_ENDMARKER 0x0002 /* LZS: end marker seen */
567#define HIFN_COMP_RES_SRC_NOTZERO 0x0001 /* source expired */
568
569struct hifn_mac_result
570{
571 volatile u16 flags;
572 volatile u16 reserved;
573 /* followed by 0, 6, 8, or 10 u16's of the MAC, then crypt */
574};
575
576#define HIFN_MAC_RES_MISCOMPARE 0x0002 /* compare failed */
577#define HIFN_MAC_RES_SRC_NOTZERO 0x0001 /* source expired */
578
579struct hifn_crypt_result
580{
581 volatile u16 flags;
582 volatile u16 reserved;
583};
584
585#define HIFN_CRYPT_RES_SRC_NOTZERO 0x0001 /* source expired */
586
587#ifndef HIFN_POLL_FREQUENCY
588#define HIFN_POLL_FREQUENCY 0x1
589#endif
590
591#ifndef HIFN_POLL_SCALAR
592#define HIFN_POLL_SCALAR 0x0
593#endif
594
595#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
596#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
597
598struct hifn_crypto_alg
599{
600 struct list_head entry;
601 struct crypto_alg alg;
602 struct hifn_device *dev;
603};
604
605#define ASYNC_SCATTERLIST_CACHE 16
606
607#define ASYNC_FLAGS_MISALIGNED (1<<0)
608
609struct ablkcipher_walk
610{
611 struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
612 u32 flags;
613 int num;
614};
615
616struct hifn_context
617{
618 u8 key[HIFN_MAX_CRYPT_KEY_LENGTH], *iv;
619 struct hifn_device *dev;
620 unsigned int keysize, ivsize;
621 u8 op, type, mode, unused;
622 struct ablkcipher_walk walk;
623 atomic_t sg_num;
624};
625
626#define crypto_alg_to_hifn(alg) container_of(alg, struct hifn_crypto_alg, alg)
627
628static inline u32 hifn_read_0(struct hifn_device *dev, u32 reg)
629{
630 u32 ret;
631
632 ret = readl((char *)(dev->bar[0]) + reg);
633
634 return ret;
635}
636
637static inline u32 hifn_read_1(struct hifn_device *dev, u32 reg)
638{
639 u32 ret;
640
641 ret = readl((char *)(dev->bar[1]) + reg);
642
643 return ret;
644}
645
646static inline void hifn_write_0(struct hifn_device *dev, u32 reg, u32 val)
647{
648 writel(val, (char *)(dev->bar[0]) + reg);
649}
650
651static inline void hifn_write_1(struct hifn_device *dev, u32 reg, u32 val)
652{
653 writel(val, (char *)(dev->bar[1]) + reg);
654}
655
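/*
 * Busy-wait (up to roughly 10ms) for the processing unit to clear its
 * RESET bit after a reset has been requested.
 */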
656static void hifn_wait_puc(struct hifn_device *dev)
657{
658 int i;
659 u32 ret;
660
661 for (i=10000; i > 0; --i) {
662 ret = hifn_read_0(dev, HIFN_0_PUCTRL);
663 if (!(ret & HIFN_PUCTRL_RESET))
664 break;
665
666 udelay(1);
667 }
668
669 if (!i)
670 dprintk("%s: Failed to reset PUC unit.\n", dev->name);
671}
672
673static void hifn_reset_puc(struct hifn_device *dev)
674{
675 hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
676 hifn_wait_puc(dev);
677}
678
679static void hifn_stop_device(struct hifn_device *dev)
680{
681 hifn_write_1(dev, HIFN_1_DMA_CSR,
682 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
683 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS);
684 hifn_write_0(dev, HIFN_0_PUIER, 0);
685 hifn_write_1(dev, HIFN_1_DMA_IER, 0);
686}
687
688static void hifn_reset_dma(struct hifn_device *dev, int full)
689{
690 hifn_stop_device(dev);
691
692 /*
693 * Setting poll frequency and others to 0.
694 */
695 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
696 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
697 mdelay(1);
698
699 /*
700 * Reset DMA.
701 */
702 if (full) {
703 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
704 mdelay(1);
705 } else {
706 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE |
707 HIFN_DMACNFG_MSTRESET);
708 hifn_reset_puc(dev);
709 }
710
711 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
712 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
713
714 hifn_reset_puc(dev);
715}
716
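/*
 * Compute the next value of the unlock signature sequence used by
 * hifn_enable_crypto(): each iteration takes the parity of
 * (a & 0x80080125) by folding the word onto itself and shifts it in as
 * the new least significant bit of the running value.
 */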
717static u32 hifn_next_signature(u_int32_t a, u_int cnt)
718{
719 int i;
720 u32 v;
721
722 for (i = 0; i < cnt; i++) {
723
724 /* get the parity */
725 v = a & 0x80080125;
726 v ^= v >> 16;
727 v ^= v >> 8;
728 v ^= v >> 4;
729 v ^= v >> 2;
730 v ^= v >> 1;
731
732 a = (v & 1) ^ (a << 1);
733 }
734
735 return a;
736}
737
738static struct pci2id {
739 u_short pci_vendor;
740 u_short pci_prod;
741 char card_id[13];
742} pci2id[] = {
743 {
744 PCI_VENDOR_ID_HIFN,
745 PCI_DEVICE_ID_HIFN_7955,
746 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x00, 0x00, 0x00, 0x00 }
748 },
749 {
750 PCI_VENDOR_ID_HIFN,
751 PCI_DEVICE_ID_HIFN_7956,
752 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
753 0x00, 0x00, 0x00, 0x00, 0x00 }
754 }
755};
756
757static int hifn_init_pubrng(struct hifn_device *dev)
758{
759 int i;
760
761 hifn_write_1(dev, HIFN_1_PUB_RESET, hifn_read_1(dev, HIFN_1_PUB_RESET) |
762 HIFN_PUBRST_RESET);
763
764 for (i=100; i > 0; --i) {
765 mdelay(1);
766
767 if ((hifn_read_1(dev, HIFN_1_PUB_RESET) & HIFN_PUBRST_RESET) == 0)
768 break;
769 }
770
771 if (!i)
772 dprintk("Chip %s: Failed to initialise public key engine.\n",
773 dev->name);
774 else {
775 hifn_write_1(dev, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
776 dev->dmareg |= HIFN_DMAIER_PUBDONE;
777 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
778
779 dprintk("Chip %s: Public key engine has been sucessfully "
780 "initialised.\n", dev->name);
781 }
782
783 /*
784 * Enable RNG engine.
785 */
786
787 hifn_write_1(dev, HIFN_1_RNG_CONFIG,
788 hifn_read_1(dev, HIFN_1_RNG_CONFIG) | HIFN_RNGCFG_ENA);
789 dprintk("Chip %s: RNG engine has been successfully initialised.\n",
790 dev->name);
791
792 return 0;
793}
794
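/*
 * Unlock the crypto engine: after a master/DMA reset the driver reads the
 * SECRET1 register, writes a zero to SECRET2 and then feeds it twelve
 * signature words derived from the per-card ID table via
 * hifn_next_signature().  The original DMA configuration is restored
 * afterwards.
 */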
795static int hifn_enable_crypto(struct hifn_device *dev)
796{
797 u32 dmacfg, addr;
798 char *offtbl = NULL;
799 int i;
800
801 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
802 if (pci2id[i].pci_vendor == dev->pdev->vendor &&
803 pci2id[i].pci_prod == dev->pdev->device) {
804 offtbl = pci2id[i].card_id;
805 break;
806 }
807 }
808
809 if (offtbl == NULL) {
810 dprintk("Chip %s: Unknown card!\n", dev->name);
811 return -ENODEV;
812 }
813
814 dmacfg = hifn_read_1(dev, HIFN_1_DMA_CNFG);
815
816 hifn_write_1(dev, HIFN_1_DMA_CNFG,
817 HIFN_DMACNFG_UNLOCK | HIFN_DMACNFG_MSTRESET |
818 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
819 mdelay(1);
820 addr = hifn_read_1(dev, HIFN_1_UNLOCK_SECRET1);
821 mdelay(1);
822 hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, 0);
823 mdelay(1);
824
825 for (i=0; i<12; ++i) {
826 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
827 hifn_write_1(dev, HIFN_1_UNLOCK_SECRET2, addr);
828
829 mdelay(1);
830 }
831 hifn_write_1(dev, HIFN_1_DMA_CNFG, dmacfg);
832
833 dprintk("Chip %s: %s.\n", dev->name, pci_name(dev->pdev));
834
835 return 0;
836}
837
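/*
 * Point every command/result descriptor at its static buffer inside
 * struct hifn_dma, initialise the pointer of each ring's trailing
 * descriptor so the ring can wrap back to its start, and reset all ring
 * indexes and counters.
 */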
838static void hifn_init_dma(struct hifn_device *dev)
839{
840 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
841 u32 dptr = dev->desc_dma;
842 int i;
843
844 for (i=0; i<HIFN_D_CMD_RSIZE; ++i)
845 dma->cmdr[i].p = __cpu_to_le32(dptr +
846 offsetof(struct hifn_dma, command_bufs[i][0]));
847 for (i=0; i<HIFN_D_RES_RSIZE; ++i)
848 dma->resr[i].p = __cpu_to_le32(dptr +
849 offsetof(struct hifn_dma, result_bufs[i][0]));
850
851 /*
852 * Setup LAST descriptors.
853 */
854 dma->cmdr[HIFN_D_CMD_RSIZE].p = __cpu_to_le32(dptr +
855 offsetof(struct hifn_dma, cmdr[0]));
856 dma->srcr[HIFN_D_SRC_RSIZE].p = __cpu_to_le32(dptr +
857 offsetof(struct hifn_dma, srcr[0]));
858 dma->dstr[HIFN_D_DST_RSIZE].p = __cpu_to_le32(dptr +
859 offsetof(struct hifn_dma, dstr[0]));
860 dma->resr[HIFN_D_RES_RSIZE].p = __cpu_to_le32(dptr +
861 offsetof(struct hifn_dma, resr[0]));
862
863 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
864 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
865 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
866}
867
868static void hifn_init_registers(struct hifn_device *dev)
869{
870 u32 dptr = dev->desc_dma;
871
872 /* Initialization magic... */
873 hifn_write_0(dev, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
874 hifn_write_0(dev, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
875 hifn_write_0(dev, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
876
877 /* write all 4 ring address registers */
878 hifn_write_1(dev, HIFN_1_DMA_CRAR, __cpu_to_le32(dptr +
879 offsetof(struct hifn_dma, cmdr[0])));
880 hifn_write_1(dev, HIFN_1_DMA_SRAR, __cpu_to_le32(dptr +
881 offsetof(struct hifn_dma, srcr[0])));
882 hifn_write_1(dev, HIFN_1_DMA_DRAR, __cpu_to_le32(dptr +
883 offsetof(struct hifn_dma, dstr[0])));
884 hifn_write_1(dev, HIFN_1_DMA_RRAR, __cpu_to_le32(dptr +
885 offsetof(struct hifn_dma, resr[0])));
886
887 mdelay(2);
888#if 0
889 hifn_write_1(dev, HIFN_1_DMA_CSR,
890 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
891 HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
892 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
893 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
894 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
895 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
896 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
897 HIFN_DMACSR_S_WAIT |
898 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
899 HIFN_DMACSR_C_WAIT |
900 HIFN_DMACSR_ENGINE |
901 HIFN_DMACSR_PUBDONE);
902#else
903 hifn_write_1(dev, HIFN_1_DMA_CSR,
904 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
905 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA |
906 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
907 HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
908 HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
909 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
910 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
911 HIFN_DMACSR_S_WAIT |
912 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
913 HIFN_DMACSR_C_WAIT |
914 HIFN_DMACSR_ENGINE |
915 HIFN_DMACSR_PUBDONE);
916#endif
917 hifn_read_1(dev, HIFN_1_DMA_CSR);
918
919 dev->dmareg |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
920 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
921 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
922 HIFN_DMAIER_ENGINE;
923 dev->dmareg &= ~HIFN_DMAIER_C_WAIT;
924
925 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
926 hifn_read_1(dev, HIFN_1_DMA_IER);
927#if 0
928 hifn_write_0(dev, HIFN_0_PUCNFG, HIFN_PUCNFG_ENCCNFG |
929 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
930 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
931 HIFN_PUCNFG_DRAM);
932#else
933 hifn_write_0(dev, HIFN_0_PUCNFG, 0x10342);
934#endif
935 hifn_write_1(dev, HIFN_1_PLL, HIFN_PLL_7956);
936
937 hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
938 hifn_write_1(dev, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
939 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
940 ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
941 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
942}
943
944static int hifn_setup_base_command(struct hifn_device *dev, u8 *buf,
945 unsigned dlen, unsigned slen, u16 mask, u8 snum)
946{
947 struct hifn_base_command *base_cmd;
948 u8 *buf_pos = buf;
949
950 base_cmd = (struct hifn_base_command *)buf_pos;
951 base_cmd->masks = __cpu_to_le16(mask);
952 base_cmd->total_source_count =
953 __cpu_to_le16(slen & HIFN_BASE_CMD_LENMASK_LO);
954 base_cmd->total_dest_count =
955 __cpu_to_le16(dlen & HIFN_BASE_CMD_LENMASK_LO);
956
957 dlen >>= 16;
958 slen >>= 16;
959 base_cmd->session_num = __cpu_to_le16(snum |
960 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
961 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
962
963 return sizeof(struct hifn_base_command);
964}
965
966static int hifn_setup_crypto_command(struct hifn_device *dev,
967 u8 *buf, unsigned dlen, unsigned slen,
968 u8 *key, int keylen, u8 *iv, int ivsize, u16 mode)
969{
970 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
971 struct hifn_crypt_command *cry_cmd;
972 u8 *buf_pos = buf;
973 u16 cmd_len;
974
975 cry_cmd = (struct hifn_crypt_command *)buf_pos;
976
977 cry_cmd->source_count = __cpu_to_le16(dlen & 0xffff);
978 dlen >>= 16;
979 cry_cmd->masks = __cpu_to_le16(mode |
980 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) &
981 HIFN_CRYPT_CMD_SRCLEN_M));
982 cry_cmd->header_skip = 0;
983 cry_cmd->reserved = 0;
984
985 buf_pos += sizeof(struct hifn_crypt_command);
986
987 dma->cmdu++;
988 if (dma->cmdu > 1) {
989 dev->dmareg |= HIFN_DMAIER_C_WAIT;
990 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
991 }
992
993 if (keylen) {
994 memcpy(buf_pos, key, keylen);
995 buf_pos += keylen;
996 }
997 if (ivsize) {
998 memcpy(buf_pos, iv, ivsize);
999 buf_pos += ivsize;
1000 }
1001
1002 cmd_len = buf_pos - buf;
1003
1004 return cmd_len;
1005}
1006
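/*
 * Map the page for DMA and fill the next source descriptor.  When the
 * producer index reaches the end of the ring the sentinel slot is turned
 * into a JUMP descriptor and the index wraps to zero; the source ring is
 * (re)enabled through the DMA CSR if it was idle.
 */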
1007static int hifn_setup_src_desc(struct hifn_device *dev, struct page *page,
1008 unsigned int offset, unsigned int size)
1009{
1010 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1011 int idx;
1012 dma_addr_t addr;
1013
1014 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_TODEVICE);
1015
1016 idx = dma->srci;
1017
1018 dma->srcr[idx].p = __cpu_to_le32(addr);
1019 dma->srcr[idx].l = __cpu_to_le32(size) | HIFN_D_VALID |
1020 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST;
1021
1022 if (++idx == HIFN_D_SRC_RSIZE) {
1023 dma->srcr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1024 HIFN_D_JUMP |
1025 HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1026 idx = 0;
1027 }
1028
1029 dma->srci = idx;
1030 dma->srcu++;
1031
1032 if (!(dev->flags & HIFN_FLAG_SRC_BUSY)) {
1033 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
1034 dev->flags |= HIFN_FLAG_SRC_BUSY;
1035 }
1036
1037 return size;
1038}
1039
1040static void hifn_setup_res_desc(struct hifn_device *dev)
1041{
1042 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1043
1044 dma->resr[dma->resi].l = __cpu_to_le32(HIFN_USED_RESULT |
1045 HIFN_D_VALID | HIFN_D_LAST);
1046 /*
1047 * dma->resr[dma->resi].l = __cpu_to_le32(HIFN_MAX_RESULT | HIFN_D_VALID |
1048 * HIFN_D_LAST | HIFN_D_NOINVALID);
1049 */
1050
1051 if (++dma->resi == HIFN_D_RES_RSIZE) {
1052 dma->resr[HIFN_D_RES_RSIZE].l = __cpu_to_le32(HIFN_D_VALID |
1053 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ | HIFN_D_LAST);
1054 dma->resi = 0;
1055 }
1056
1057 dma->resu++;
1058
1059 if (!(dev->flags & HIFN_FLAG_RES_BUSY)) {
1060 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
1061 dev->flags |= HIFN_FLAG_RES_BUSY;
1062 }
1063}
1064
1065static void hifn_setup_dst_desc(struct hifn_device *dev, struct page *page,
1066 unsigned offset, unsigned size)
1067{
1068 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1069 int idx;
1070 dma_addr_t addr;
1071
1072 addr = pci_map_page(dev->pdev, page, offset, size, PCI_DMA_FROMDEVICE);
1073
1074 idx = dma->dsti;
1075 dma->dstr[idx].p = __cpu_to_le32(addr);
1076 dma->dstr[idx].l = __cpu_to_le32(size | HIFN_D_VALID |
1077 HIFN_D_MASKDONEIRQ | HIFN_D_NOINVALID | HIFN_D_LAST);
1078
1079 if (++idx == HIFN_D_DST_RSIZE) {
1080 dma->dstr[idx].l = __cpu_to_le32(HIFN_D_VALID |
1081 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ |
1082 HIFN_D_LAST | HIFN_D_NOINVALID);
1083 idx = 0;
1084 }
1085 dma->dsti = idx;
1086 dma->dstu++;
1087
1088 if (!(dev->flags & HIFN_FLAG_DST_BUSY)) {
1089 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
1090 dev->flags |= HIFN_FLAG_DST_BUSY;
1091 }
1092}
1093
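/*
 * Build one complete hardware transaction: a source data descriptor, a
 * command descriptor carrying the base command plus (for encrypt/decrypt)
 * the crypto command with key and IV appended, a destination data
 * descriptor and a result descriptor.  The caller's private pointer is
 * remembered in dev->sa[] at the result-ring slot so the completion path
 * can hand the finished request back.
 */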
1094static int hifn_setup_dma(struct hifn_device *dev, struct page *spage, unsigned int soff,
1095 struct page *dpage, unsigned int doff, unsigned int nbytes, void *priv,
1096 struct hifn_context *ctx)
1097{
1098 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1099 int cmd_len, sa_idx;
1100 u8 *buf, *buf_pos;
1101 u16 mask;
1102
1103 dprintk("%s: spage: %p, soffset: %u, dpage: %p, doffset: %u, nbytes: %u, priv: %p, ctx: %p.\n",
1104 dev->name, spage, soff, dpage, doff, nbytes, priv, ctx);
1105
1106 sa_idx = dma->resi;
1107
1108 hifn_setup_src_desc(dev, spage, soff, nbytes);
1109
1110 buf_pos = buf = dma->command_bufs[dma->cmdi];
1111
1112 mask = 0;
1113 switch (ctx->op) {
1114 case ACRYPTO_OP_DECRYPT:
1115 mask = HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE;
1116 break;
1117 case ACRYPTO_OP_ENCRYPT:
1118 mask = HIFN_BASE_CMD_CRYPT;
1119 break;
1120 case ACRYPTO_OP_HMAC:
1121 mask = HIFN_BASE_CMD_MAC;
1122 break;
1123 default:
1124 goto err_out;
1125 }
1126
1127 buf_pos += hifn_setup_base_command(dev, buf_pos, nbytes,
1128 nbytes, mask, dev->snum);
1129
1130 if (ctx->op == ACRYPTO_OP_ENCRYPT || ctx->op == ACRYPTO_OP_DECRYPT) {
1131 u16 md = 0;
1132
1133 if (ctx->keysize)
1134 md |= HIFN_CRYPT_CMD_NEW_KEY;
1135 if (ctx->iv && ctx->mode != ACRYPTO_MODE_ECB)
1136 md |= HIFN_CRYPT_CMD_NEW_IV;
1137
1138 switch (ctx->mode) {
1139 case ACRYPTO_MODE_ECB:
1140 md |= HIFN_CRYPT_CMD_MODE_ECB;
1141 break;
1142 case ACRYPTO_MODE_CBC:
1143 md |= HIFN_CRYPT_CMD_MODE_CBC;
1144 break;
1145 case ACRYPTO_MODE_CFB:
1146 md |= HIFN_CRYPT_CMD_MODE_CFB;
1147 break;
1148 case ACRYPTO_MODE_OFB:
1149 md |= HIFN_CRYPT_CMD_MODE_OFB;
1150 break;
1151 default:
1152 goto err_out;
1153 }
1154
1155 switch (ctx->type) {
1156 case ACRYPTO_TYPE_AES_128:
1157 if (ctx->keysize != 16)
1158 goto err_out;
1159 md |= HIFN_CRYPT_CMD_KSZ_128 |
1160 HIFN_CRYPT_CMD_ALG_AES;
1161 break;
1162 case ACRYPTO_TYPE_AES_192:
1163 if (ctx->keysize != 24)
1164 goto err_out;
1165 md |= HIFN_CRYPT_CMD_KSZ_192 |
1166 HIFN_CRYPT_CMD_ALG_AES;
1167 break;
1168 case ACRYPTO_TYPE_AES_256:
1169 if (ctx->keysize != 32)
1170 goto err_out;
1171 md |= HIFN_CRYPT_CMD_KSZ_256 |
1172 HIFN_CRYPT_CMD_ALG_AES;
1173 break;
1174 case ACRYPTO_TYPE_3DES:
1175 if (ctx->keysize != 24)
1176 goto err_out;
1177 md |= HIFN_CRYPT_CMD_ALG_3DES;
1178 break;
1179 case ACRYPTO_TYPE_DES:
1180 if (ctx->keysize != 8)
1181 goto err_out;
1182 md |= HIFN_CRYPT_CMD_ALG_DES;
1183 break;
1184 default:
1185 goto err_out;
1186 }
1187
1188 buf_pos += hifn_setup_crypto_command(dev, buf_pos,
1189 nbytes, nbytes, ctx->key, ctx->keysize,
1190 ctx->iv, ctx->ivsize, md);
1191 }
1192
1193 dev->sa[sa_idx] = priv;
1194
1195 cmd_len = buf_pos - buf;
1196 dma->cmdr[dma->cmdi].l = __cpu_to_le32(cmd_len | HIFN_D_VALID |
1197 HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
1198
1199 if (++dma->cmdi == HIFN_D_CMD_RSIZE) {
1200 dma->cmdr[dma->cmdi].l = __cpu_to_le32(HIFN_MAX_COMMAND |
1201 HIFN_D_VALID | HIFN_D_LAST |
1202 HIFN_D_MASKDONEIRQ | HIFN_D_JUMP);
1203 dma->cmdi = 0;
1204 } else
1205 dma->cmdr[dma->cmdi-1].l |= __cpu_to_le32(HIFN_D_VALID);
1206
1207 if (!(dev->flags & HIFN_FLAG_CMD_BUSY)) {
1208 hifn_write_1(dev, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
1209 dev->flags |= HIFN_FLAG_CMD_BUSY;
1210 }
1211
1212 hifn_setup_dst_desc(dev, dpage, doff, nbytes);
1213 hifn_setup_res_desc(dev);
1214
1215 return 0;
1216
1217err_out:
1218 return -EINVAL;
1219}
1220
1221static int ablkcipher_walk_init(struct ablkcipher_walk *w,
1222 int num, gfp_t gfp_flags)
1223{
1224 int i;
1225
1226 num = min(ASYNC_SCATTERLIST_CACHE, num);
1227 sg_init_table(w->cache, num);
1228
1229 w->num = 0;
1230 for (i=0; i<num; ++i) {
1231 struct page *page = alloc_page(gfp_flags);
1232 struct scatterlist *s;
1233
1234 if (!page)
1235 break;
1236
1237 s = &w->cache[i];
1238
1239 sg_set_page(s, page, PAGE_SIZE, 0);
1240 w->num++;
1241 }
1242
1243 return i;
1244}
1245
1246static void ablkcipher_walk_exit(struct ablkcipher_walk *w)
1247{
1248 int i;
1249
1250 for (i=0; i<w->num; ++i) {
1251 struct scatterlist *s = &w->cache[i];
1252
1253 __free_page(sg_page(s));
1254
1255 s->length = 0;
1256 }
1257
1258 w->num = 0;
1259}
1260
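/*
 * Copy data from the source scatterlist into the linear buffer at daddr,
 * walking as many scatterlist entries as needed.  The remaining
 * destination space (*drestp) and byte count (*nbytesp) are updated in
 * place and the number of consumed scatterlist entries is returned.
 */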
1261static int ablkcipher_add(void *daddr, unsigned int *drestp, struct scatterlist *src,
1262 unsigned int size, unsigned int *nbytesp)
1263{
1264 unsigned int copy, drest = *drestp, nbytes = *nbytesp;
1265 int idx = 0;
1266 void *saddr;
1267
1268 if (drest < size || size > nbytes)
1269 return -EINVAL;
1270
1271 while (size) {
1272 copy = min(drest, src->length);
1273
1274 saddr = kmap_atomic(sg_page(src), KM_SOFTIRQ1);
1275 memcpy(daddr, saddr + src->offset, copy);
1276 kunmap_atomic(saddr, KM_SOFTIRQ1);
1277
1278 size -= copy;
1279 drest -= copy;
1280 nbytes -= copy;
1281 daddr += copy;
1282
1283 dprintk("%s: copy: %u, size: %u, drest: %u, nbytes: %u.\n",
1284 __func__, copy, size, drest, nbytes);
1285
1286 src++;
1287 idx++;
1288 }
1289
1290 *nbytesp = nbytes;
1291 *drestp = drest;
1292
1293 return idx;
1294}
1295
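/*
 * Walk the request's scatterlists and, wherever an entry is not a
 * blocksize multiple or violates the alignment mask, copy the data into
 * one of the pre-allocated cache pages so the hardware only ever sees
 * aligned, blocksize-multiple chunks.  Returns the number of chunks the
 * request was split into.
 */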
1296static int ablkcipher_walk(struct ablkcipher_request *req,
1297 struct ablkcipher_walk *w)
1298{
1299 unsigned blocksize =
1300 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1301 unsigned alignmask =
1302 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1303 struct scatterlist *src, *dst, *t;
1304 void *daddr;
1305 unsigned int nbytes = req->nbytes, offset, copy, diff;
1306 int idx, tidx, err;
1307
1308 tidx = idx = 0;
1309 offset = 0;
1310 while (nbytes) {
1311 if (idx >= w->num && (w->flags & ASYNC_FLAGS_MISALIGNED))
1312 return -EINVAL;
1313
1314 src = &req->src[idx];
1315 dst = &req->dst[idx];
1316
1317 dprintk("\n%s: slen: %u, dlen: %u, soff: %u, doff: %u, offset: %u, "
1318 "blocksize: %u, nbytes: %u.\n",
1319 __func__, src->length, dst->length, src->offset,
1320 dst->offset, offset, blocksize, nbytes);
1321
1322 if (src->length & (blocksize - 1) ||
1323 src->offset & (alignmask - 1) ||
1324 dst->length & (blocksize - 1) ||
1325 dst->offset & (alignmask - 1) ||
1326 offset) {
1327 unsigned slen = src->length - offset;
1328 unsigned dlen = PAGE_SIZE;
1329
1330 t = &w->cache[idx];
1331
1332 daddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
1333 err = ablkcipher_add(daddr, &dlen, src, slen, &nbytes);
1334 if (err < 0)
1335 goto err_out_unmap;
1336
1337 idx += err;
1338
1339 copy = slen & ~(blocksize - 1);
1340 diff = slen & (blocksize - 1);
1341
1342 if (dlen < nbytes) {
 1343 /*
 1344 * The destination page does not have enough space
 1345 * for an additional blocksize-aligned chunk, so we
 1346 * mark that page as containing only blocksize-
 1347 * aligned chunks:
 1348 * t->length = (slen & ~(blocksize - 1));
 1349 * and increase the number of bytes to be processed
 1350 * in the next chunk:
 1351 * nbytes += diff;
 1352 */
1353 nbytes += diff;
1354
 1355 /*
 1356 * Temporary of course...
 1357 * Kick the author if you ever hit this one.
 1358 */
 1359 printk(KERN_ERR "%s: dlen: %u, nbytes: %u, "
 1360 "slen: %u, offset: %u.\n",
 1361 __func__, dlen, nbytes, slen, offset);
 1362 printk(KERN_ERR "%s: please contact the author to fix this "
 1363 "issue; generally you should never hit "
 1364 "this path, but who knows how you used "
 1365 "the crypto code.\n"
 1366 "Thank you.\n", __func__);
1367 BUG();
1368 } else {
1369 copy += diff + nbytes;
1370
1371 src = &req->src[idx];
1372
1373 err = ablkcipher_add(daddr + slen, &dlen, src, nbytes, &nbytes);
1374 if (err < 0)
1375 goto err_out_unmap;
1376
1377 idx += err;
1378 }
1379
1380 t->length = copy;
1381 t->offset = offset;
1382
1383 kunmap_atomic(daddr, KM_SOFTIRQ0);
1384 } else {
1385 nbytes -= src->length;
1386 idx++;
1387 }
1388
1389 tidx++;
1390 }
1391
1392 return tidx;
1393
1394err_out_unmap:
1395 kunmap_atomic(daddr, KM_SOFTIRQ0);
1396 return err;
1397}
1398
1399static int hifn_setup_session(struct ablkcipher_request *req)
1400{
1401 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1402 struct hifn_device *dev = ctx->dev;
1403 struct page *spage, *dpage;
1404 unsigned long soff, doff, flags;
1405 unsigned int nbytes = req->nbytes, idx = 0, len;
1406 int err = -EINVAL, sg_num;
1407 struct scatterlist *src, *dst, *t;
1408 unsigned blocksize =
1409 crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(req));
1410 unsigned alignmask =
1411 crypto_ablkcipher_alignmask(crypto_ablkcipher_reqtfm(req));
1412
1413 if (ctx->iv && !ctx->ivsize && ctx->mode != ACRYPTO_MODE_ECB)
1414 goto err_out_exit;
1415
1416 ctx->walk.flags = 0;
1417
1418 while (nbytes) {
1419 src = &req->src[idx];
1420 dst = &req->dst[idx];
1421
1422 if (src->length & (blocksize - 1) ||
1423 src->offset & (alignmask - 1) ||
1424 dst->length & (blocksize - 1) ||
1425 dst->offset & (alignmask - 1)) {
1426 ctx->walk.flags |= ASYNC_FLAGS_MISALIGNED;
1427 }
1428
1429 nbytes -= src->length;
1430 idx++;
1431 }
1432
1433 if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1434 err = ablkcipher_walk_init(&ctx->walk, idx, GFP_ATOMIC);
1435 if (err < 0)
1436 return err;
1437 }
1438
1439 nbytes = req->nbytes;
1440 idx = 0;
1441
1442 sg_num = ablkcipher_walk(req, &ctx->walk);
1443
1444 atomic_set(&ctx->sg_num, sg_num);
1445
1446 spin_lock_irqsave(&dev->lock, flags);
1447 if (dev->started + sg_num > HIFN_QUEUE_LENGTH) {
1448 err = -EAGAIN;
1449 goto err_out;
1450 }
1451
1452 dev->snum++;
1453 dev->started += sg_num;
1454
1455 while (nbytes) {
1456 src = &req->src[idx];
1457 dst = &req->dst[idx];
1458 t = &ctx->walk.cache[idx];
1459
1460 if (t->length) {
1461 spage = dpage = sg_page(t);
1462 soff = doff = 0;
1463 len = t->length;
1464 } else {
1465 spage = sg_page(src);
1466 soff = src->offset;
1467
1468 dpage = sg_page(dst);
1469 doff = dst->offset;
1470
1471 len = dst->length;
1472 }
1473
1474 idx++;
1475
1476 err = hifn_setup_dma(dev, spage, soff, dpage, doff, nbytes,
1477 req, ctx);
1478 if (err)
1479 goto err_out;
1480
1481 nbytes -= len;
1482 }
1483
1484 dev->active = HIFN_DEFAULT_ACTIVE_NUM;
1485 spin_unlock_irqrestore(&dev->lock, flags);
1486
1487 return 0;
1488
1489err_out:
1490 spin_unlock_irqrestore(&dev->lock, flags);
1491err_out_exit:
1492 if (err && printk_ratelimit())
1493 dprintk("%s: iv: %p [%d], key: %p [%d], mode: %u, op: %u, "
1494 "type: %u, err: %d.\n",
1495 dev->name, ctx->iv, ctx->ivsize,
1496 ctx->key, ctx->keysize,
1497 ctx->mode, ctx->op, ctx->type, err);
1498
1499 return err;
1500}
1501
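/*
 * Simple self-test: run a block of zeroes through the engine in place,
 * using an all-zero AES-128 key in ECB mode, wait for the DMA to finish
 * and compare the result against the known AES-128-ECB ciphertext of a
 * zero block under a zero key.
 */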
1502static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
1503{
1504 int n, err;
1505 u8 src[16];
1506 struct hifn_context ctx;
1507 u8 fips_aes_ecb_from_zero[16] = {
1508 0x66, 0xE9, 0x4B, 0xD4,
1509 0xEF, 0x8A, 0x2C, 0x3B,
1510 0x88, 0x4C, 0xFA, 0x59,
1511 0xCA, 0x34, 0x2B, 0x2E};
1512
1513 memset(src, 0, sizeof(src));
1514 memset(ctx.key, 0, sizeof(ctx.key));
1515
1516 ctx.dev = dev;
1517 ctx.keysize = 16;
1518 ctx.ivsize = 0;
1519 ctx.iv = NULL;
1520 ctx.op = (encdec)?ACRYPTO_OP_ENCRYPT:ACRYPTO_OP_DECRYPT;
1521 ctx.mode = ACRYPTO_MODE_ECB;
1522 ctx.type = ACRYPTO_TYPE_AES_128;
1523 atomic_set(&ctx.sg_num, 1);
1524
1525 err = hifn_setup_dma(dev,
1526 virt_to_page(src), offset_in_page(src),
1527 virt_to_page(src), offset_in_page(src),
1528 sizeof(src), NULL, &ctx);
1529 if (err)
1530 goto err_out;
1531
1532 msleep(200);
1533
1534 dprintk("%s: decoded: ", dev->name);
1535 for (n=0; n<sizeof(src); ++n)
1536 dprintk("%02x ", src[n]);
1537 dprintk("\n");
1538 dprintk("%s: FIPS : ", dev->name);
1539 for (n=0; n<sizeof(fips_aes_ecb_from_zero); ++n)
1540 dprintk("%02x ", fips_aes_ecb_from_zero[n]);
1541 dprintk("\n");
1542
1543 if (!memcmp(src, fips_aes_ecb_from_zero, sizeof(fips_aes_ecb_from_zero))) {
1544 printk(KERN_INFO "%s: AES 128 ECB test has been successfully "
1545 "passed.\n", dev->name);
1546 return 0;
1547 }
1548
1549err_out:
 1550 printk(KERN_INFO "%s: AES 128 ECB test failed.\n", dev->name);
1551 return -1;
1552}
1553
1554static int hifn_start_device(struct hifn_device *dev)
1555{
1556 int err;
1557
1558 hifn_reset_dma(dev, 1);
1559
1560 err = hifn_enable_crypto(dev);
1561 if (err)
1562 return err;
1563
1564 hifn_reset_puc(dev);
1565
1566 hifn_init_dma(dev);
1567
1568 hifn_init_registers(dev);
1569
1570 hifn_init_pubrng(dev);
1571
1572 return 0;
1573}
1574
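/*
 * Counterpart of ablkcipher_add(): copy processed data from the linear
 * buffer at saddr back into the destination scatterlist, updating the
 * remaining source bytes (*srestp) and byte count (*nbytesp) and
 * returning the number of scatterlist entries written.
 */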
1575static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset,
1576 struct scatterlist *dst, unsigned int size, unsigned int *nbytesp)
1577{
1578 unsigned int srest = *srestp, nbytes = *nbytesp, copy;
1579 void *daddr;
1580 int idx = 0;
1581
1582 if (srest < size || size > nbytes)
1583 return -EINVAL;
1584
1585 while (size) {
1586
1587 copy = min(dst->length, srest);
1588
1589 daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
1590 memcpy(daddr + dst->offset + offset, saddr, copy);
1591 kunmap_atomic(daddr, KM_IRQ0);
1592
1593 nbytes -= copy;
1594 size -= copy;
1595 srest -= copy;
1596 saddr += copy;
1597 offset = 0;
1598
1599 dprintk("%s: copy: %u, size: %u, srest: %u, nbytes: %u.\n",
1600 __func__, copy, size, srest, nbytes);
1601
1602 dst++;
1603 idx++;
1604 }
1605
1606 *nbytesp = nbytes;
1607 *srestp = srest;
1608
1609 return idx;
1610}
1611
1612static void hifn_process_ready(struct ablkcipher_request *req, int error)
1613{
1614 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1615 struct hifn_device *dev;
1616
1617 dprintk("%s: req: %p, ctx: %p.\n", __func__, req, ctx);
1618
1619 dev = ctx->dev;
1620 dprintk("%s: req: %p, started: %d, sg_num: %d.\n",
1621 __func__, req, dev->started, atomic_read(&ctx->sg_num));
1622
1623 if (--dev->started < 0)
1624 BUG();
1625
1626 if (atomic_dec_and_test(&ctx->sg_num)) {
1627 unsigned int nbytes = req->nbytes;
1628 int idx = 0, err;
1629 struct scatterlist *dst, *t;
1630 void *saddr;
1631
1632 if (ctx->walk.flags & ASYNC_FLAGS_MISALIGNED) {
1633 while (nbytes) {
1634 t = &ctx->walk.cache[idx];
1635 dst = &req->dst[idx];
1636
1637 dprintk("\n%s: sg_page(t): %p, t->length: %u, "
1638 "sg_page(dst): %p, dst->length: %u, "
1639 "nbytes: %u.\n",
1640 __func__, sg_page(t), t->length,
1641 sg_page(dst), dst->length, nbytes);
1642
1643 if (!t->length) {
1644 nbytes -= dst->length;
1645 idx++;
1646 continue;
1647 }
1648
1649 saddr = kmap_atomic(sg_page(t), KM_IRQ1);
1650
1651 err = ablkcipher_get(saddr, &t->length, t->offset,
1652 dst, nbytes, &nbytes);
1653 if (err < 0) {
1654 kunmap_atomic(saddr, KM_IRQ1);
1655 break;
1656 }
1657
1658 idx += err;
1659 kunmap_atomic(saddr, KM_IRQ1);
1660 }
1661
1662 ablkcipher_walk_exit(&ctx->walk);
1663 }
1664
1665 req->base.complete(&req->base, error);
1666 }
1667}
1668
1669static void hifn_check_for_completion(struct hifn_device *dev, int error)
1670{
1671 int i;
1672 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1673
1674 for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
1675 struct hifn_desc *d = &dma->resr[i];
1676
1677 if (!(d->l & __cpu_to_le32(HIFN_D_VALID)) && dev->sa[i]) {
1678 dev->success++;
1679 dev->reset = 0;
1680 hifn_process_ready(dev->sa[i], error);
1681 dev->sa[i] = NULL;
1682 }
1683
1684 if (d->l & __cpu_to_le32(HIFN_D_DESTOVER | HIFN_D_OVER))
1685 if (printk_ratelimit())
1686 printk("%s: overflow detected [d: %u, o: %u] "
1687 "at %d resr: l: %08x, p: %08x.\n",
1688 dev->name,
1689 !!(d->l & __cpu_to_le32(HIFN_D_DESTOVER)),
1690 !!(d->l & __cpu_to_le32(HIFN_D_OVER)),
1691 i, d->l, d->p);
1692 }
1693}
1694
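/*
 * Advance every ring's consumer ("k") index past descriptors the hardware
 * has finished with (those whose VALID bit has been cleared), decrementing
 * the in-use ("u") counters accordingly.
 */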
1695static void hifn_clear_rings(struct hifn_device *dev)
1696{
1697 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1698 int i, u;
1699
1700 dprintk("%s: ring cleanup 1: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
1701 "k: %d.%d.%d.%d.\n",
1702 dev->name,
1703 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1704 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1705 dma->cmdk, dma->srck, dma->dstk, dma->resk);
1706
1707 i = dma->resk; u = dma->resu;
1708 while (u != 0) {
1709 if (dma->resr[i].l & __cpu_to_le32(HIFN_D_VALID))
1710 break;
1711
1712 if (i != HIFN_D_RES_RSIZE)
1713 u--;
1714
1715 if (++i == (HIFN_D_RES_RSIZE + 1))
1716 i = 0;
1717 }
1718 dma->resk = i; dma->resu = u;
1719
1720 i = dma->srck; u = dma->srcu;
1721 while (u != 0) {
1722 if (i == HIFN_D_SRC_RSIZE)
1723 i = 0;
1724 if (dma->srcr[i].l & __cpu_to_le32(HIFN_D_VALID))
1725 break;
1726 i++, u--;
1727 }
1728 dma->srck = i; dma->srcu = u;
1729
1730 i = dma->cmdk; u = dma->cmdu;
1731 while (u != 0) {
1732 if (dma->cmdr[i].l & __cpu_to_le32(HIFN_D_VALID))
1733 break;
1734 if (i != HIFN_D_CMD_RSIZE)
1735 u--;
1736 if (++i == (HIFN_D_CMD_RSIZE + 1))
1737 i = 0;
1738 }
1739 dma->cmdk = i; dma->cmdu = u;
1740
1741 i = dma->dstk; u = dma->dstu;
1742 while (u != 0) {
1743 if (i == HIFN_D_DST_RSIZE)
1744 i = 0;
1745 if (dma->dstr[i].l & __cpu_to_le32(HIFN_D_VALID))
1746 break;
1747 i++, u--;
1748 }
1749 dma->dstk = i; dma->dstu = u;
1750
1751 dprintk("%s: ring cleanup 2: i: %d.%d.%d.%d, u: %d.%d.%d.%d, "
1752 "k: %d.%d.%d.%d.\n",
1753 dev->name,
1754 dma->cmdi, dma->srci, dma->dsti, dma->resi,
1755 dma->cmdu, dma->srcu, dma->dstu, dma->resu,
1756 dma->cmdk, dma->srck, dma->dstk, dma->resk);
1757}
1758
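/*
 * Periodic watchdog, rescheduled every second: when the device has been
 * idle it disables the DMA rings that have drained, and if no request has
 * completed since the previous tick while work is still outstanding it
 * escalates towards a full reset and fails the pending requests.
 */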
1759static void hifn_work(struct work_struct *work)
1760{
1761 struct delayed_work *dw = container_of(work, struct delayed_work, work);
1762 struct hifn_device *dev = container_of(dw, struct hifn_device, work);
1763 unsigned long flags;
1764 int reset = 0;
1765 u32 r = 0;
1766
1767 spin_lock_irqsave(&dev->lock, flags);
1768 if (dev->active == 0) {
1769 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1770
1771 if (dma->cmdu == 0 && (dev->flags & HIFN_FLAG_CMD_BUSY)) {
1772 dev->flags &= ~HIFN_FLAG_CMD_BUSY;
1773 r |= HIFN_DMACSR_C_CTRL_DIS;
1774 }
1775 if (dma->srcu == 0 && (dev->flags & HIFN_FLAG_SRC_BUSY)) {
1776 dev->flags &= ~HIFN_FLAG_SRC_BUSY;
1777 r |= HIFN_DMACSR_S_CTRL_DIS;
1778 }
1779 if (dma->dstu == 0 && (dev->flags & HIFN_FLAG_DST_BUSY)) {
1780 dev->flags &= ~HIFN_FLAG_DST_BUSY;
1781 r |= HIFN_DMACSR_D_CTRL_DIS;
1782 }
1783 if (dma->resu == 0 && (dev->flags & HIFN_FLAG_RES_BUSY)) {
1784 dev->flags &= ~HIFN_FLAG_RES_BUSY;
1785 r |= HIFN_DMACSR_R_CTRL_DIS;
1786 }
1787 if (r)
1788 hifn_write_1(dev, HIFN_1_DMA_CSR, r);
1789 } else
1790 dev->active--;
1791
1792 if (dev->prev_success == dev->success && dev->started)
1793 reset = 1;
1794 dev->prev_success = dev->success;
1795 spin_unlock_irqrestore(&dev->lock, flags);
1796
1797 if (reset) {
1798 dprintk("%s: r: %08x, active: %d, started: %d, "
1799 "success: %lu: reset: %d.\n",
1800 dev->name, r, dev->active, dev->started,
1801 dev->success, reset);
1802
1803 if (++dev->reset >= 5) {
1804 dprintk("%s: really hard reset.\n", dev->name);
1805 hifn_reset_dma(dev, 1);
1806 hifn_stop_device(dev);
1807 hifn_start_device(dev);
1808 dev->reset = 0;
1809 }
1810
1811 spin_lock_irqsave(&dev->lock, flags);
1812 hifn_check_for_completion(dev, -EBUSY);
1813 hifn_clear_rings(dev);
1814 dev->started = 0;
1815 spin_unlock_irqrestore(&dev->lock, flags);
1816 }
1817
1818 schedule_delayed_work(&dev->work, HZ);
1819}
1820
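/*
 * Interrupt handler. Reads the DMA control/status register and returns
 * IRQ_NONE if none of the enabled bits are set (the line is shared).
 * Otherwise the pending bits are acknowledged, engine and public-key
 * status are cleared, destination/result overflows are acknowledged,
 * ring aborts trigger a full DMA reset and reinitialisation, the
 * command-wait interrupt is masked once the command ring is empty, and
 * finally completed requests are handed back and the rings reclaimed.
 */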
1821static irqreturn_t hifn_interrupt(int irq, void *data)
1822{
1823 struct hifn_device *dev = (struct hifn_device *)data;
1824 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1825 u32 dmacsr, restart;
1826
1827 dmacsr = hifn_read_1(dev, HIFN_1_DMA_CSR);
1828
	dprintk("%s: 1 dmacsr: %08x, dmareg: %08x, res: %08x [%d], "
			"i: %d.%d.%d.%d, u: %d.%d.%d.%d.\n",
		dev->name, dmacsr, dev->dmareg, dmacsr & dev->dmareg, dma->cmdi,
		dma->cmdi, dma->srci, dma->dsti, dma->resi,
		dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1834
1835 if ((dmacsr & dev->dmareg) == 0)
1836 return IRQ_NONE;
1837
1838 hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & dev->dmareg);
1839
1840 if (dmacsr & HIFN_DMACSR_ENGINE)
1841 hifn_write_0(dev, HIFN_0_PUISR, hifn_read_0(dev, HIFN_0_PUISR));
1842 if (dmacsr & HIFN_DMACSR_PUBDONE)
1843 hifn_write_1(dev, HIFN_1_PUB_STATUS,
1844 hifn_read_1(dev, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
1845
1846 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
1847 if (restart) {
1848 u32 puisr = hifn_read_0(dev, HIFN_0_PUISR);
1849
1850 if (printk_ratelimit())
1851 printk("%s: overflow: r: %d, d: %d, puisr: %08x, d: %u.\n",
1852 dev->name, !!(dmacsr & HIFN_DMACSR_R_OVER),
1853 !!(dmacsr & HIFN_DMACSR_D_OVER),
1854 puisr, !!(puisr & HIFN_PUISR_DSTOVER));
		if (puisr & HIFN_PUISR_DSTOVER)
1856 hifn_write_0(dev, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
1857 hifn_write_1(dev, HIFN_1_DMA_CSR, dmacsr & (HIFN_DMACSR_R_OVER |
1858 HIFN_DMACSR_D_OVER));
1859 }
1860
1861 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
1862 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
1863 if (restart) {
1864 if (printk_ratelimit())
1865 printk("%s: abort: c: %d, s: %d, d: %d, r: %d.\n",
1866 dev->name, !!(dmacsr & HIFN_DMACSR_C_ABORT),
1867 !!(dmacsr & HIFN_DMACSR_S_ABORT),
1868 !!(dmacsr & HIFN_DMACSR_D_ABORT),
1869 !!(dmacsr & HIFN_DMACSR_R_ABORT));
1870 hifn_reset_dma(dev, 1);
1871 hifn_init_dma(dev);
1872 hifn_init_registers(dev);
1873 }
1874
1875 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
1876 dprintk("%s: wait on command.\n", dev->name);
1877 dev->dmareg &= ~(HIFN_DMAIER_C_WAIT);
1878 hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg);
1879 }
1880
1881 hifn_check_for_completion(dev, 0);
1882 hifn_clear_rings(dev);
1883
1884 return IRQ_HANDLED;
1885}
1886
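/*
 * Fail everything still in flight: requests that have a result descriptor
 * are completed (with -ENODEV if the descriptor is still marked valid,
 * i.e. was never processed), and every request waiting on the software
 * queue is completed with -ENODEV. Used on device removal.
 */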
1887static void hifn_flush(struct hifn_device *dev)
1888{
1889 unsigned long flags;
1890 struct crypto_async_request *async_req;
1891 struct hifn_context *ctx;
1892 struct ablkcipher_request *req;
1893 struct hifn_dma *dma = (struct hifn_dma *)dev->desc_virt;
1894 int i;
1895
1896 spin_lock_irqsave(&dev->lock, flags);
1897 for (i=0; i<HIFN_D_RES_RSIZE; ++i) {
1898 struct hifn_desc *d = &dma->resr[i];
1899
1900 if (dev->sa[i]) {
			hifn_process_ready(dev->sa[i],
					(d->l & __cpu_to_le32(HIFN_D_VALID)) ? -ENODEV : 0);
1903 }
1904 }
1905
1906 while ((async_req = crypto_dequeue_request(&dev->queue))) {
1907 ctx = crypto_tfm_ctx(async_req->tfm);
1908 req = container_of(async_req, struct ablkcipher_request, base);
1909
1910 hifn_process_ready(req, -ENODEV);
1911 }
1912 spin_unlock_irqrestore(&dev->lock, flags);
1913}
1914
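/*
 * Cache the key in the transform context. Keys longer than
 * HIFN_MAX_CRYPT_KEY_LENGTH are rejected; otherwise the key and its
 * length are stored and HIFN_FLAG_OLD_KEY is cleared on the device.
 */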
1915static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1916 unsigned int len)
1917{
1918 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
1919 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
1920 struct hifn_device *dev = ctx->dev;
1921
1922 if (len > HIFN_MAX_CRYPT_KEY_LENGTH) {
1923 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
1925 }
1926
1927 dev->flags &= ~HIFN_FLAG_OLD_KEY;
1928
1929 memcpy(ctx->key, key, len);
1930 ctx->keysize = len;
1931
1932 return 0;
1933}
1934
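/*
 * Submit a request to the hardware if the estimated number of descriptors
 * (one per page of payload) still fits within HIFN_QUEUE_LENGTH;
 * otherwise, or when session setup returns -EAGAIN, the request is put on
 * the software backlog queue.
 */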
1935static int hifn_handle_req(struct ablkcipher_request *req)
1936{
1937 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1938 struct hifn_device *dev = ctx->dev;
1939 int err = -EAGAIN;
1940
1941 if (dev->started + DIV_ROUND_UP(req->nbytes, PAGE_SIZE) <= HIFN_QUEUE_LENGTH)
1942 err = hifn_setup_session(req);
1943
1944 if (err == -EAGAIN) {
1945 unsigned long flags;
1946
1947 spin_lock_irqsave(&dev->lock, flags);
1948 err = ablkcipher_enqueue_request(&dev->queue, req);
1949 spin_unlock_irqrestore(&dev->lock, flags);
1950 }
1951
1952 return err;
1953}
1954
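/*
 * Record the operation, mode, cipher type, IV and IV size in the
 * transform context. AES-128 is promoted to the 192/256-bit variant
 * according to the key length set earlier, and the IV size is overridden
 * per cipher for the chaining modes. The request is then passed on to
 * hifn_handle_req().
 */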
1955static int hifn_setup_crypto_req(struct ablkcipher_request *req, u8 op,
1956 u8 type, u8 mode)
1957{
1958 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
1959 unsigned ivsize;
1960
1961 ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1962
1963 if (req->info && mode != ACRYPTO_MODE_ECB) {
1964 if (type == ACRYPTO_TYPE_AES_128)
1965 ivsize = HIFN_AES_IV_LENGTH;
1966 else if (type == ACRYPTO_TYPE_DES)
1967 ivsize = HIFN_DES_KEY_LENGTH;
1968 else if (type == ACRYPTO_TYPE_3DES)
1969 ivsize = HIFN_3DES_KEY_LENGTH;
1970 }
1971
1972 if (ctx->keysize != 16 && type == ACRYPTO_TYPE_AES_128) {
1973 if (ctx->keysize == 24)
1974 type = ACRYPTO_TYPE_AES_192;
1975 else if (ctx->keysize == 32)
1976 type = ACRYPTO_TYPE_AES_256;
1977 }
1978
1979 ctx->op = op;
1980 ctx->mode = mode;
1981 ctx->type = type;
1982 ctx->iv = req->info;
1983 ctx->ivsize = ivsize;
1984
	/*
	 * HEAVY TODO: needs to kick Herbert XU to write documentation.
	 */
1990
1991 return hifn_handle_req(req);
1992}
1993
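/*
 * Drain the software backlog: dequeue requests and resubmit them to the
 * hardware while there is room (dev->started < HIFN_QUEUE_LENGTH),
 * stopping at the first submission failure.
 */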
1994static int hifn_process_queue(struct hifn_device *dev)
1995{
1996 struct crypto_async_request *async_req;
1997 struct hifn_context *ctx;
1998 struct ablkcipher_request *req;
1999 unsigned long flags;
2000 int err = 0;
2001
2002 while (dev->started < HIFN_QUEUE_LENGTH) {
2003 spin_lock_irqsave(&dev->lock, flags);
2004 async_req = crypto_dequeue_request(&dev->queue);
2005 spin_unlock_irqrestore(&dev->lock, flags);
2006
2007 if (!async_req)
2008 break;
2009
2010 ctx = crypto_tfm_ctx(async_req->tfm);
2011 req = container_of(async_req, struct ablkcipher_request, base);
2012
2013 err = hifn_handle_req(req);
2014 if (err)
2015 break;
2016 }
2017
2018 return err;
2019}
2020
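/*
 * Common entry point for all the per-mode encrypt/decrypt callbacks below:
 * set up (or queue) the request, then opportunistically pump the software
 * queue if the hardware still has capacity.
 */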
2021static int hifn_setup_crypto(struct ablkcipher_request *req, u8 op,
2022 u8 type, u8 mode)
2023{
2024 int err;
2025 struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
2026 struct hifn_device *dev = ctx->dev;
2027
2028 err = hifn_setup_crypto_req(req, op, type, mode);
2029 if (err)
2030 return err;
2031
2032 if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
2033 err = hifn_process_queue(dev);
2034
2035 return err;
2036}
2037
2038/*
 * AES encryption functions.
2040 */
2041static inline int hifn_encrypt_aes_ecb(struct ablkcipher_request *req)
2042{
2043 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2044 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
2045}
2046static inline int hifn_encrypt_aes_cbc(struct ablkcipher_request *req)
2047{
2048 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2049 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
2050}
2051static inline int hifn_encrypt_aes_cfb(struct ablkcipher_request *req)
2052{
2053 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2054 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
2055}
2056static inline int hifn_encrypt_aes_ofb(struct ablkcipher_request *req)
2057{
2058 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2059 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
2060}
2061
2062/*
2063 * AES decryption functions.
2064 */
2065static inline int hifn_decrypt_aes_ecb(struct ablkcipher_request *req)
2066{
2067 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2068 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_ECB);
2069}
2070static inline int hifn_decrypt_aes_cbc(struct ablkcipher_request *req)
2071{
2072 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2073 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
2074}
2075static inline int hifn_decrypt_aes_cfb(struct ablkcipher_request *req)
2076{
2077 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2078 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
2079}
2080static inline int hifn_decrypt_aes_ofb(struct ablkcipher_request *req)
2081{
2082 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2083 ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
2084}
2085
2086/*
 * DES encryption functions.
2088 */
2089static inline int hifn_encrypt_des_ecb(struct ablkcipher_request *req)
2090{
2091 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2092 ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
2093}
2094static inline int hifn_encrypt_des_cbc(struct ablkcipher_request *req)
2095{
2096 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2097 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
2098}
2099static inline int hifn_encrypt_des_cfb(struct ablkcipher_request *req)
2100{
2101 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2102 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
2103}
2104static inline int hifn_encrypt_des_ofb(struct ablkcipher_request *req)
2105{
2106 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2107 ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
2108}
2109
2110/*
2111 * DES decryption functions.
2112 */
2113static inline int hifn_decrypt_des_ecb(struct ablkcipher_request *req)
2114{
2115 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2116 ACRYPTO_TYPE_DES, ACRYPTO_MODE_ECB);
2117}
2118static inline int hifn_decrypt_des_cbc(struct ablkcipher_request *req)
2119{
2120 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2121 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
2122}
2123static inline int hifn_decrypt_des_cfb(struct ablkcipher_request *req)
2124{
2125 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2126 ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
2127}
2128static inline int hifn_decrypt_des_ofb(struct ablkcipher_request *req)
2129{
2130 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2131 ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
2132}
2133
2134/*
 * 3DES encryption functions.
2136 */
2137static inline int hifn_encrypt_3des_ecb(struct ablkcipher_request *req)
2138{
2139 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2140 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
2141}
2142static inline int hifn_encrypt_3des_cbc(struct ablkcipher_request *req)
2143{
2144 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2145 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
2146}
2147static inline int hifn_encrypt_3des_cfb(struct ablkcipher_request *req)
2148{
2149 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2150 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
2151}
2152static inline int hifn_encrypt_3des_ofb(struct ablkcipher_request *req)
2153{
2154 return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
2155 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
2156}
2157
2158/*
2159 * 3DES decryption functions.
2160 */
2161static inline int hifn_decrypt_3des_ecb(struct ablkcipher_request *req)
2162{
2163 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2164 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_ECB);
2165}
2166static inline int hifn_decrypt_3des_cbc(struct ablkcipher_request *req)
2167{
2168 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2169 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
2170}
2171static inline int hifn_decrypt_3des_cfb(struct ablkcipher_request *req)
2172{
2173 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2174 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
2175}
2176static inline int hifn_decrypt_3des_ofb(struct ablkcipher_request *req)
2177{
2178 return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
2179 ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
2180}
2181
2182struct hifn_alg_template
2183{
2184 char name[CRYPTO_MAX_ALG_NAME];
2185 char drv_name[CRYPTO_MAX_ALG_NAME];
2186 unsigned int bsize;
2187 struct ablkcipher_alg ablkcipher;
2188};
2189
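/*
 * Algorithm templates registered with the crypto API. Each entry is
 * instantiated as an asynchronous block cipher bound to the device that
 * registers it.
 */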
2190static struct hifn_alg_template hifn_alg_templates[] = {
2191 /*
2192 * 3DES ECB, CBC, CFB and OFB modes.
2193 */
2194 {
2195 .name = "cfb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2196 .ablkcipher = {
2197 .min_keysize = HIFN_3DES_KEY_LENGTH,
2198 .max_keysize = HIFN_3DES_KEY_LENGTH,
2199 .setkey = hifn_setkey,
2200 .encrypt = hifn_encrypt_3des_cfb,
2201 .decrypt = hifn_decrypt_3des_cfb,
2202 },
2203 },
2204 {
2205 .name = "ofb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2206 .ablkcipher = {
2207 .min_keysize = HIFN_3DES_KEY_LENGTH,
2208 .max_keysize = HIFN_3DES_KEY_LENGTH,
2209 .setkey = hifn_setkey,
2210 .encrypt = hifn_encrypt_3des_ofb,
2211 .decrypt = hifn_decrypt_3des_ofb,
2212 },
2213 },
2214 {
2215 .name = "cbc(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2216 .ablkcipher = {
2217 .min_keysize = HIFN_3DES_KEY_LENGTH,
2218 .max_keysize = HIFN_3DES_KEY_LENGTH,
2219 .setkey = hifn_setkey,
2220 .encrypt = hifn_encrypt_3des_cbc,
2221 .decrypt = hifn_decrypt_3des_cbc,
2222 },
2223 },
2224 {
2225 .name = "ecb(des3_ede)", .drv_name = "hifn-3des", .bsize = 8,
2226 .ablkcipher = {
2227 .min_keysize = HIFN_3DES_KEY_LENGTH,
2228 .max_keysize = HIFN_3DES_KEY_LENGTH,
2229 .setkey = hifn_setkey,
2230 .encrypt = hifn_encrypt_3des_ecb,
2231 .decrypt = hifn_decrypt_3des_ecb,
2232 },
2233 },
2234
2235 /*
2236 * DES ECB, CBC, CFB and OFB modes.
2237 */
2238 {
2239 .name = "cfb(des)", .drv_name = "hifn-des", .bsize = 8,
2240 .ablkcipher = {
2241 .min_keysize = HIFN_DES_KEY_LENGTH,
2242 .max_keysize = HIFN_DES_KEY_LENGTH,
2243 .setkey = hifn_setkey,
2244 .encrypt = hifn_encrypt_des_cfb,
2245 .decrypt = hifn_decrypt_des_cfb,
2246 },
2247 },
2248 {
2249 .name = "ofb(des)", .drv_name = "hifn-des", .bsize = 8,
2250 .ablkcipher = {
2251 .min_keysize = HIFN_DES_KEY_LENGTH,
2252 .max_keysize = HIFN_DES_KEY_LENGTH,
2253 .setkey = hifn_setkey,
2254 .encrypt = hifn_encrypt_des_ofb,
2255 .decrypt = hifn_decrypt_des_ofb,
2256 },
2257 },
2258 {
2259 .name = "cbc(des)", .drv_name = "hifn-des", .bsize = 8,
2260 .ablkcipher = {
2261 .min_keysize = HIFN_DES_KEY_LENGTH,
2262 .max_keysize = HIFN_DES_KEY_LENGTH,
2263 .setkey = hifn_setkey,
2264 .encrypt = hifn_encrypt_des_cbc,
2265 .decrypt = hifn_decrypt_des_cbc,
2266 },
2267 },
2268 {
2269 .name = "ecb(des)", .drv_name = "hifn-des", .bsize = 8,
2270 .ablkcipher = {
2271 .min_keysize = HIFN_DES_KEY_LENGTH,
2272 .max_keysize = HIFN_DES_KEY_LENGTH,
2273 .setkey = hifn_setkey,
2274 .encrypt = hifn_encrypt_des_ecb,
2275 .decrypt = hifn_decrypt_des_ecb,
2276 },
2277 },
2278
2279 /*
2280 * AES ECB, CBC, CFB and OFB modes.
2281 */
2282 {
2283 .name = "ecb(aes)", .drv_name = "hifn-aes", .bsize = 16,
2284 .ablkcipher = {
2285 .min_keysize = AES_MIN_KEY_SIZE,
2286 .max_keysize = AES_MAX_KEY_SIZE,
2287 .setkey = hifn_setkey,
2288 .encrypt = hifn_encrypt_aes_ecb,
2289 .decrypt = hifn_decrypt_aes_ecb,
2290 },
2291 },
2292 {
2293 .name = "cbc(aes)", .drv_name = "hifn-aes", .bsize = 16,
2294 .ablkcipher = {
2295 .min_keysize = AES_MIN_KEY_SIZE,
2296 .max_keysize = AES_MAX_KEY_SIZE,
2297 .setkey = hifn_setkey,
2298 .encrypt = hifn_encrypt_aes_cbc,
2299 .decrypt = hifn_decrypt_aes_cbc,
2300 },
2301 },
2302 {
2303 .name = "cfb(aes)", .drv_name = "hifn-aes", .bsize = 16,
2304 .ablkcipher = {
2305 .min_keysize = AES_MIN_KEY_SIZE,
2306 .max_keysize = AES_MAX_KEY_SIZE,
2307 .setkey = hifn_setkey,
2308 .encrypt = hifn_encrypt_aes_cfb,
2309 .decrypt = hifn_decrypt_aes_cfb,
2310 },
2311 },
2312 {
2313 .name = "ofb(aes)", .drv_name = "hifn-aes", .bsize = 16,
2314 .ablkcipher = {
2315 .min_keysize = AES_MIN_KEY_SIZE,
2316 .max_keysize = AES_MAX_KEY_SIZE,
2317 .setkey = hifn_setkey,
2318 .encrypt = hifn_encrypt_aes_ofb,
2319 .decrypt = hifn_decrypt_aes_ofb,
2320 },
2321 },
2322};
2323
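/*
 * Transform initialisation: bind the context to the hifn_device that
 * registered this algorithm.
 */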
2324static int hifn_cra_init(struct crypto_tfm *tfm)
2325{
2326 struct crypto_alg *alg = tfm->__crt_alg;
2327 struct hifn_crypto_alg *ha = crypto_alg_to_hifn(alg);
2328 struct hifn_context *ctx = crypto_tfm_ctx(tfm);
2329
2330 ctx->dev = ha->dev;
2331
2332 return 0;
2333}
2334
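/*
 * Allocate a hifn_crypto_alg for the given template, fill in the generic
 * crypto_alg fields (async blkcipher type, context size, alignment mask
 * derived from the block size) and register it with the crypto API.
 */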
2335static int hifn_alg_alloc(struct hifn_device *dev, struct hifn_alg_template *t)
2336{
2337 struct hifn_crypto_alg *alg;
2338 int err;
2339
2340 alg = kzalloc(sizeof(struct hifn_crypto_alg), GFP_KERNEL);
2341 if (!alg)
2342 return -ENOMEM;
2343
2344 snprintf(alg->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
2345 snprintf(alg->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", t->drv_name);
2346
2347 alg->alg.cra_priority = 300;
2348 alg->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
2349 alg->alg.cra_blocksize = t->bsize;
2350 alg->alg.cra_ctxsize = sizeof(struct hifn_context);
2351 alg->alg.cra_alignmask = 15;
2352 if (t->bsize == 8)
2353 alg->alg.cra_alignmask = 3;
2354 alg->alg.cra_type = &crypto_ablkcipher_type;
2355 alg->alg.cra_module = THIS_MODULE;
2356 alg->alg.cra_u.ablkcipher = t->ablkcipher;
2357 alg->alg.cra_init = hifn_cra_init;
2358
2359 alg->dev = dev;
2360
2361 list_add_tail(&alg->entry, &dev->alg_list);
2362
2363 err = crypto_register_alg(&alg->alg);
2364 if (err) {
2365 list_del(&alg->entry);
2366 kfree(alg);
2367 }
2368
2369 return err;
2370}
2371
2372static void hifn_unregister_alg(struct hifn_device *dev)
2373{
2374 struct hifn_crypto_alg *a, *n;
2375
2376 list_for_each_entry_safe(a, n, &dev->alg_list, entry) {
2377 list_del(&a->entry);
2378 crypto_unregister_alg(&a->alg);
2379 kfree(a);
2380 }
2381}
2382
2383static int hifn_register_alg(struct hifn_device *dev)
2384{
2385 int i, err;
2386
2387 for (i=0; i<ARRAY_SIZE(hifn_alg_templates); ++i) {
2388 err = hifn_alg_alloc(dev, &hifn_alg_templates[i]);
2389 if (err)
2390 goto err_out_exit;
2391 }
2392
2393 return 0;
2394
2395err_out_exit:
2396 hifn_unregister_alg(dev);
2397 return err;
2398}
2399
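/*
 * PCI probe: enable the device, set a 32-bit DMA mask, map the three BARs,
 * allocate the result pages and the descriptor ring, request the shared
 * interrupt, start and self-test the engine, register the algorithm
 * templates and schedule the watchdog work. Errors unwind through the
 * labels at the end of the function.
 */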
2400static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2401{
2402 int err, i;
2403 struct hifn_device *dev;
2404 char name[8];
2405
2406 err = pci_enable_device(pdev);
2407 if (err)
2408 return err;
2409 pci_set_master(pdev);
2410
2411 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2412 if (err)
2413 goto err_out_disable_pci_device;
2414
2415 snprintf(name, sizeof(name), "hifn%d",
2416 atomic_inc_return(&hifn_dev_number)-1);
2417
2418 err = pci_request_regions(pdev, name);
2419 if (err)
2420 goto err_out_disable_pci_device;
2421
2422 if (pci_resource_len(pdev, 0) < HIFN_BAR0_SIZE ||
2423 pci_resource_len(pdev, 1) < HIFN_BAR1_SIZE ||
2424 pci_resource_len(pdev, 2) < HIFN_BAR2_SIZE) {
2425 dprintk("%s: Broken hardware - I/O regions are too small.\n",
2426 pci_name(pdev));
2427 err = -ENODEV;
2428 goto err_out_free_regions;
2429 }
2430
2431 dev = kzalloc(sizeof(struct hifn_device) + sizeof(struct crypto_alg),
2432 GFP_KERNEL);
2433 if (!dev) {
2434 err = -ENOMEM;
2435 goto err_out_free_regions;
2436 }
2437
2438 INIT_LIST_HEAD(&dev->alg_list);
2439
2440 snprintf(dev->name, sizeof(dev->name), "%s", name);
2441 spin_lock_init(&dev->lock);
2442
2443 for (i=0; i<3; ++i) {
2444 unsigned long addr, size;
2445
2446 addr = pci_resource_start(pdev, i);
2447 size = pci_resource_len(pdev, i);
2448
		dev->bar[i] = ioremap_nocache(addr, size);
		if (!dev->bar[i]) {
			err = -ENOMEM;
			goto err_out_unmap_bars;
		}
2452 }
2453
	dev->result_mem = __get_free_pages(GFP_KERNEL, HIFN_MAX_RESULT_ORDER);
	if (!dev->result_mem) {
		dprintk("Failed to allocate %d pages for result_mem.\n",
				HIFN_MAX_RESULT_ORDER);
		err = -ENOMEM;
		goto err_out_unmap_bars;
	}
2460 memset((void *)dev->result_mem, 0, PAGE_SIZE*(1<<HIFN_MAX_RESULT_ORDER));
2461
2462 dev->dst = pci_map_single(pdev, (void *)dev->result_mem,
2463 PAGE_SIZE << HIFN_MAX_RESULT_ORDER, PCI_DMA_FROMDEVICE);
2464
	dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
			&dev->desc_dma);
	if (!dev->desc_virt) {
		dprintk("Failed to allocate descriptor rings.\n");
		err = -ENOMEM;
		goto err_out_free_result_pages;
	}
2471 memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
2472
2473 dev->pdev = pdev;
2474 dev->irq = pdev->irq;
2475
2476 for (i=0; i<HIFN_D_RES_RSIZE; ++i)
2477 dev->sa[i] = NULL;
2478
2479 pci_set_drvdata(pdev, dev);
2480
2481 crypto_init_queue(&dev->queue, 1);
2482
2483 err = request_irq(dev->irq, hifn_interrupt, IRQF_SHARED, dev->name, dev);
2484 if (err) {
2485 dprintk("Failed to request IRQ%d: err: %d.\n", dev->irq, err);
2486 dev->irq = 0;
2487 goto err_out_free_desc;
2488 }
2489
2490 err = hifn_start_device(dev);
2491 if (err)
2492 goto err_out_free_irq;
2493
2494 err = hifn_test(dev, 1, 0);
2495 if (err)
2496 goto err_out_stop_device;
2497
2498 err = hifn_register_alg(dev);
2499 if (err)
2500 goto err_out_stop_device;
2501
2502 INIT_DELAYED_WORK(&dev->work, hifn_work);
2503 schedule_delayed_work(&dev->work, HZ);
2504
2505 dprintk("HIFN crypto accelerator card at %s has been "
2506 "successfully registered as %s.\n",
2507 pci_name(pdev), dev->name);
2508
2509 return 0;
2510
2511err_out_stop_device:
2512 hifn_reset_dma(dev, 1);
2513 hifn_stop_device(dev);
2514err_out_free_irq:
	free_irq(dev->irq, dev);
2516err_out_free_desc:
2517 pci_free_consistent(pdev, sizeof(struct hifn_dma),
2518 dev->desc_virt, dev->desc_dma);
2519
2520err_out_free_result_pages:
2521 pci_unmap_single(pdev, dev->dst, PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
2522 PCI_DMA_FROMDEVICE);
2523 free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
2524
2525err_out_unmap_bars:
2526 for (i=0; i<3; ++i)
2527 if (dev->bar[i])
2528 iounmap(dev->bar[i]);
2529
2530err_out_free_regions:
2531 pci_release_regions(pdev);
2532
2533err_out_disable_pci_device:
2534 pci_disable_device(pdev);
2535
2536 return err;
2537}
2538
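/*
 * Device teardown: stop the watchdog, unregister the algorithms, reset and
 * stop the engine, release the interrupt, fail anything still in flight
 * via hifn_flush() and free the DMA areas and BAR mappings.
 */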
2539static void hifn_remove(struct pci_dev *pdev)
2540{
2541 int i;
2542 struct hifn_device *dev;
2543
2544 dev = pci_get_drvdata(pdev);
2545
2546 if (dev) {
2547 cancel_delayed_work(&dev->work);
2548 flush_scheduled_work();
2549
2550 hifn_unregister_alg(dev);
2551 hifn_reset_dma(dev, 1);
2552 hifn_stop_device(dev);
2553
		free_irq(dev->irq, dev);
2555
2556 hifn_flush(dev);
2557
2558 pci_free_consistent(pdev, sizeof(struct hifn_dma),
2559 dev->desc_virt, dev->desc_dma);
2560 pci_unmap_single(pdev, dev->dst,
2561 PAGE_SIZE << HIFN_MAX_RESULT_ORDER,
2562 PCI_DMA_FROMDEVICE);
2563 free_pages(dev->result_mem, HIFN_MAX_RESULT_ORDER);
2564 for (i=0; i<3; ++i)
2565 if (dev->bar[i])
2566 iounmap(dev->bar[i]);
2567
2568 kfree(dev);
2569 }
2570
2571 pci_release_regions(pdev);
2572 pci_disable_device(pdev);
2573}
2574
2575static struct pci_device_id hifn_pci_tbl[] = {
2576 { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7955) },
2577 { PCI_DEVICE(PCI_VENDOR_ID_HIFN, PCI_DEVICE_ID_HIFN_7956) },
2578 { 0 }
2579};
2580MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
2581
2582static struct pci_driver hifn_pci_driver = {
2583 .name = "hifn795x",
2584 .id_table = hifn_pci_tbl,
2585 .probe = hifn_probe,
2586 .remove = __devexit_p(hifn_remove),
2587};
2588
static int __init hifn_init(void)
2590{
2591 int err;
2592
2593 err = pci_register_driver(&hifn_pci_driver);
2594 if (err < 0) {
2595 dprintk("Failed to register PCI driver for %s device.\n",
2596 hifn_pci_driver.name);
		return err;
2598 }
2599
2600 printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
2601 "has been successfully registered.\n");
2602
2603 return 0;
2604}
2605
static void __exit hifn_fini(void)
2607{
2608 pci_unregister_driver(&hifn_pci_driver);
2609
2610 printk(KERN_INFO "Driver for HIFN 795x crypto accelerator chip "
2611 "has been successfully unregistered.\n");
2612}
2613
2614module_init(hifn_init);
2615module_exit(hifn_fini);
2616
2617MODULE_LICENSE("GPL");
2618MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
2619MODULE_DESCRIPTION("Driver for HIFN 795x crypto accelerator chip.");