summaryrefslogtreecommitdiff
path: root/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.h
blob: a02d059fb652d2f8b8eb9472d022aa7664c21b64 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
/* SPDX-License-Identifier: GPL-2.0
 * Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __OTX_CPTVF_REQUEST_MANAGER_H
#define __OTX_CPTVF_REQUEST_MANAGER_H

#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/pci.h>
#include "otx_cpt_hw_types.h"

/*
 * Maximum total number of SG buffers is 100, we divide it equally
 * between input and output
 */
#define OTX_CPT_MAX_SG_IN_CNT		50
#define OTX_CPT_MAX_SG_OUT_CNT		50

/* DMA mode direct or SG (values used in otx_cpt_ctrl_info.s.dma_mode) */
#define OTX_CPT_DMA_DIRECT_DIRECT	0
#define OTX_CPT_DMA_GATHER_SCATTER	1

/* Context source CPTR or DPTR */
#define OTX_CPT_FROM_CPTR		0
#define OTX_CPT_FROM_DPTR		1

/* CPT instruction queue alignment (bytes) */
#define OTX_CPT_INST_Q_ALIGNMENT	128
/* Maximum request data length (fits the 16-bit dlen fields below) */
#define OTX_CPT_MAX_REQ_SIZE		65535

/* Default command timeout in seconds */
#define OTX_CPT_COMMAND_TIMEOUT		4
#define OTX_CPT_TIMER_HOLD		0x03F
#define OTX_CPT_COUNT_HOLD		32
#define OTX_CPT_TIME_IN_RESET_COUNT     5

/* Minimum and maximum values for interrupt coalescing */
#define OTX_CPT_COALESC_MIN_TIME_WAIT	0x0
#define OTX_CPT_COALESC_MAX_TIME_WAIT	((1<<16)-1)
#define OTX_CPT_COALESC_MIN_NUM_WAIT	0x0
#define OTX_CPT_COALESC_MAX_NUM_WAIT	((1<<20)-1)

/*
 * CPT command opcode: viewable either as the raw 16-bit value (flags)
 * or as separate major/minor opcode bytes.
 */
union otx_cpt_opcode_info {
	u16 flags;
	struct {
		u8 major;
		u8 minor;
	} s;
};

/* Core-specific part of a request: opcode plus its parameters and data len. */
struct otx_cptvf_request {
	u32 param1;	/* Opcode-specific parameter 1 */
	u32 param2;	/* Opcode-specific parameter 2 */
	u16 dlen;	/* Input data length (<= OTX_CPT_MAX_REQ_SIZE) */
	union otx_cpt_opcode_info opcode;	/* Command opcode */
};

/* One input/output buffer: kernel virtual address, DMA address and size. */
struct otx_cpt_buf_ptr {
	u8 *vptr;		/* Kernel virtual address */
	dma_addr_t dma_addr;	/* DMA-mapped address (0 if not mapped) */
	u16 size;		/* Buffer size in bytes */
};

/*
 * User control information for a request. The flags member gives a raw
 * 32-bit view over the bitfields; field order is reversed for big-endian
 * so the in-memory layout matches on both endiannesses.
 */
union otx_cpt_ctrl_info {
	u32 flags;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved0:26;
		u32 grp:3;	/* Group bits */
		u32 dma_mode:2;	/* DMA mode (OTX_CPT_DMA_*) */
		u32 se_req:1;	/* To SE core */
#else
		u32 se_req:1;	/* To SE core */
		u32 dma_mode:2;	/* DMA mode (OTX_CPT_DMA_*) */
		u32 grp:3;	/* Group bits */
		u32 reserved0:26;
#endif
	} s;
};

/*
 * CPT_INST_S software command definitions
 * Words EI (0-3)
 */
/*
 * CPT_INST_S software command definitions
 * Words EI (0-3)
 */
/* Word EI0: opcode and parameters, each stored big-endian for the device. */
union otx_cpt_iq_cmd_word0 {
	u64 u64;
	struct {
		__be16 opcode;	/* Command opcode */
		__be16 param1;	/* Opcode-specific parameter 1 */
		__be16 param2;	/* Opcode-specific parameter 2 */
		__be16 dlen;	/* Input data length */
	} s;
};

/*
 * Word EI3: group bits plus context pointer (CPTR). Bitfield order is
 * reversed for big-endian so the layout matches on both endiannesses.
 */
union otx_cpt_iq_cmd_word3 {
	u64 u64;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 grp:3;	/* Group bits */
		u64 cptr:61;	/* Context pointer */
#else
		u64 cptr:61;	/* Context pointer */
		u64 grp:3;	/* Group bits */
#endif
	} s;
};

/* Full instruction-queue command: words EI0-EI3 as defined above. */
struct otx_cpt_iq_cmd {
	union otx_cpt_iq_cmd_word0 cmd;		/* Word EI0 */
	u64 dptr;				/* Word EI1: input data pointer */
	u64 rptr;				/* Word EI2: result pointer */
	union otx_cpt_iq_cmd_word3 cptr;	/* Word EI3: grp + context ptr */
};

/*
 * One hardware scatter/gather list component: four buffer pointers with
 * their lengths. Lengths are also viewable as a single u64. All fields
 * are big-endian as consumed by the device.
 */
struct otx_cpt_sglist_component {
	union {
		u64 len;	/* Raw view of all four lengths */
		struct {
			__be16 len0;
			__be16 len1;
			__be16 len2;
			__be16 len3;
		} s;
	} u;
	__be64 ptr0;
	__be64 ptr1;
	__be64 ptr2;
	__be64 ptr3;
};

/* One slot in the pending queue, tracking an in-flight request. */
struct otx_cpt_pending_entry {
	u64 *completion_addr;	/* Completion address */
	struct otx_cpt_info_buffer *info;	/* Per-request bookkeeping */
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	u8 resume_sender;	/* Notify sender to resume sending requests */
	u8 busy;		/* Entry status (free/busy) */
};

/* Ring of pending entries, consumed at front and appended at rear. */
struct otx_cpt_pending_queue {
	struct otx_cpt_pending_entry *head;	/* Head of the queue */
	u32 front;			/* Process work from here */
	u32 rear;			/* Append new work here */
	u32 pending_count;		/* Pending requests count */
	u32 qlen;			/* Queue length */
	spinlock_t lock;		/* Queue lock */
};

/*
 * Complete description of one CPT request as submitted by a caller:
 * command, control flags, input/output buffer lists and completion
 * callback.
 */
struct otx_cpt_req_info {
	/* Kernel async request callback */
	void (*callback)(int status, void *arg1, void *arg2);
	struct crypto_async_request *areq; /* Async request callback arg */
	struct otx_cptvf_request req;/* Request information (core specific) */
	union otx_cpt_ctrl_info ctrl;/* User control information */
	struct otx_cpt_buf_ptr in[OTX_CPT_MAX_SG_IN_CNT];   /* Input buffers */
	struct otx_cpt_buf_ptr out[OTX_CPT_MAX_SG_OUT_CNT]; /* Output buffers */
	u8 *iv_out;     /* IV to send back */
	u16 rlen;	/* Output length */
	u8 incnt;	/* Number of input buffers */
	u8 outcnt;	/* Number of output buffers */
	u8 req_type;	/* Type of request */
	u8 is_enc;	/* Is a request an encryption request */
	u8 is_trunc_hmac;/* Is truncated hmac used */
};

/*
 * Per-request bookkeeping allocated for the lifetime of one request;
 * freed (with its DMA mappings) by do_request_cleanup().
 */
struct otx_cpt_info_buffer {
	struct otx_cpt_pending_entry *pentry;	/* Pending-queue slot */
	struct otx_cpt_req_info *req;		/* Originating request */
	struct pci_dev *pdev;			/* Device used for DMA mapping */
	u64 *completion_addr;	/* CPU address polled for completion */
	u8 *out_buffer;		/* Scatter (output) list buffer */
	u8 *in_buffer;		/* Gather (input) list buffer */
	dma_addr_t dptr_baddr;	/* DMA address of gather list (0 if unmapped) */
	dma_addr_t rptr_baddr;	/* DMA address of scatter list */
	dma_addr_t comp_baddr;	/* DMA address of completion word */
	unsigned long time_in;	/* Submission timestamp — presumably jiffies;
				 * TODO confirm against reqmgr.c usage
				 */
	u32 dlen;		/* Input data length */
	u32 dma_len;		/* Total length mapped at dptr_baddr */
	u8 extra_time;		/* Timeout extension counter — verify in .c */
};

/*
 * Release everything attached to one request's info buffer: the combined
 * DMA mapping (if any), every per-buffer input/output DMA mapping that
 * was created, and finally the info buffer itself (zeroized on free).
 */
static inline void do_request_cleanup(struct pci_dev *pdev,
				      struct otx_cpt_info_buffer *info)
{
	struct otx_cpt_req_info *req = info->req;
	int idx;

	/* Combined gather/scatter mapping, present only when it was set up */
	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dma_len, DMA_BIDIRECTIONAL);

	if (req) {
		/* Unmap each mapped output buffer */
		for (idx = 0; idx < req->outcnt; idx++) {
			if (!req->out[idx].dma_addr)
				continue;
			dma_unmap_single(&pdev->dev, req->out[idx].dma_addr,
					 req->out[idx].size,
					 DMA_BIDIRECTIONAL);
		}

		/* Unmap each mapped input buffer */
		for (idx = 0; idx < req->incnt; idx++) {
			if (!req->in[idx].dma_addr)
				continue;
			dma_unmap_single(&pdev->dev, req->in[idx].dma_addr,
					 req->in[idx].size,
					 DMA_BIDIRECTIONAL);
		}
	}

	/* May hold key material; kfree_sensitive zeroizes before freeing */
	kfree_sensitive(info);
}

struct otx_cptvf_wqe;	/* Opaque here; defined elsewhere in the driver */

/* Dump a request's in/out buffer lists — for debugging (see reqmgr.c) */
void otx_cpt_dump_sg_list(struct pci_dev *pdev, struct otx_cpt_req_info *req);
/* Completion-side processing for a work-queue entry */
void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
/* Submit a request to the CPT queue associated with cpu_num */
int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req,
		       int cpu_num);

#endif /* __OTX_CPTVF_REQUEST_MANAGER_H */