Diffstat (limited to 'net/sunrpc/auth_gss/gss_krb5_crypto.c')
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  64
1 file changed, 55 insertions, 9 deletions
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 1d52308ca324..d0f3371c62e2 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -83,8 +83,6 @@ out:
return ret;
}
-EXPORT_SYMBOL(krb5_encrypt);
-
u32
krb5_decrypt(
struct crypto_blkcipher *tfm,
@@ -118,8 +116,6 @@ out:
return ret;
}
-EXPORT_SYMBOL(krb5_decrypt);
-
static int
checksummer(struct scatterlist *sg, void *data)
{
@@ -161,8 +157,6 @@ out:
return err ? GSS_S_FAILURE : 0;
}
-EXPORT_SYMBOL(make_checksum);
-
struct encryptor_desc {
u8 iv[8]; /* XXX hard-coded blocksize */
struct blkcipher_desc desc;
@@ -262,8 +256,6 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
return ret;
}
-EXPORT_SYMBOL(gss_encrypt_xdr_buf);
-
struct decryptor_desc {
u8 iv[8]; /* XXX hard-coded blocksize */
struct blkcipher_desc desc;
@@ -335,4 +327,58 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
-EXPORT_SYMBOL(gss_decrypt_xdr_buf);
+/*
+ * This function makes the assumption that it was ultimately called
+ * from gss_wrap().
+ *
+ * The client auth_gss code moves any existing tail data into a
+ * separate page before calling gss_wrap.
+ * The server svcauth_gss code ensures that both the head and the
+ * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
+ *
+ * Even with that guarantee, this function may be called more than
+ * once in the processing of gss_wrap(). The best we can do is
+ * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
+ * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
+ * At run-time we can verify that a single invocation of this
+ * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
+ */
+
+int
+shift_head_data(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
+{
+ u8 *p;
+
+ if (shiftlen == 0)
+ return 0;
+
+ GSS_KRB5_SLACK_CHECK;
+ BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
+
+ /*
+ * If there is a tail, and it shares a page with the head,
+ * make sure we don't clobber the tail. This is just a
+ * defensive check.
+ */
+ if (buf->tail[0].iov_base != NULL) {
+ if ((((long)buf->tail[0].iov_base >> PAGE_CACHE_SHIFT) ==
+ ((long)buf->head[0].iov_base >> PAGE_CACHE_SHIFT)) &&
+ buf->tail[0].iov_base - buf->head[0].iov_base < shiftlen) {
+ dprintk("%s: collision: head %p:%zu, tail %p:%zu, "
+ "shiftlen %u\n",
+ __func__, buf->head[0].iov_base,
+ buf->head[0].iov_len, buf->tail[0].iov_base,
+ buf->tail[0].iov_len, shiftlen);
+ return 1;
+ }
+ }
+
+ p = buf->head[0].iov_base + base;
+
+ memmove(p + shiftlen, p, buf->head[0].iov_len - base);
+
+ buf->head[0].iov_len += shiftlen;
+ buf->len += shiftlen;
+
+ return 0;
+}
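
A caller in the gss_wrap() path is expected to use shift_head_data() to push the
existing head data to the right and open up room in front of it, for example for
a krb5 token header. The sketch below is illustrative only and is not part of this
patch: the function name, offset, and header length are made up, and it assumes
shift_head_data() is declared in a header visible to the caller (no header hunk is
shown in this diff, which is limited to gss_krb5_crypto.c).

#include <linux/string.h>
#include <linux/sunrpc/xdr.h>

/*
 * Illustrative sketch (not from this patch): open up 'hdrlen' bytes of
 * space at head[0].iov_base + offset by shifting the existing head data
 * right with shift_head_data().
 */
static int example_make_room_in_head(struct xdr_buf *buf,
				     unsigned int offset,
				     unsigned int hdrlen)
{
	/* shift_head_data() returns nonzero if the shifted head would
	 * run into a tail that shares the same page. */
	if (shift_head_data(buf, offset, hdrlen))
		return -EINVAL;

	/* The hdrlen bytes at head[0].iov_base + offset are now free;
	 * a real caller would fill them with a token header.  Zeroing
	 * them here is purely for illustration. */
	memset(buf->head[0].iov_base + offset, 0, hdrlen);
	return 0;
}

Note that shift_head_data() already adjusts head[0].iov_len and buf->len by
shiftlen, so a caller only has to fill in the newly opened region.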