gnutls-commit
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[SCM] GNU gnutls branch, master, updated. gnutls_3_0_2-23-g7ae18e4


From: Nikos Mavrogiannopoulos
Subject: [SCM] GNU gnutls branch, master, updated. gnutls_3_0_2-23-g7ae18e4
Date: Wed, 07 Sep 2011 22:06:52 +0000

This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU gnutls".

http://git.savannah.gnu.org/cgit/gnutls.git/commit/?id=7ae18e4644ae251717dd337c75911d652664732e

The branch, master has been updated
       via  7ae18e4644ae251717dd337c75911d652664732e (commit)
       via  1be5f996286ce2d9e8435353e5d1fd3fa1d1f22e (commit)
       via  8f75c7bcfd1d95e511417ea3099635169a40f237 (commit)
       via  45081eb99b07bb20dcf09db2a09af15cae055400 (commit)
       via  6361933cfaeab5bc0a6d36e6a4bd1b114ac96380 (commit)
       via  1b18e21bfbbf75bc4c60b1c56cc41406a075b813 (commit)
       via  a1c641175df81c3d75610e269921414efc1e2927 (commit)
      from  3eb56adf2a2307368489fa8bc48f325e224da4e8 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
-----------------------------------------------------------------------

Summary of changes:
 AUTHORS                                    |    3 +
 NEWS                                       |    4 +
 doc/cha-internals.texi                     |    4 -
 lib/accelerated/accelerated.c              |    1 +
 lib/accelerated/intel/Makefile.am          |    6 +-
 lib/accelerated/intel/aes-gcm-x86.c        |   16 +-
 lib/accelerated/intel/aes-x86.c            |   22 +-
 lib/accelerated/intel/aes-x86.h            |    5 +
 lib/accelerated/intel/asm/padlock-x86-64.s |  723 ++++++++++++++++++++++++
 lib/accelerated/intel/asm/padlock-x86.s    |  822 ++++++++++++++++++++++++++++
 lib/accelerated/intel/padlock.c            |  258 +++++++++
 lib/crypto-api.c                           |   64 ++-
 lib/crypto-backend.h                       |    2 +-
 lib/ext/session_ticket.c                   |    4 +-
 lib/gnutls_cipher_int.c                    |   10 +-
 lib/gnutls_cipher_int.h                    |    4 +-
 lib/gnutls_constate.c                      |    6 +-
 lib/gnutls_extensions.c                    |    1 +
 lib/gnutls_x509.c                          |    3 +-
 lib/includes/gnutls/crypto.h               |    2 +-
 lib/nettle/cipher.c                        |   88 +---
 lib/x509/privkey_pkcs8.c                   |    4 +-
 22 files changed, 1928 insertions(+), 124 deletions(-)
 create mode 100644 lib/accelerated/intel/asm/padlock-x86-64.s
 create mode 100644 lib/accelerated/intel/asm/padlock-x86.s
 create mode 100644 lib/accelerated/intel/padlock.c

diff --git a/AUTHORS b/AUTHORS
index e1d6221..0c9c1cd 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -72,6 +72,9 @@ Initial DTLS implementation.
 Ruslan Ijbulatov (LRN) <lrn1986 [at] gmail.com>
 Win32 patches.
 
+Andy Polyakov <appro [at] openssl.org>
+AES-NI and Padlock assembler code (at lib/accelerated/intel/asm/)
+
 ----------------------------------------------------------------------
 Copying and distribution of this file, with or without modification,
 are permitted in any medium without royalty provided the copyright
diff --git a/NEWS b/NEWS
index 406abaf..01649bf 100644
--- a/NEWS
+++ b/NEWS
@@ -4,6 +4,10 @@ See the end for copying conditions.
 
 * Version 3.0.3 (unreleased)
 
+** libgnutls: Do not send an empty extension structure in server 
+hello. This affected old implementations that do not support extensions. 
+Reported by J. Cameijo Cerdeira.
+
 ** libgnutls: Allow CA importing of 0 certificates to succeed.
 Reported by Jonathan Nieder <address@hidden> in
 <http://bugs.debian.org/640639>.
diff --git a/doc/cha-internals.texi b/doc/cha-internals.texi
index c442990..6a5ba66 100644
--- a/doc/cha-internals.texi
+++ b/doc/cha-internals.texi
@@ -181,11 +181,7 @@ A typical entry would be:
 @example
   int ret;
 
-  /* ...
-   */
-
 #if ENABLE_FOOBAR
-
   ret = _gnutls_ext_register (&foobar_ext);
   if (ret != GNUTLS_E_SUCCESS)
     return ret;
diff --git a/lib/accelerated/accelerated.c b/lib/accelerated/accelerated.c
index b64c8aa..ddfdf0c 100644
--- a/lib/accelerated/accelerated.c
+++ b/lib/accelerated/accelerated.c
@@ -30,6 +30,7 @@ void _gnutls_register_accel_crypto(void)
 
 #ifdef TRY_X86_OPTIMIZATIONS
   register_x86_crypto ();
+  register_padlock_crypto ();
 #endif
 
   return;
diff --git a/lib/accelerated/intel/Makefile.am b/lib/accelerated/intel/Makefile.am
index 40fc5ca..59ea536 100644
--- a/lib/accelerated/intel/Makefile.am
+++ b/lib/accelerated/intel/Makefile.am
@@ -34,12 +34,12 @@ EXTRA_DIST = aes-x86.h README license.txt
 
 noinst_LTLIBRARIES = libintel.la
 
-libintel_la_SOURCES = aes-x86.c
+libintel_la_SOURCES = aes-x86.c padlock.c
 
 if ASM_X86_64
 AM_CPPFLAGS += -DASM_X86_64
-libintel_la_SOURCES += asm/appro-aes-x86-64.s asm/appro-aes-gcm-x86-64.s aes-gcm-x86.c
+libintel_la_SOURCES += asm/appro-aes-x86-64.s asm/appro-aes-gcm-x86-64.s aes-gcm-x86.c asm/padlock-x86-64.s
 else
-libintel_la_SOURCES += asm/appro-aes-x86.s
+libintel_la_SOURCES += asm/appro-aes-x86.s asm/padlock-x86.s
 endif
 
diff --git a/lib/accelerated/intel/aes-gcm-x86.c b/lib/accelerated/intel/aes-gcm-x86.c
index cd18dc9..2e37b0a 100644
--- a/lib/accelerated/intel/aes-gcm-x86.c
+++ b/lib/accelerated/intel/aes-gcm-x86.c
@@ -75,7 +75,7 @@ aes_gcm_deinit (void *_ctx)
 }
 
 static int
-aes_gcm_cipher_init (gnutls_cipher_algorithm_t algorithm, void **_ctx)
+aes_gcm_cipher_init (gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
 {
   /* we use key size to distinguish */
   if (algorithm != GNUTLS_CIPHER_AES_128_GCM &&
@@ -98,12 +98,12 @@ aes_gcm_cipher_setkey (void *_ctx, const void *userkey, size_t keysize)
   struct aes_gcm_ctx *ctx = _ctx;
   int ret;
 
-  ret = aesni_set_encrypt_key (userkey, keysize * 8, &ctx->expanded_key);
+  ret = aesni_set_encrypt_key (userkey, keysize * 8, ALIGN16(&ctx->expanded_key));
   if (ret != 0)
     return gnutls_assert_val (GNUTLS_E_ENCRYPTION_FAILED);
 
   aesni_ecb_encrypt (ctx->gcm.H.c, ctx->gcm.H.c,
-                     GCM_BLOCK_SIZE, &ctx->expanded_key, 1);
+                     GCM_BLOCK_SIZE, ALIGN16(&ctx->expanded_key), 1);
 
   ctx->gcm.H.u[0] = bswap_64 (ctx->gcm.H.u[0]);
   ctx->gcm.H.u[1] = bswap_64 (ctx->gcm.H.u[1]);
@@ -131,7 +131,7 @@ aes_gcm_setiv (void *_ctx, const void *iv, size_t iv_size)
   ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 1;
 
   aesni_ecb_encrypt (ctx->gcm.Yi.c, ctx->gcm.EK0.c,
-                     GCM_BLOCK_SIZE, &ctx->expanded_key, 1);
+                     GCM_BLOCK_SIZE, ALIGN16(&ctx->expanded_key), 1);
   ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 2;
   return 0;
 }
@@ -160,7 +160,7 @@ ctr_encrypt_last (struct aes_gcm_ctx *ctx, const uint8_t * src,
   uint8_t out[GCM_BLOCK_SIZE];
 
   memcpy (tmp, &src[pos], length);
-  aesni_ctr32_encrypt_blocks (tmp, out, 1, &ctx->expanded_key, ctx->gcm.Yi.c);
+  aesni_ctr32_encrypt_blocks (tmp, out, 1, ALIGN16(&ctx->expanded_key), ctx->gcm.Yi.c);
 
   memcpy (&dst[pos], out, length);
 
@@ -179,7 +179,8 @@ aes_gcm_encrypt (void *_ctx, const void *src, size_t src_size,
   if (blocks > 0)
     {
       aesni_ctr32_encrypt_blocks (src, dst,
-                                  blocks, &ctx->expanded_key, ctx->gcm.Yi.c);
+                                  blocks, ALIGN16(&ctx->expanded_key), 
+                                  ctx->gcm.Yi.c);
 
       counter = _gnutls_read_uint32 (ctx->gcm.Yi.c + 12);
       counter += blocks;
@@ -211,7 +212,8 @@ aes_gcm_decrypt (void *_ctx, const void *src, size_t src_size,
   if (blocks > 0)
     {
       aesni_ctr32_encrypt_blocks (src, dst,
-                                  blocks, &ctx->expanded_key, ctx->gcm.Yi.c);
+                                  blocks, ALIGN16(&ctx->expanded_key), 
+                                  ctx->gcm.Yi.c);
 
       counter = _gnutls_read_uint32 (ctx->gcm.Yi.c + 12);
       counter += blocks;
diff --git a/lib/accelerated/intel/aes-x86.c b/lib/accelerated/intel/aes-x86.c
index 7a5ed64..9f4c4cb 100644
--- a/lib/accelerated/intel/aes-x86.c
+++ b/lib/accelerated/intel/aes-x86.c
@@ -22,8 +22,7 @@
 
 /*
  * The following code is an implementation of the AES-128-CBC cipher
- * using intel's AES instruction set. It is based on Intel reference
- * code.
+ * using intel's AES instruction set. 
  */
 
 #include <gnutls_errors.h>
@@ -36,15 +35,12 @@
 struct aes_ctx
 {
   AES_KEY expanded_key;
-  AES_KEY expanded_key_dec;
   uint8_t iv[16];
+  int enc;
 };
 
-#define ALIGN16(x) \
-        ((void *)(((unsigned long)(x)+0x0f)&~(0x0f)))
-
 static int
-aes_cipher_init (gnutls_cipher_algorithm_t algorithm, void **_ctx)
+aes_cipher_init (gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
 {
   /* we use key size to distinguish */
   if (algorithm != GNUTLS_CIPHER_AES_128_CBC
@@ -59,6 +55,8 @@ aes_cipher_init (gnutls_cipher_algorithm_t algorithm, void **_ctx)
       return GNUTLS_E_MEMORY_ERROR;
     }
 
+  ((struct aes_ctx*)(*_ctx))->enc = enc;
+
   return 0;
 }
 
@@ -68,11 +66,11 @@ aes_cipher_setkey (void *_ctx, const void *userkey, size_t keysize)
   struct aes_ctx *ctx = _ctx;
   int ret;
 
-  ret = aesni_set_encrypt_key (userkey, keysize * 8, ALIGN16(&ctx->expanded_key));
-  if (ret != 0)
-    return gnutls_assert_val (GNUTLS_E_ENCRYPTION_FAILED);
+  if (ctx->enc)
+    ret = aesni_set_encrypt_key (userkey, keysize * 8, ALIGN16(&ctx->expanded_key));
+  else
+    ret = aesni_set_decrypt_key (userkey, keysize * 8, ALIGN16(&ctx->expanded_key));
 
-  ret = aesni_set_decrypt_key (userkey, keysize * 8, ALIGN16(&ctx->expanded_key_dec));
   if (ret != 0)
     return gnutls_assert_val (GNUTLS_E_ENCRYPTION_FAILED);
 
@@ -104,7 +102,7 @@ aes_decrypt (void *_ctx, const void *src, size_t src_size,
 {
   struct aes_ctx *ctx = _ctx;
 
-  aesni_cbc_encrypt (src, dst, src_size, ALIGN16(&ctx->expanded_key_dec), ctx->iv, 0);
+  aesni_cbc_encrypt (src, dst, src_size, ALIGN16(&ctx->expanded_key), ctx->iv, 0);
 
   return 0;
 }
diff --git a/lib/accelerated/intel/aes-x86.h b/lib/accelerated/intel/aes-x86.h
index 1fab364..7692ab3 100644
--- a/lib/accelerated/intel/aes-x86.h
+++ b/lib/accelerated/intel/aes-x86.h
@@ -4,6 +4,10 @@
 #include <gnutls_int.h>
 
 void register_x86_crypto (void);
+void register_padlock_crypto(void);
+
+#define ALIGN16(x) \
+        ((void *)(((unsigned long)(x)+0x0f)&~(0x0f)))
 
 #define AES_KEY_ALIGN_SIZE 4
 #define AES_MAXNR 14
@@ -13,6 +17,7 @@ typedef struct
    * on a 16-byte boundary.
    */
   uint32_t rd_key[4 * (AES_MAXNR + 1)+AES_KEY_ALIGN_SIZE];
+  uint32_t rounds;
 } AES_KEY;
 
 void aesni_ecb_encrypt (const unsigned char *in, unsigned char *out,
diff --git a/lib/accelerated/intel/asm/padlock-x86-64.s b/lib/accelerated/intel/asm/padlock-x86-64.s
new file mode 100644
index 0000000..ebca3ae
--- /dev/null
+++ b/lib/accelerated/intel/asm/padlock-x86-64.s
@@ -0,0 +1,723 @@
+# Copyright (c) 2011, Andy Polyakov by <address@hidden>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 
+#     * Redistributions of source code must retain copyright notices,
+#      this list of conditions and the following disclaimer.
+#
+#     * Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+#
+#     * Neither the name of the Andy Polyakov nor the names of its
+#      copyright holder and contributors may be used to endorse or
+#      promote products derived from this software without specific
+#      prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.text  
+.globl padlock_capability
+.type  padlock_capability,@function
+.align 16
+padlock_capability:
+       movq    %rbx,%r8
+       xorl    %eax,%eax
+       cpuid
+       xorl    %eax,%eax
+       cmpl    $1953391939,%ebx
+       jne     .Lnoluck
+       cmpl    $1215460705,%edx
+       jne     .Lnoluck
+       cmpl    $1936487777,%ecx
+       jne     .Lnoluck
+       movl    $3221225472,%eax
+       cpuid
+       movl    %eax,%edx
+       xorl    %eax,%eax
+       cmpl    $3221225473,%edx
+       jb      .Lnoluck
+       movl    $3221225473,%eax
+       cpuid
+       movl    %edx,%eax
+       andl    $4294967279,%eax
+       orl     $16,%eax
+.Lnoluck:
+       movq    %r8,%rbx
+       .byte   0xf3,0xc3
+.size  padlock_capability,.-padlock_capability
+
+.globl padlock_key_bswap
+.type  padlock_key_bswap,@function
+.align 16
+padlock_key_bswap:
+       movl    240(%rdi),%edx
+.Lbswap_loop:
+       movl    (%rdi),%eax
+       bswapl  %eax
+       movl    %eax,(%rdi)
+       leaq    4(%rdi),%rdi
+       subl    $1,%edx
+       jnz     .Lbswap_loop
+       .byte   0xf3,0xc3
+.size  padlock_key_bswap,.-padlock_key_bswap
+
+.globl padlock_verify_context
+.type  padlock_verify_context,@function
+.align 16
+padlock_verify_context:
+       movq    %rdi,%rdx
+       pushf
+       leaq    .Lpadlock_saved_context(%rip),%rax
+       call    _padlock_verify_ctx
+       leaq    8(%rsp),%rsp
+       .byte   0xf3,0xc3
+.size  padlock_verify_context,.-padlock_verify_context
+
+.type  _padlock_verify_ctx,@function
+.align 16
+_padlock_verify_ctx:
+       movq    8(%rsp),%r8
+       btq     $30,%r8
+       jnc     .Lverified
+       cmpq    (%rax),%rdx
+       je      .Lverified
+       pushf
+       popf
+.Lverified:
+       movq    %rdx,(%rax)
+       .byte   0xf3,0xc3
+.size  _padlock_verify_ctx,.-_padlock_verify_ctx
+
+.globl padlock_reload_key
+.type  padlock_reload_key,@function
+.align 16
+padlock_reload_key:
+       pushf
+       popf
+       .byte   0xf3,0xc3
+.size  padlock_reload_key,.-padlock_reload_key
+
+.globl padlock_aes_block
+.type  padlock_aes_block,@function
+.align 16
+padlock_aes_block:
+       movq    %rbx,%r8
+       movq    $1,%rcx
+       leaq    32(%rdx),%rbx
+       leaq    16(%rdx),%rdx
+.byte  0xf3,0x0f,0xa7,0xc8     
+       movq    %r8,%rbx
+       .byte   0xf3,0xc3
+.size  padlock_aes_block,.-padlock_aes_block
+
+.globl padlock_xstore
+.type  padlock_xstore,@function
+.align 16
+padlock_xstore:
+       movl    %esi,%edx
+.byte  0x0f,0xa7,0xc0          
+       .byte   0xf3,0xc3
+.size  padlock_xstore,.-padlock_xstore
+
+.globl padlock_sha1_oneshot
+.type  padlock_sha1_oneshot,@function
+.align 16
+padlock_sha1_oneshot:
+       xorq    %rax,%rax
+       movq    %rdx,%rcx
+.byte  0xf3,0x0f,0xa6,0xc8     
+       .byte   0xf3,0xc3
+.size  padlock_sha1_oneshot,.-padlock_sha1_oneshot
+
+.globl padlock_sha1
+.type  padlock_sha1,@function
+.align 16
+padlock_sha1:
+       movq    $-1,%rax
+       movq    %rdx,%rcx
+.byte  0xf3,0x0f,0xa6,0xc8     
+       .byte   0xf3,0xc3
+.size  padlock_sha1,.-padlock_sha1
+
+.globl padlock_sha256_oneshot
+.type  padlock_sha256_oneshot,@function
+.align 16
+padlock_sha256_oneshot:
+       xorq    %rax,%rax
+       movq    %rdx,%rcx
+.byte  0xf3,0x0f,0xa6,0xd0     
+       .byte   0xf3,0xc3
+.size  padlock_sha256_oneshot,.-padlock_sha256_oneshot
+
+.globl padlock_sha256
+.type  padlock_sha256,@function
+.align 16
+padlock_sha256:
+       movq    $-1,%rax
+       movq    %rdx,%rcx
+.byte  0xf3,0x0f,0xa6,0xd0     
+       .byte   0xf3,0xc3
+.size  padlock_sha256,.-padlock_sha256
+.globl padlock_ecb_encrypt
+.type  padlock_ecb_encrypt,@function
+.align 16
+padlock_ecb_encrypt:
+       pushq   %rbp
+       pushq   %rbx
+
+       xorl    %eax,%eax
+       testq   $15,%rdx
+       jnz     .Lecb_abort
+       testq   $15,%rcx
+       jnz     .Lecb_abort
+       leaq    .Lpadlock_saved_context(%rip),%rax
+       pushf
+       cld
+       call    _padlock_verify_ctx
+       leaq    16(%rdx),%rdx
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%rdx)
+       testq   $15,%rdi
+       setz    %al
+       testq   $15,%rsi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .Lecb_aligned
+       negq    %rax
+       movq    $512,%rbx
+       notq    %rax
+       leaq    (%rsp),%rbp
+       cmpq    %rbx,%rcx
+       cmovcq  %rcx,%rbx
+       andq    %rbx,%rax
+       movq    %rcx,%rbx
+       negq    %rax
+       andq    $512-1,%rbx
+       leaq    (%rax,%rbp,1),%rsp
+       jmp     .Lecb_loop
+.align 16
+.Lecb_loop:
+       movq    %rdi,%r8
+       movq    %rsi,%r9
+       movq    %rcx,%r10
+       movq    %rbx,%rcx
+       movq    %rbx,%r11
+       testq   $15,%rdi
+       cmovnzq %rsp,%rdi
+       testq   $15,%rsi
+       jz      .Lecb_inp_aligned
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+       movq    %rbx,%rcx
+       movq    %rdi,%rsi
+.Lecb_inp_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,200      
+       movq    %r8,%rdi
+       movq    %r11,%rbx
+       testq   $15,%rdi
+       jz      .Lecb_out_aligned
+       movq    %rbx,%rcx
+       shrq    $3,%rcx
+       leaq    (%rsp),%rsi
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+.Lecb_out_aligned:
+       movq    %r9,%rsi
+       movq    %r10,%rcx
+       addq    %rbx,%rdi
+       addq    %rbx,%rsi
+       subq    %rbx,%rcx
+       movq    $512,%rbx
+       jnz     .Lecb_loop
+
+       testq   $15,%rdi
+       jz      .Lecb_done
+
+       movq    %rbp,%rcx
+       movq    %rsp,%rdi
+       subq    %rsp,%rcx
+       xorq    %rax,%rax
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xab          
+.Lecb_done:
+       leaq    (%rbp),%rsp
+       jmp     .Lecb_exit
+
+.align 16
+.Lecb_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,200      
+.Lecb_exit:
+       movl    $1,%eax
+       leaq    8(%rsp),%rsp
+.Lecb_abort:
+       popq    %rbx
+       popq    %rbp
+       .byte   0xf3,0xc3
+.size  padlock_ecb_encrypt,.-padlock_ecb_encrypt
+.globl padlock_cbc_encrypt
+.type  padlock_cbc_encrypt,@function
+.align 16
+padlock_cbc_encrypt:
+       pushq   %rbp
+       pushq   %rbx
+
+       xorl    %eax,%eax
+       testq   $15,%rdx
+       jnz     .Lcbc_abort
+       testq   $15,%rcx
+       jnz     .Lcbc_abort
+       leaq    .Lpadlock_saved_context(%rip),%rax
+       pushf
+       cld
+       call    _padlock_verify_ctx
+       leaq    16(%rdx),%rdx
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%rdx)
+       testq   $15,%rdi
+       setz    %al
+       testq   $15,%rsi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .Lcbc_aligned
+       negq    %rax
+       movq    $512,%rbx
+       notq    %rax
+       leaq    (%rsp),%rbp
+       cmpq    %rbx,%rcx
+       cmovcq  %rcx,%rbx
+       andq    %rbx,%rax
+       movq    %rcx,%rbx
+       negq    %rax
+       andq    $512-1,%rbx
+       leaq    (%rax,%rbp,1),%rsp
+       jmp     .Lcbc_loop
+.align 16
+.Lcbc_loop:
+       movq    %rdi,%r8
+       movq    %rsi,%r9
+       movq    %rcx,%r10
+       movq    %rbx,%rcx
+       movq    %rbx,%r11
+       testq   $15,%rdi
+       cmovnzq %rsp,%rdi
+       testq   $15,%rsi
+       jz      .Lcbc_inp_aligned
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+       movq    %rbx,%rcx
+       movq    %rdi,%rsi
+.Lcbc_inp_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,208      
+       movdqa  (%rax),%xmm0
+       movdqa  %xmm0,-16(%rdx)
+       movq    %r8,%rdi
+       movq    %r11,%rbx
+       testq   $15,%rdi
+       jz      .Lcbc_out_aligned
+       movq    %rbx,%rcx
+       shrq    $3,%rcx
+       leaq    (%rsp),%rsi
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+.Lcbc_out_aligned:
+       movq    %r9,%rsi
+       movq    %r10,%rcx
+       addq    %rbx,%rdi
+       addq    %rbx,%rsi
+       subq    %rbx,%rcx
+       movq    $512,%rbx
+       jnz     .Lcbc_loop
+
+       testq   $15,%rdi
+       jz      .Lcbc_done
+
+       movq    %rbp,%rcx
+       movq    %rsp,%rdi
+       subq    %rsp,%rcx
+       xorq    %rax,%rax
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xab          
+.Lcbc_done:
+       leaq    (%rbp),%rsp
+       jmp     .Lcbc_exit
+
+.align 16
+.Lcbc_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,208      
+       movdqa  (%rax),%xmm0
+       movdqa  %xmm0,-16(%rdx)
+.Lcbc_exit:
+       movl    $1,%eax
+       leaq    8(%rsp),%rsp
+.Lcbc_abort:
+       popq    %rbx
+       popq    %rbp
+       .byte   0xf3,0xc3
+.size  padlock_cbc_encrypt,.-padlock_cbc_encrypt
+.globl padlock_cfb_encrypt
+.type  padlock_cfb_encrypt,@function
+.align 16
+padlock_cfb_encrypt:
+       pushq   %rbp
+       pushq   %rbx
+
+       xorl    %eax,%eax
+       testq   $15,%rdx
+       jnz     .Lcfb_abort
+       testq   $15,%rcx
+       jnz     .Lcfb_abort
+       leaq    .Lpadlock_saved_context(%rip),%rax
+       pushf
+       cld
+       call    _padlock_verify_ctx
+       leaq    16(%rdx),%rdx
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%rdx)
+       testq   $15,%rdi
+       setz    %al
+       testq   $15,%rsi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .Lcfb_aligned
+       negq    %rax
+       movq    $512,%rbx
+       notq    %rax
+       leaq    (%rsp),%rbp
+       cmpq    %rbx,%rcx
+       cmovcq  %rcx,%rbx
+       andq    %rbx,%rax
+       movq    %rcx,%rbx
+       negq    %rax
+       andq    $512-1,%rbx
+       leaq    (%rax,%rbp,1),%rsp
+       jmp     .Lcfb_loop
+.align 16
+.Lcfb_loop:
+       movq    %rdi,%r8
+       movq    %rsi,%r9
+       movq    %rcx,%r10
+       movq    %rbx,%rcx
+       movq    %rbx,%r11
+       testq   $15,%rdi
+       cmovnzq %rsp,%rdi
+       testq   $15,%rsi
+       jz      .Lcfb_inp_aligned
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+       movq    %rbx,%rcx
+       movq    %rdi,%rsi
+.Lcfb_inp_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,224      
+       movdqa  (%rax),%xmm0
+       movdqa  %xmm0,-16(%rdx)
+       movq    %r8,%rdi
+       movq    %r11,%rbx
+       testq   $15,%rdi
+       jz      .Lcfb_out_aligned
+       movq    %rbx,%rcx
+       shrq    $3,%rcx
+       leaq    (%rsp),%rsi
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+.Lcfb_out_aligned:
+       movq    %r9,%rsi
+       movq    %r10,%rcx
+       addq    %rbx,%rdi
+       addq    %rbx,%rsi
+       subq    %rbx,%rcx
+       movq    $512,%rbx
+       jnz     .Lcfb_loop
+
+       testq   $15,%rdi
+       jz      .Lcfb_done
+
+       movq    %rbp,%rcx
+       movq    %rsp,%rdi
+       subq    %rsp,%rcx
+       xorq    %rax,%rax
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xab          
+.Lcfb_done:
+       leaq    (%rbp),%rsp
+       jmp     .Lcfb_exit
+
+.align 16
+.Lcfb_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,224      
+       movdqa  (%rax),%xmm0
+       movdqa  %xmm0,-16(%rdx)
+.Lcfb_exit:
+       movl    $1,%eax
+       leaq    8(%rsp),%rsp
+.Lcfb_abort:
+       popq    %rbx
+       popq    %rbp
+       .byte   0xf3,0xc3
+.size  padlock_cfb_encrypt,.-padlock_cfb_encrypt
+.globl padlock_ofb_encrypt
+.type  padlock_ofb_encrypt,@function
+.align 16
+padlock_ofb_encrypt:
+       pushq   %rbp
+       pushq   %rbx
+
+       xorl    %eax,%eax
+       testq   $15,%rdx
+       jnz     .Lofb_abort
+       testq   $15,%rcx
+       jnz     .Lofb_abort
+       leaq    .Lpadlock_saved_context(%rip),%rax
+       pushf
+       cld
+       call    _padlock_verify_ctx
+       leaq    16(%rdx),%rdx
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%rdx)
+       testq   $15,%rdi
+       setz    %al
+       testq   $15,%rsi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .Lofb_aligned
+       negq    %rax
+       movq    $512,%rbx
+       notq    %rax
+       leaq    (%rsp),%rbp
+       cmpq    %rbx,%rcx
+       cmovcq  %rcx,%rbx
+       andq    %rbx,%rax
+       movq    %rcx,%rbx
+       negq    %rax
+       andq    $512-1,%rbx
+       leaq    (%rax,%rbp,1),%rsp
+       jmp     .Lofb_loop
+.align 16
+.Lofb_loop:
+       movq    %rdi,%r8
+       movq    %rsi,%r9
+       movq    %rcx,%r10
+       movq    %rbx,%rcx
+       movq    %rbx,%r11
+       testq   $15,%rdi
+       cmovnzq %rsp,%rdi
+       testq   $15,%rsi
+       jz      .Lofb_inp_aligned
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+       movq    %rbx,%rcx
+       movq    %rdi,%rsi
+.Lofb_inp_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,232      
+       movdqa  (%rax),%xmm0
+       movdqa  %xmm0,-16(%rdx)
+       movq    %r8,%rdi
+       movq    %r11,%rbx
+       testq   $15,%rdi
+       jz      .Lofb_out_aligned
+       movq    %rbx,%rcx
+       shrq    $3,%rcx
+       leaq    (%rsp),%rsi
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+.Lofb_out_aligned:
+       movq    %r9,%rsi
+       movq    %r10,%rcx
+       addq    %rbx,%rdi
+       addq    %rbx,%rsi
+       subq    %rbx,%rcx
+       movq    $512,%rbx
+       jnz     .Lofb_loop
+
+       testq   $15,%rdi
+       jz      .Lofb_done
+
+       movq    %rbp,%rcx
+       movq    %rsp,%rdi
+       subq    %rsp,%rcx
+       xorq    %rax,%rax
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xab          
+.Lofb_done:
+       leaq    (%rbp),%rsp
+       jmp     .Lofb_exit
+
+.align 16
+.Lofb_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,232      
+       movdqa  (%rax),%xmm0
+       movdqa  %xmm0,-16(%rdx)
+.Lofb_exit:
+       movl    $1,%eax
+       leaq    8(%rsp),%rsp
+.Lofb_abort:
+       popq    %rbx
+       popq    %rbp
+       .byte   0xf3,0xc3
+.size  padlock_ofb_encrypt,.-padlock_ofb_encrypt
+.globl padlock_ctr16_encrypt
+.type  padlock_ctr16_encrypt,@function
+.align 16
+padlock_ctr16_encrypt:
+       pushq   %rbp
+       pushq   %rbx
+
+       xorl    %eax,%eax
+       testq   $15,%rdx
+       jnz     .Lctr16_abort
+       testq   $15,%rcx
+       jnz     .Lctr16_abort
+       leaq    .Lpadlock_saved_context(%rip),%rax
+       pushf
+       cld
+       call    _padlock_verify_ctx
+       leaq    16(%rdx),%rdx
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%rdx)
+       testq   $15,%rdi
+       setz    %al
+       testq   $15,%rsi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .Lctr16_aligned
+       negq    %rax
+       movq    $512,%rbx
+       notq    %rax
+       leaq    (%rsp),%rbp
+       cmpq    %rbx,%rcx
+       cmovcq  %rcx,%rbx
+       andq    %rbx,%rax
+       movq    %rcx,%rbx
+       negq    %rax
+       andq    $512-1,%rbx
+       leaq    (%rax,%rbp,1),%rsp
+       jmp     .Lctr16_loop
+.align 16
+.Lctr16_loop:
+       movq    %rdi,%r8
+       movq    %rsi,%r9
+       movq    %rcx,%r10
+       movq    %rbx,%rcx
+       movq    %rbx,%r11
+       testq   $15,%rdi
+       cmovnzq %rsp,%rdi
+       testq   $15,%rsi
+       jz      .Lctr16_inp_aligned
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+       movq    %rbx,%rcx
+       movq    %rdi,%rsi
+.Lctr16_inp_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,216      
+       movq    %r8,%rdi
+       movq    %r11,%rbx
+       testq   $15,%rdi
+       jz      .Lctr16_out_aligned
+       movq    %rbx,%rcx
+       shrq    $3,%rcx
+       leaq    (%rsp),%rsi
+.byte  0xf3,0x48,0xa5          
+       subq    %rbx,%rdi
+.Lctr16_out_aligned:
+       movq    %r9,%rsi
+       movq    %r10,%rcx
+       addq    %rbx,%rdi
+       addq    %rbx,%rsi
+       subq    %rbx,%rcx
+       movq    $512,%rbx
+       jnz     .Lctr16_loop
+
+       testq   $15,%rdi
+       jz      .Lctr16_done
+
+       movq    %rbp,%rcx
+       movq    %rsp,%rdi
+       subq    %rsp,%rcx
+       xorq    %rax,%rax
+       shrq    $3,%rcx
+.byte  0xf3,0x48,0xab          
+.Lctr16_done:
+       leaq    (%rbp),%rsp
+       jmp     .Lctr16_exit
+
+.align 16
+.Lctr16_aligned:
+       leaq    -16(%rdx),%rax
+       leaq    16(%rdx),%rbx
+       shrq    $4,%rcx
+.byte  0xf3,0x0f,0xa7,216      
+.Lctr16_exit:
+       movl    $1,%eax
+       leaq    8(%rsp),%rsp
+.Lctr16_abort:
+       popq    %rbx
+       popq    %rbp
+       .byte   0xf3,0xc3
+.size  padlock_ctr16_encrypt,.-padlock_ctr16_encrypt
+.byte  86,73,65,32,80,97,100,108,111,99,107,32,120,56,54,95,54,52,32,109,111,100,117,108,101,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.align 16
+.data  
+.align 8
+.Lpadlock_saved_context:
+.quad  0
+
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
diff --git a/lib/accelerated/intel/asm/padlock-x86.s b/lib/accelerated/intel/asm/padlock-x86.s
new file mode 100644
index 0000000..e81c967
--- /dev/null
+++ b/lib/accelerated/intel/asm/padlock-x86.s
@@ -0,0 +1,822 @@
+# Copyright (c) 2011, Andy Polyakov by <address@hidden>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 
+#     * Redistributions of source code must retain copyright notices,
+#      this list of conditions and the following disclaimer.
+#
+#     * Redistributions in binary form must reproduce the above
+#      copyright notice, this list of conditions and the following
+#      disclaimer in the documentation and/or other materials
+#      provided with the distribution.
+#
+#     * Neither the name of the Andy Polyakov nor the names of its
+#      copyright holder and contributors may be used to endorse or
+#      promote products derived from this software without specific
+#      prior written permission.
+#
+# ALTERNATIVELY, provided that this notice is retained in full, this
+# product may be distributed under the terms of the GNU General Public
+# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
+# those given above.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.file  "e_padlock-x86.s"
+.text
+.globl padlock_capability
+.type  padlock_capability,@function
+.align 16
+# unsigned int padlock_capability(void)
+# Returns the Centaur extended-feature flags (cpuid leaf 0xC0000001,
+# EDX) on a VIA CPU, or 0 when cpuid or the Centaur leaves are absent.
+padlock_capability:
+.L_padlock_capability_begin:
+       pushl   %ebx
+       pushfl
+       popl    %eax
+       movl    %eax,%ecx
+       xorl    $2097152,%eax           # toggle EFLAGS.ID (bit 21)
+       pushl   %eax
+       popfl
+       pushfl
+       popl    %eax
+       xorl    %eax,%ecx
+       xorl    %eax,%eax
+       btl     $21,%ecx                # ID bit would not toggle -> no cpuid
+       jnc     .L000noluck
+       .byte   0x0f,0xa2               # cpuid, leaf 0: vendor string
+       xorl    %eax,%eax
+       cmpl    $0x746e6543,%ebx        # "Cent"
+       jne     .L000noluck
+       cmpl    $0x48727561,%edx        # "aurH"
+       jne     .L000noluck
+       cmpl    $0x736c7561,%ecx        # "auls"
+       jne     .L000noluck
+       movl    $3221225472,%eax        # 0xC0000000: max Centaur leaf
+       .byte   0x0f,0xa2               # cpuid
+       movl    %eax,%edx
+       xorl    %eax,%eax
+       cmpl    $3221225473,%edx        # leaf 0xC0000001 supported?
+       jb      .L000noluck
+       movl    $1,%eax
+       .byte   0x0f,0xa2               # cpuid, leaf 1: processor signature
+       orl     $15,%eax
+       xorl    %ebx,%ebx
+       andl    $4095,%eax
+       cmpl    $1791,%eax              # signature 0x6xF -- singles out one model; TODO confirm which
+       sete    %bl
+       movl    $3221225473,%eax        # 0xC0000001: Centaur extended features
+       pushl   %ebx
+       .byte   0x0f,0xa2               # cpuid
+       popl    %ebx
+       movl    %edx,%eax
+       shll    $4,%ebx
+       andl    $4294967279,%eax        # clear feature bit 4 (mask 0xFFFFFFEF)...
+       orl     %ebx,%eax               # ...and re-set it only for the flagged signature
+.L000noluck:
+       popl    %ebx
+       ret
+.size  padlock_capability,.-.L_padlock_capability_begin
+.globl padlock_key_bswap
+.type  padlock_key_bswap,@function
+.align 16
+# void padlock_key_bswap(AES_KEY *key)
+# Byte-swaps 32-bit words of the key schedule in place.  The word
+# count is loaded from offset 240 of the structure (the AES_KEY
+# 'rounds' field, judging by padlock.c) -- NOTE(review): that swaps
+# only 'rounds' words rather than the full 4*(rounds+1)-word
+# schedule; confirm the intended coverage against callers.
+padlock_key_bswap:
+.L_padlock_key_bswap_begin:
+       movl    4(%esp),%edx            # edx = key
+       movl    240(%edx),%ecx          # ecx = word count (see note above)
+.L001bswap_loop:
+       movl    (%edx),%eax
+       bswap   %eax
+       movl    %eax,(%edx)
+       leal    4(%edx),%edx
+       subl    $1,%ecx
+       jnz     .L001bswap_loop
+       ret
+.size  padlock_key_bswap,.-.L_padlock_key_bswap_begin
+.globl padlock_verify_context
+.type  padlock_verify_context,@function
+.align 16
+# void padlock_verify_context(struct padlock_cipher_data *ctx)
+# Records ctx as the engine's current context; the helper performs a
+# pushfl/popfl sequence when it differs from the previously saved one.
+padlock_verify_context:
+.L_padlock_verify_context_begin:
+       movl    4(%esp),%edx            # edx = ctx
+       leal    .Lpadlock_saved_context-.L002verify_pic_point,%eax  # PIC displacement of the saved-context slot
+       pushfl                          # eflags copy examined by the helper
+       call    _padlock_verify_ctx
+.L002verify_pic_point:
+       leal    4(%esp),%esp            # drop the pushed eflags
+       ret
+.size  padlock_verify_context,.-.L_padlock_verify_context_begin
+.type  _padlock_verify_ctx,@function
+.align 16
+# Internal helper.  On entry: eax = displacement of
+# .Lpadlock_saved_context relative to this call's return address,
+# edx = new context pointer, 4(%esp) = caller's pushed eflags copy.
+_padlock_verify_ctx:
+       addl    (%esp),%eax             # eax = &.Lpadlock_saved_context (PIC resolve)
+       btl     $30,4(%esp)             # test a bit in the saved eflags -- TODO confirm its meaning
+       jnc     .L003verified
+       cmpl    (%eax),%edx             # same context as last time?
+       je      .L003verified
+       pushfl                          # different context: pushfl/popfl before next xcrypt
+       popfl
+.L003verified:
+       movl    %edx,(%eax)             # remember the current context
+       ret
+.size  _padlock_verify_ctx,.-_padlock_verify_ctx
+.globl padlock_reload_key
+.type  padlock_reload_key,@function
+.align 16
+# void padlock_reload_key(void)
+# pushfl/popfl sequence executed before the next xcrypt operation
+# (same sequence _padlock_verify_ctx issues on a context change).
+padlock_reload_key:
+.L_padlock_reload_key_begin:
+       pushfl
+       popfl
+       ret
+.size  padlock_reload_key,.-.L_padlock_reload_key_begin
+.globl padlock_aes_block
+.type  padlock_aes_block,@function
+.align 16
+# void padlock_aes_block(void *out, const void *inp,
+#                        struct padlock_cipher_data *ctx)
+# Processes a single 16-byte block with rep xcryptecb.
+padlock_aes_block:
+.L_padlock_aes_block_begin:
+       pushl   %edi
+       pushl   %esi
+       pushl   %ebx
+       movl    16(%esp),%edi           # edi = out
+       movl    20(%esp),%esi           # esi = inp
+       movl    24(%esp),%edx           # edx = ctx
+       movl    $1,%ecx                 # exactly one block
+       leal    32(%edx),%ebx           # ebx = ctx+32: key schedule
+       leal    16(%edx),%edx           # edx = ctx+16: control word
+.byte  243,15,167,200                  # rep xcryptecb
+       popl    %ebx
+       popl    %esi
+       popl    %edi
+       ret
+.size  padlock_aes_block,.-.L_padlock_aes_block_begin
+.globl padlock_ecb_encrypt
+.type  padlock_ecb_encrypt,@function
+.align 16
+# int padlock_ecb_encrypt(void *out, const void *inp,
+#                         struct padlock_cipher_data *ctx, size_t len)
+# Bulk ECB via rep xcryptecb.  ctx must be 16-byte aligned and len a
+# multiple of 16 (checked below).  Misaligned in/out buffers are
+# bounced through a <=512-byte stack buffer chunk by chunk; the
+# buffer is wiped (rep stosl) before returning.
+padlock_ecb_encrypt:
+.L_padlock_ecb_encrypt_begin:
+       pushl   %ebp
+       pushl   %ebx
+       pushl   %esi
+       pushl   %edi
+       movl    20(%esp),%edi           # edi = out
+       movl    24(%esp),%esi           # esi = inp
+       movl    28(%esp),%edx           # edx = ctx
+       movl    32(%esp),%ecx           # ecx = len
+       testl   $15,%edx                # ctx must be 16-byte aligned
+       jnz     .L004ecb_abort
+       testl   $15,%ecx                # len must be a multiple of 16
+       jnz     .L004ecb_abort
+       leal    .Lpadlock_saved_context-.L005ecb_pic_point,%eax
+       pushfl
+       cld
+       call    _padlock_verify_ctx
+.L005ecb_pic_point:
+       leal    16(%edx),%edx           # edx = control word
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%edx)              # cword bit 5 set -> skip bounce logic -- TODO confirm bit meaning
+       jnz     .L006ecb_aligned
+       testl   $15,%edi
+       setz    %al                     # al = out is aligned
+       testl   $15,%esi
+       setz    %bl                     # bl = inp is aligned
+       testl   %ebx,%eax
+       jnz     .L006ecb_aligned        # both aligned: single-shot fast path
+       negl    %eax
+       movl    $512,%ebx               # maximum chunk size
+       notl    %eax
+       leal    -24(%esp),%ebp          # ebp = spill area for saved pointers
+       cmpl    %ebx,%ecx
+       cmovcl  %ecx,%ebx               # chunk = min(len, 512)
+       andl    %ebx,%eax
+       movl    %ecx,%ebx
+       negl    %eax
+       andl    $511,%ebx               # first chunk = len % 512 (may be 0)
+       leal    (%eax,%ebp,1),%esp      # carve the bounce buffer off the stack
+       andl    $-16,%esp               # keep esp 16-byte aligned
+       jmp     .L007ecb_loop
+.align 16
+.L007ecb_loop:
+       movl    %edi,(%ebp)             # spill out, inp, remaining len, chunk
+       movl    %esi,4(%ebp)
+       movl    %ecx,8(%ebp)
+       movl    %ebx,%ecx
+       movl    %ebx,12(%ebp)
+       testl   $15,%edi
+       cmovnzl %esp,%edi               # misaligned out -> target the bounce buffer
+       testl   $15,%esi
+       jz      .L008ecb_inp_aligned
+       shrl    $2,%ecx
+.byte  243,165                         # rep movsl: copy input into the bounce buffer
+       subl    %ebx,%edi
+       movl    %ebx,%ecx
+       movl    %edi,%esi               # read this chunk from the bounce buffer
+.L008ecb_inp_aligned:
+       leal    -16(%edx),%eax          # eax = ctx->iv address
+       leal    16(%edx),%ebx           # ebx = key schedule
+       shrl    $4,%ecx                 # ecx = block count
+.byte  243,15,167,200                  # rep xcryptecb
+       movl    (%ebp),%edi             # restore the real out pointer
+       movl    12(%ebp),%ebx
+       testl   $15,%edi
+       jz      .L009ecb_out_aligned
+       movl    %ebx,%ecx
+       shrl    $2,%ecx
+       leal    (%esp),%esi
+.byte  243,165                         # rep movsl: flush bounce buffer to out
+       subl    %ebx,%edi
+.L009ecb_out_aligned:
+       movl    4(%ebp),%esi
+       movl    8(%ebp),%ecx
+       addl    %ebx,%edi               # advance by the processed chunk
+       addl    %ebx,%esi
+       subl    %ebx,%ecx
+       movl    $512,%ebx               # subsequent chunks are full-size
+       jnz     .L007ecb_loop
+       testl   $15,%edi
+       jz      .L010ecb_done
+       movl    %ebp,%ecx               # wipe the bounce buffer (held plaintext)
+       movl    %esp,%edi
+       subl    %esp,%ecx
+       xorl    %eax,%eax
+       shrl    $2,%ecx
+.byte  243,171                         # rep stosl
+.L010ecb_done:
+       leal    24(%ebp),%esp           # unwind the carved stack area
+       jmp     .L011ecb_exit
+.align 16
+.L006ecb_aligned:
+       leal    -16(%edx),%eax
+       leal    16(%edx),%ebx
+       shrl    $4,%ecx
+.byte  243,15,167,200                  # rep xcryptecb over the whole buffer
+.L011ecb_exit:
+       movl    $1,%eax                 # success
+       leal    4(%esp),%esp            # drop the pushed eflags
+.L004ecb_abort:
+       popl    %edi
+       popl    %esi
+       popl    %ebx
+       popl    %ebp
+       ret
+.size  padlock_ecb_encrypt,.-.L_padlock_ecb_encrypt_begin
+.globl padlock_cbc_encrypt
+.type  padlock_cbc_encrypt,@function
+.align 16
+# int padlock_cbc_encrypt(void *out, const void *inp,
+#                         struct padlock_cipher_data *ctx, size_t len)
+# Bulk CBC via rep xcryptcbc.  Same preconditions and bounce-buffer
+# scheme as padlock_ecb_encrypt; additionally reloads/stores the IV
+# at ctx after each xcrypt (movdqa pair below).
+padlock_cbc_encrypt:
+.L_padlock_cbc_encrypt_begin:
+       pushl   %ebp
+       pushl   %ebx
+       pushl   %esi
+       pushl   %edi
+       movl    20(%esp),%edi           # edi = out
+       movl    24(%esp),%esi           # esi = inp
+       movl    28(%esp),%edx           # edx = ctx
+       movl    32(%esp),%ecx           # ecx = len
+       testl   $15,%edx                # ctx must be 16-byte aligned
+       jnz     .L012cbc_abort
+       testl   $15,%ecx                # len must be a multiple of 16
+       jnz     .L012cbc_abort
+       leal    .Lpadlock_saved_context-.L013cbc_pic_point,%eax
+       pushfl
+       cld
+       call    _padlock_verify_ctx
+.L013cbc_pic_point:
+       leal    16(%edx),%edx           # edx = control word
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%edx)              # cword bit 5 -> skip bounce logic -- TODO confirm bit meaning
+       jnz     .L014cbc_aligned
+       testl   $15,%edi
+       setz    %al
+       testl   $15,%esi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .L014cbc_aligned        # both buffers aligned: fast path
+       negl    %eax
+       movl    $512,%ebx               # maximum chunk size
+       notl    %eax
+       leal    -24(%esp),%ebp
+       cmpl    %ebx,%ecx
+       cmovcl  %ecx,%ebx               # chunk = min(len, 512)
+       andl    %ebx,%eax
+       movl    %ecx,%ebx
+       negl    %eax
+       andl    $511,%ebx               # first chunk = len % 512 (may be 0)
+       leal    (%eax,%ebp,1),%esp      # carve bounce buffer off the stack
+       andl    $-16,%esp
+       jmp     .L015cbc_loop
+.align 16
+.L015cbc_loop:
+       movl    %edi,(%ebp)             # spill out, inp, remaining len, chunk
+       movl    %esi,4(%ebp)
+       movl    %ecx,8(%ebp)
+       movl    %ebx,%ecx
+       movl    %ebx,12(%ebp)
+       testl   $15,%edi
+       cmovnzl %esp,%edi               # misaligned out -> bounce buffer
+       testl   $15,%esi
+       jz      .L016cbc_inp_aligned
+       shrl    $2,%ecx
+.byte  243,165                         # rep movsl: stage input in bounce buffer
+       subl    %ebx,%edi
+       movl    %ebx,%ecx
+       movl    %edi,%esi
+.L016cbc_inp_aligned:
+       leal    -16(%edx),%eax          # eax = ctx->iv address
+       leal    16(%edx),%ebx           # ebx = key schedule
+       shrl    $4,%ecx                 # ecx = block count
+.byte  243,15,167,208                  # rep xcryptcbc
+       movdqa  (%eax),%xmm0            # reload the engine-updated IV...
+       movdqa  %xmm0,-16(%edx)         # ...and store it back at ctx->iv -- TODO confirm necessity
+       movl    (%ebp),%edi
+       movl    12(%ebp),%ebx
+       testl   $15,%edi
+       jz      .L017cbc_out_aligned
+       movl    %ebx,%ecx
+       shrl    $2,%ecx
+       leal    (%esp),%esi
+.byte  243,165                         # rep movsl: flush bounce buffer to out
+       subl    %ebx,%edi
+.L017cbc_out_aligned:
+       movl    4(%ebp),%esi
+       movl    8(%ebp),%ecx
+       addl    %ebx,%edi               # advance by the processed chunk
+       addl    %ebx,%esi
+       subl    %ebx,%ecx
+       movl    $512,%ebx
+       jnz     .L015cbc_loop
+       testl   $15,%edi
+       jz      .L018cbc_done
+       movl    %ebp,%ecx               # wipe the bounce buffer
+       movl    %esp,%edi
+       subl    %esp,%ecx
+       xorl    %eax,%eax
+       shrl    $2,%ecx
+.byte  243,171                         # rep stosl
+.L018cbc_done:
+       leal    24(%ebp),%esp
+       jmp     .L019cbc_exit
+.align 16
+.L014cbc_aligned:
+       leal    -16(%edx),%eax
+       leal    16(%edx),%ebx
+       shrl    $4,%ecx
+.byte  243,15,167,208                  # rep xcryptcbc over the whole buffer
+       movdqa  (%eax),%xmm0
+       movdqa  %xmm0,-16(%edx)
+.L019cbc_exit:
+       movl    $1,%eax                 # success
+       leal    4(%esp),%esp            # drop the pushed eflags
+.L012cbc_abort:
+       popl    %edi
+       popl    %esi
+       popl    %ebx
+       popl    %ebp
+       ret
+.size  padlock_cbc_encrypt,.-.L_padlock_cbc_encrypt_begin
+.globl padlock_cfb_encrypt
+.type  padlock_cfb_encrypt,@function
+.align 16
+# int padlock_cfb_encrypt(void *out, const void *inp,
+#                         struct padlock_cipher_data *ctx, size_t len)
+# Bulk CFB via rep xcryptcfb.  Structure is identical to
+# padlock_cbc_encrypt (same preconditions, bounce buffer, IV
+# store-back); only the xcrypt opcode differs.
+padlock_cfb_encrypt:
+.L_padlock_cfb_encrypt_begin:
+       pushl   %ebp
+       pushl   %ebx
+       pushl   %esi
+       pushl   %edi
+       movl    20(%esp),%edi           # edi = out
+       movl    24(%esp),%esi           # esi = inp
+       movl    28(%esp),%edx           # edx = ctx
+       movl    32(%esp),%ecx           # ecx = len
+       testl   $15,%edx                # ctx must be 16-byte aligned
+       jnz     .L020cfb_abort
+       testl   $15,%ecx                # len must be a multiple of 16
+       jnz     .L020cfb_abort
+       leal    .Lpadlock_saved_context-.L021cfb_pic_point,%eax
+       pushfl
+       cld
+       call    _padlock_verify_ctx
+.L021cfb_pic_point:
+       leal    16(%edx),%edx           # edx = control word
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%edx)
+       jnz     .L022cfb_aligned
+       testl   $15,%edi
+       setz    %al
+       testl   $15,%esi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .L022cfb_aligned        # both buffers aligned: fast path
+       negl    %eax
+       movl    $512,%ebx               # maximum chunk size
+       notl    %eax
+       leal    -24(%esp),%ebp
+       cmpl    %ebx,%ecx
+       cmovcl  %ecx,%ebx               # chunk = min(len, 512)
+       andl    %ebx,%eax
+       movl    %ecx,%ebx
+       negl    %eax
+       andl    $511,%ebx               # first chunk = len % 512 (may be 0)
+       leal    (%eax,%ebp,1),%esp      # carve bounce buffer off the stack
+       andl    $-16,%esp
+       jmp     .L023cfb_loop
+.align 16
+.L023cfb_loop:
+       movl    %edi,(%ebp)             # spill out, inp, remaining len, chunk
+       movl    %esi,4(%ebp)
+       movl    %ecx,8(%ebp)
+       movl    %ebx,%ecx
+       movl    %ebx,12(%ebp)
+       testl   $15,%edi
+       cmovnzl %esp,%edi               # misaligned out -> bounce buffer
+       testl   $15,%esi
+       jz      .L024cfb_inp_aligned
+       shrl    $2,%ecx
+.byte  243,165                         # rep movsl: stage input in bounce buffer
+       subl    %ebx,%edi
+       movl    %ebx,%ecx
+       movl    %edi,%esi
+.L024cfb_inp_aligned:
+       leal    -16(%edx),%eax          # eax = ctx->iv address
+       leal    16(%edx),%ebx           # ebx = key schedule
+       shrl    $4,%ecx                 # ecx = block count
+.byte  243,15,167,224                  # rep xcryptcfb
+       movdqa  (%eax),%xmm0            # store the updated IV back at ctx->iv
+       movdqa  %xmm0,-16(%edx)
+       movl    (%ebp),%edi
+       movl    12(%ebp),%ebx
+       testl   $15,%edi
+       jz      .L025cfb_out_aligned
+       movl    %ebx,%ecx
+       shrl    $2,%ecx
+       leal    (%esp),%esi
+.byte  243,165                         # rep movsl: flush bounce buffer to out
+       subl    %ebx,%edi
+.L025cfb_out_aligned:
+       movl    4(%ebp),%esi
+       movl    8(%ebp),%ecx
+       addl    %ebx,%edi               # advance by the processed chunk
+       addl    %ebx,%esi
+       subl    %ebx,%ecx
+       movl    $512,%ebx
+       jnz     .L023cfb_loop
+       testl   $15,%edi
+       jz      .L026cfb_done
+       movl    %ebp,%ecx               # wipe the bounce buffer
+       movl    %esp,%edi
+       subl    %esp,%ecx
+       xorl    %eax,%eax
+       shrl    $2,%ecx
+.byte  243,171                         # rep stosl
+.L026cfb_done:
+       leal    24(%ebp),%esp
+       jmp     .L027cfb_exit
+.align 16
+.L022cfb_aligned:
+       leal    -16(%edx),%eax
+       leal    16(%edx),%ebx
+       shrl    $4,%ecx
+.byte  243,15,167,224                  # rep xcryptcfb over the whole buffer
+       movdqa  (%eax),%xmm0
+       movdqa  %xmm0,-16(%edx)
+.L027cfb_exit:
+       movl    $1,%eax                 # success
+       leal    4(%esp),%esp            # drop the pushed eflags
+.L020cfb_abort:
+       popl    %edi
+       popl    %esi
+       popl    %ebx
+       popl    %ebp
+       ret
+.size  padlock_cfb_encrypt,.-.L_padlock_cfb_encrypt_begin
+.globl padlock_ofb_encrypt
+.type  padlock_ofb_encrypt,@function
+.align 16
+# int padlock_ofb_encrypt(void *out, const void *inp,
+#                         struct padlock_cipher_data *ctx, size_t len)
+# Bulk OFB via rep xcryptofb.  Structure is identical to
+# padlock_cbc_encrypt (same preconditions, bounce buffer, IV
+# store-back); only the xcrypt opcode differs.
+padlock_ofb_encrypt:
+.L_padlock_ofb_encrypt_begin:
+       pushl   %ebp
+       pushl   %ebx
+       pushl   %esi
+       pushl   %edi
+       movl    20(%esp),%edi           # edi = out
+       movl    24(%esp),%esi           # esi = inp
+       movl    28(%esp),%edx           # edx = ctx
+       movl    32(%esp),%ecx           # ecx = len
+       testl   $15,%edx                # ctx must be 16-byte aligned
+       jnz     .L028ofb_abort
+       testl   $15,%ecx                # len must be a multiple of 16
+       jnz     .L028ofb_abort
+       leal    .Lpadlock_saved_context-.L029ofb_pic_point,%eax
+       pushfl
+       cld
+       call    _padlock_verify_ctx
+.L029ofb_pic_point:
+       leal    16(%edx),%edx           # edx = control word
+       xorl    %eax,%eax
+       xorl    %ebx,%ebx
+       testl   $32,(%edx)
+       jnz     .L030ofb_aligned
+       testl   $15,%edi
+       setz    %al
+       testl   $15,%esi
+       setz    %bl
+       testl   %ebx,%eax
+       jnz     .L030ofb_aligned        # both buffers aligned: fast path
+       negl    %eax
+       movl    $512,%ebx               # maximum chunk size
+       notl    %eax
+       leal    -24(%esp),%ebp
+       cmpl    %ebx,%ecx
+       cmovcl  %ecx,%ebx               # chunk = min(len, 512)
+       andl    %ebx,%eax
+       movl    %ecx,%ebx
+       negl    %eax
+       andl    $511,%ebx               # first chunk = len % 512 (may be 0)
+       leal    (%eax,%ebp,1),%esp      # carve bounce buffer off the stack
+       andl    $-16,%esp
+       jmp     .L031ofb_loop
+.align 16
+.L031ofb_loop:
+       movl    %edi,(%ebp)             # spill out, inp, remaining len, chunk
+       movl    %esi,4(%ebp)
+       movl    %ecx,8(%ebp)
+       movl    %ebx,%ecx
+       movl    %ebx,12(%ebp)
+       testl   $15,%edi
+       cmovnzl %esp,%edi               # misaligned out -> bounce buffer
+       testl   $15,%esi
+       jz      .L032ofb_inp_aligned
+       shrl    $2,%ecx
+.byte  243,165                         # rep movsl: stage input in bounce buffer
+       subl    %ebx,%edi
+       movl    %ebx,%ecx
+       movl    %edi,%esi
+.L032ofb_inp_aligned:
+       leal    -16(%edx),%eax          # eax = ctx->iv address
+       leal    16(%edx),%ebx           # ebx = key schedule
+       shrl    $4,%ecx                 # ecx = block count
+.byte  243,15,167,232                  # rep xcryptofb
+       movdqa  (%eax),%xmm0            # store the updated IV back at ctx->iv
+       movdqa  %xmm0,-16(%edx)
+       movl    (%ebp),%edi
+       movl    12(%ebp),%ebx
+       testl   $15,%edi
+       jz      .L033ofb_out_aligned
+       movl    %ebx,%ecx
+       shrl    $2,%ecx
+       leal    (%esp),%esi
+.byte  243,165                         # rep movsl: flush bounce buffer to out
+       subl    %ebx,%edi
+.L033ofb_out_aligned:
+       movl    4(%ebp),%esi
+       movl    8(%ebp),%ecx
+       addl    %ebx,%edi               # advance by the processed chunk
+       addl    %ebx,%esi
+       subl    %ebx,%ecx
+       movl    $512,%ebx
+       jnz     .L031ofb_loop
+       testl   $15,%edi
+       jz      .L034ofb_done
+       movl    %ebp,%ecx               # wipe the bounce buffer
+       movl    %esp,%edi
+       subl    %esp,%ecx
+       xorl    %eax,%eax
+       shrl    $2,%ecx
+.byte  243,171                         # rep stosl
+.L034ofb_done:
+       leal    24(%ebp),%esp
+       jmp     .L035ofb_exit
+.align 16
+.L030ofb_aligned:
+       leal    -16(%edx),%eax
+       leal    16(%edx),%ebx
+       shrl    $4,%ecx
+.byte  243,15,167,232                  # rep xcryptofb over the whole buffer
+       movdqa  (%eax),%xmm0
+       movdqa  %xmm0,-16(%edx)
+.L035ofb_exit:
+       movl    $1,%eax                 # success
+       leal    4(%esp),%esp            # drop the pushed eflags
+.L028ofb_abort:
+       popl    %edi
+       popl    %esi
+       popl    %ebx
+       popl    %ebp
+       ret
+.size  padlock_ofb_encrypt,.-.L_padlock_ofb_encrypt_begin
+.globl padlock_ctr16_encrypt
+.type  padlock_ctr16_encrypt,@function
+.align 16
+# int padlock_ctr16_encrypt(void *out, const void *inp,
+#                           struct padlock_cipher_data *ctx, size_t len)
+# CTR mode built in software on top of rep xcryptecb: a batch of
+# counter blocks is materialized on the stack (incrementing the last
+# big-endian 16-bit word of ctx->iv), ECB-encrypted in place, then
+# XORed into the input.  Stack keystream is wiped before returning.
+padlock_ctr16_encrypt:
+.L_padlock_ctr16_encrypt_begin:
+       pushl   %ebp
+       pushl   %ebx
+       pushl   %esi
+       pushl   %edi
+       movl    20(%esp),%edi           # edi = out
+       movl    24(%esp),%esi           # esi = inp
+       movl    28(%esp),%edx           # edx = ctx
+       movl    32(%esp),%ecx           # ecx = len
+       testl   $15,%edx                # ctx must be 16-byte aligned
+       jnz     .L036ctr16_abort
+       testl   $15,%ecx                # len must be a multiple of 16
+       jnz     .L036ctr16_abort
+       leal    .Lpadlock_saved_context-.L037ctr16_pic_point,%eax
+       pushfl
+       cld
+       call    _padlock_verify_ctx
+.L037ctr16_pic_point:
+       leal    16(%edx),%edx           # edx = control word
+       xorl    %eax,%eax
+       movdqa  -16(%edx),%xmm0         # xmm0 = counter block (ctx->iv)
+       movl    $512,%ebx               # maximum chunk size
+       notl    %eax
+       leal    -24(%esp),%ebp
+       cmpl    %ebx,%ecx
+       cmovcl  %ecx,%ebx               # chunk = min(len, 512)
+       andl    %ebx,%eax
+       movl    %ecx,%ebx
+       negl    %eax
+       andl    $511,%ebx               # first chunk = len % 512 (may be 0)
+       leal    (%eax,%ebp,1),%esp      # carve keystream buffer off the stack
+       andl    $-16,%esp
+       jmp     .L038ctr16_loop
+.align 16
+.L038ctr16_loop:
+       movl    %edi,(%ebp)             # spill out, inp, remaining len, chunk
+       movl    %esi,4(%ebp)
+       movl    %ecx,8(%ebp)
+       movl    %ebx,%ecx
+       movl    %ebx,12(%ebp)
+       pextrw  $7,%xmm0,%ecx           # ecx = last 16-bit word of the counter block
+       movl    $1,%esi
+       xorl    %edi,%edi
+       xchgb   %cl,%ch                 # convert big-endian counter to host order
+.L039ctr16_prepare:
+       movdqa  %xmm0,(%esp,%edi,1)     # emit current counter block
+       leal    (%ecx,%esi,1),%eax      # next counter value
+       xchgb   %al,%ah                 # back to big-endian
+       leal    16(%edi),%edi
+       pinsrw  $7,%eax,%xmm0           # patch it into the counter block
+       leal    1(%esi),%esi
+       cmpl    %ebx,%edi
+       jb      .L039ctr16_prepare
+       leal    (%esp),%esi             # encrypt the counter blocks in place
+       leal    (%esp),%edi
+       movl    %ebx,%ecx
+       leal    -16(%edx),%eax
+       leal    16(%edx),%ebx           # ebx = key schedule
+       shrl    $4,%ecx                 # ecx = block count
+.byte  243,15,167,200                  # rep xcryptecb
+       movl    (%ebp),%edi
+       movl    12(%ebp),%ebx
+       movl    4(%ebp),%esi
+       xorl    %ecx,%ecx
+.L040ctr16_xor:
+       movdqu  (%esi,%ecx,1),%xmm1     # input block (may be unaligned)
+       leal    16(%ecx),%ecx
+       pxor    -16(%esp,%ecx,1),%xmm1  # XOR with keystream block
+       movdqu  %xmm1,-16(%edi,%ecx,1)
+       cmpl    %ebx,%ecx
+       jb      .L040ctr16_xor
+       movl    8(%ebp),%ecx
+       addl    %ebx,%edi               # advance by the processed chunk
+       addl    %ebx,%esi
+       subl    %ebx,%ecx
+       movl    $512,%ebx
+       jnz     .L038ctr16_loop
+       movdqa  %xmm0,-16(%edx)         # persist the advanced counter in ctx->iv
+       pxor    %xmm0,%xmm0             # scrub xmm registers
+       pxor    %xmm1,%xmm1
+       movl    %ebp,%ecx               # wipe the keystream buffer
+       movl    %esp,%edi
+       subl    %esp,%ecx
+       xorl    %eax,%eax
+       shrl    $2,%ecx
+.byte  243,171                         # rep stosl
+.L041ctr16_done:
+       leal    24(%ebp),%esp
+       movl    $1,%eax                 # success
+       leal    4(%esp),%esp            # drop the pushed eflags
+.L036ctr16_abort:
+       popl    %edi
+       popl    %esi
+       popl    %ebx
+       popl    %ebp
+       ret
+.size  padlock_ctr16_encrypt,.-.L_padlock_ctr16_encrypt_begin
+.globl padlock_xstore
+.type  padlock_xstore,@function
+.align 16
+# void padlock_xstore(void *out, unsigned int edx_val)
+# Issues the PadLock RNG xstore instruction with the given edx
+# control value, writing random bytes at out.
+padlock_xstore:
+.L_padlock_xstore_begin:
+       pushl   %edi
+       movl    8(%esp),%edi            # edi = output buffer
+       movl    12(%esp),%edx           # edx = control value
+.byte  15,167,192                      # xstore
+       popl    %edi
+       ret
+.size  padlock_xstore,.-.L_padlock_xstore_begin
+.type  _win32_segv_handler,@function
+.align 16
+# Win32 SEH handler: on exception code 0xC0000005 (access violation)
+# it advances the instruction pointer stored in the CONTEXT record by
+# 4 bytes and resumes; anything else continues the handler search.
+# NOTE(review): appears unreferenced in an ELF build -- confirm.
+_win32_segv_handler:
+       movl    $1,%eax                 # default disposition (non-zero) -- TODO confirm constant
+       movl    4(%esp),%edx            # edx = EXCEPTION_RECORD*
+       movl    12(%esp),%ecx           # ecx = CONTEXT*
+       cmpl    $3221225477,(%edx)      # 0xC0000005: access violation?
+       jne     .L042ret
+       addl    $4,184(%ecx)            # skip 4 bytes at CONTEXT+184 -- presumably Eip; confirm offset
+       movl    $0,%eax                 # handled: resume execution
+.L042ret:
+       ret
+.size  _win32_segv_handler,.-_win32_segv_handler
+.globl padlock_sha1_oneshot
+.type  padlock_sha1_oneshot,@function
+.align 16
+# void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len)
+# rep xsha1 with eax=0 -- the mode selector for a complete message;
+# TODO confirm exact xsha1 eax semantics against the PadLock docs.
+padlock_sha1_oneshot:
+.L_padlock_sha1_oneshot_begin:
+       pushl   %edi
+       pushl   %esi
+       xorl    %eax,%eax               # eax = 0 (contrast: padlock_sha1 uses -1)
+       movl    12(%esp),%edi           # edi = state/digest
+       movl    16(%esp),%esi           # esi = input
+       movl    20(%esp),%ecx           # ecx = length
+.byte  243,15,166,200                  # rep xsha1
+       popl    %esi
+       popl    %edi
+       ret
+.size  padlock_sha1_oneshot,.-.L_padlock_sha1_oneshot_begin
+.globl padlock_sha1
+.type  padlock_sha1,@function
+.align 16
+# void padlock_sha1(void *ctx, const void *inp, size_t len)
+# rep xsha1 with eax=-1 -- the mode selector for partial/streamed
+# input; TODO confirm exact xsha1 eax semantics.
+padlock_sha1:
+.L_padlock_sha1_begin:
+       pushl   %edi
+       pushl   %esi
+       movl    $-1,%eax                # eax = -1 (contrast: oneshot uses 0)
+       movl    12(%esp),%edi           # edi = state/digest
+       movl    16(%esp),%esi           # esi = input
+       movl    20(%esp),%ecx           # ecx = length
+.byte  243,15,166,200                  # rep xsha1
+       popl    %esi
+       popl    %edi
+       ret
+.size  padlock_sha1,.-.L_padlock_sha1_begin
+.globl padlock_sha256_oneshot
+.type  padlock_sha256_oneshot,@function
+.align 16
+# void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len)
+# rep xsha256 with eax=0 (complete-message mode; see sha1 notes).
+padlock_sha256_oneshot:
+.L_padlock_sha256_oneshot_begin:
+       pushl   %edi
+       pushl   %esi
+       xorl    %eax,%eax               # eax = 0
+       movl    12(%esp),%edi           # edi = state/digest
+       movl    16(%esp),%esi           # esi = input
+       movl    20(%esp),%ecx           # ecx = length
+.byte  243,15,166,208                  # rep xsha256
+       popl    %esi
+       popl    %edi
+       ret
+.size  padlock_sha256_oneshot,.-.L_padlock_sha256_oneshot_begin
+.globl padlock_sha256
+.type  padlock_sha256,@function
+.align 16
+# void padlock_sha256(void *ctx, const void *inp, size_t len)
+# rep xsha256 with eax=-1 (partial/streamed mode; see sha1 notes).
+padlock_sha256:
+.L_padlock_sha256_begin:
+       pushl   %edi
+       pushl   %esi
+       movl    $-1,%eax                # eax = -1
+       movl    12(%esp),%edi           # edi = state/digest
+       movl    16(%esp),%esi           # esi = input
+       movl    20(%esp),%ecx           # ecx = length
+.byte  243,15,166,208                  # rep xsha256
+       popl    %esi
+       popl    %edi
+       ret
+.size  padlock_sha256,.-.L_padlock_sha256_begin
+.byte  86,73,65,32,80,97,100,108,111,99,107,32,120,56,54,32
+.byte  109,111,100,117,108,101,44,32,67,82,89,80,84,79,71,65
+.byte  77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101
+.byte  110,115,115,108,46,111,114,103,62,0
+.align 16
+.data
+.align 4
+.Lpadlock_saved_context:
+.long  0
+
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
diff --git a/lib/accelerated/intel/padlock.c b/lib/accelerated/intel/padlock.c
new file mode 100644
index 0000000..403a9d8
--- /dev/null
+++ b/lib/accelerated/intel/padlock.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2011 Free Software Foundation, Inc.
+ *
+ * Author: Nikos Mavrogiannopoulos
+ *
+ * This file is part of GnuTLS.
+ *
+ * The GnuTLS is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 3 of
+ * the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>
+ *
+ */
+
+/*
+ * The following code is an implementation of the AES-128-CBC cipher
+ * using VIA Padlock instruction set. 
+ */
+
+#include <gnutls_errors.h>
+#include <gnutls_int.h>
+#include <gnutls/crypto.h>
+#include <gnutls_errors.h>
+#include <aes-x86.h>
+#include <x86.h>
+#ifdef HAVE_LIBNETTLE
+# include <nettle/aes.h>         /* for key generation in 192 and 256 bits */
+#endif
+
+/* In-memory layout consumed directly by the PadLock xcrypt
+ * instructions: 16-byte IV, 16-byte control word, then the key
+ * schedule.  The hardware requires the structure to start on a
+ * 16-byte boundary -- see the ALIGN16() call sites below and the
+ * alignment checks in the assembly.  Do not reorder members. */
+struct padlock_cipher_data {
+    unsigned char iv[16];       /* Initialization vector */
+    union {
+        unsigned int pad[4];
+        struct {
+            int rounds:4;
+            int dgst:1;         /* n/a in C3 */
+            int align:1;        /* n/a in C3 */
+            int ciphr:1;        /* n/a in C3 */
+            unsigned int keygen:1;  /* 0: hardware expands the key; 1: full schedule supplied */
+            int interm:1;
+            unsigned int encdec:1;  /* 0: encrypt, 1: decrypt (see aes_cipher_setkey) */
+            int ksize:2;        /* 0/1/2 -> 128/192/256-bit key */
+        } b;
+    } cword;                    /* Control word */
+    AES_KEY ks;                 /* Encryption key */
+};
+
+/* Per-handle state: the (alignment-sensitive) cipher data plus the
+ * requested direction. */
+struct padlock_ctx {
+    struct padlock_cipher_data expanded_key;
+    int enc;                    /* non-zero: encrypt; zero: decrypt */
+};
+
+/* Entry points implemented in asm/padlock-x86*.s.  The xcrypt-based
+ * routines require a 16-byte-aligned struct padlock_cipher_data and
+ * a length that is a multiple of the AES block size. */
+unsigned int padlock_capability();
+void padlock_key_bswap(AES_KEY * key);
+void padlock_verify_context(struct padlock_cipher_data *ctx);
+void padlock_reload_key();
+void padlock_aes_block(void *out, const void *inp,
+                       struct padlock_cipher_data *ctx);
+int padlock_ecb_encrypt(void *out, const void *inp,
+                        struct padlock_cipher_data *ctx, size_t len);
+int padlock_cbc_encrypt(void *out, const void *inp,
+                        struct padlock_cipher_data *ctx, size_t len);
+/* NOTE(review): the assembly files define padlock_ctr16_encrypt, not
+ * padlock_ctr32_encrypt -- confirm this prototype against the .s sources. */
+int padlock_ctr32_encrypt(void *out, const void *inp,
+                          struct padlock_cipher_data *ctx, size_t len);
+void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
+void padlock_sha1(void *ctx, const void *inp, size_t len);
+void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
+void padlock_sha256(void *ctx, const void *inp, size_t len);
+
+
+/* Allocate a PadLock cipher context for AES-CBC.
+ * @algorithm: one of the AES-CBC variants (anything else is rejected)
+ * @_ctx: receives the newly allocated struct padlock_ctx
+ * @enc: non-zero for encryption, zero for decryption
+ * Returns 0 on success or a negative gnutls error code. */
+static int
+aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc)
+{
+    /* we use key size to distinguish */
+    if (algorithm != GNUTLS_CIPHER_AES_128_CBC
+        && algorithm != GNUTLS_CIPHER_AES_192_CBC
+        && algorithm != GNUTLS_CIPHER_AES_256_CBC)
+        return GNUTLS_E_INVALID_REQUEST;
+
+    /* Over-allocate by 16 bytes: the xcrypt instructions need a
+     * 16-byte-aligned cipher data block, and ALIGN16() may advance
+     * the pointer up to 15 bytes past &ctx->expanded_key, so the
+     * aligned structure could otherwise run past the allocation. */
+    *_ctx = gnutls_calloc(1, sizeof(struct padlock_ctx) + 16);
+    if (*_ctx == NULL) {
+        gnutls_assert();
+        return GNUTLS_E_MEMORY_ERROR;
+    }
+
+    ((struct padlock_ctx*)(*_ctx))->enc = enc;
+    return 0;
+}
+
+/* Load @userkey into the aligned PadLock control word / key schedule.
+ * 16-byte keys are passed through for hardware expansion; 24/32-byte
+ * keys are expanded in software via nettle (when available).
+ * Returns 0 on success or a negative gnutls error code. */
+static int
+aes_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+{
+    struct padlock_ctx *ctx = _ctx;
+    struct padlock_cipher_data *pce;
+#ifdef HAVE_LIBNETTLE
+    struct aes_ctx nc;
+#endif
+
+    /* Clear only the cipher data; ctx->enc (set at init time) must
+     * survive.  The previous memset(_ctx, 0, sizeof(struct
+     * padlock_cipher_data)) relied on expanded_key being the first
+     * member -- address it explicitly instead. */
+    memset(&ctx->expanded_key, 0, sizeof(ctx->expanded_key));
+
+    pce = ALIGN16(&ctx->expanded_key);
+
+    pce->cword.b.encdec = (ctx->enc == 0);
+
+    switch (keysize) {
+    case 16:
+        pce->cword.b.ksize = 0;
+        pce->cword.b.rounds = 10;
+        memcpy(pce->ks.rd_key, userkey, 16);
+        pce->cword.b.keygen = 0;  /* hardware expands 128-bit keys itself */
+        break;
+#ifdef HAVE_LIBNETTLE
+    case 24:
+        pce->cword.b.ksize = 1;
+        pce->cword.b.rounds = 12;
+        goto common_24_32;
+    case 32:
+        pce->cword.b.ksize = 2;
+        pce->cword.b.rounds = 14;
+common_24_32:
+        /* expand key using nettle */
+        if (ctx->enc)
+          aes_set_encrypt_key(&nc, keysize, userkey);
+        else
+          aes_set_decrypt_key(&nc, keysize, userkey);
+
+        memcpy(pce->ks.rd_key, nc.keys, sizeof(nc.keys));
+        pce->ks.rounds = nc.nrounds;
+
+        /* Best-effort wipe of the software key schedule; note a plain
+         * memset may be elided by some compilers -- an explicit-wipe
+         * helper would be stronger if one is available. */
+        memset(&nc, 0, sizeof(nc));
+
+        pce->cword.b.keygen = 1;  /* full schedule supplied, no hardware keygen */
+        break;
+#endif
+    default:
+        return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED);
+    }
+
+    padlock_reload_key ();
+
+    return 0;
+}
+
+/* Set the 16-byte CBC IV.  Rejects shorter IVs instead of reading
+ * past the caller's buffer (the previous code copied 16 bytes
+ * unconditionally, ignoring @iv_size). */
+static int aes_setiv(void *_ctx, const void *iv, size_t iv_size)
+{
+    struct padlock_ctx *ctx = _ctx;
+    struct padlock_cipher_data *pce;
+
+    if (iv_size < 16)
+        return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+
+    pce = ALIGN16(&ctx->expanded_key);
+
+    memcpy(pce->iv, iv, 16);
+    return 0;
+}
+
+/* CBC-encrypt @src_size bytes from @src into @dst via the PadLock
+ * engine (direction was baked into the control word at setkey time).
+ * @src_size must be a multiple of the AES block size; the output
+ * buffer must be at least as large as the input (now checked --
+ * previously @dst_size was ignored). */
+static int
+padlock_aes_encrypt(void *_ctx, const void *src, size_t src_size,
+            void *dst, size_t dst_size)
+{
+    struct padlock_ctx *ctx = _ctx;
+    struct padlock_cipher_data *pce;
+
+    if (dst_size < src_size)
+        return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+
+    pce = ALIGN16(&ctx->expanded_key);
+
+    padlock_cbc_encrypt(dst, src, pce, src_size);
+
+    return 0;
+}
+
+/* CBC-decrypt @src_size bytes from @src into @dst.  Uses the same
+ * padlock_cbc_encrypt entry point as encryption; the control word's
+ * encdec bit (set at setkey time) selects the direction.  The output
+ * buffer must be at least as large as the input (now checked --
+ * previously @dst_size was ignored). */
+static int
+padlock_aes_decrypt(void *_ctx, const void *src, size_t src_size,
+            void *dst, size_t dst_size)
+{
+    struct padlock_ctx *ctx = _ctx;
+    struct padlock_cipher_data *pcd;
+
+    if (dst_size < src_size)
+        return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+
+    pcd = ALIGN16(&ctx->expanded_key);
+
+    padlock_cbc_encrypt(dst, src, pcd, src_size);
+
+    return 0;
+}
+
+/* Release a context allocated by aes_cipher_init(). */
+static void aes_deinit(void *_ctx)
+{
+    struct padlock_ctx *ctx = _ctx;
+
+    gnutls_free(ctx);
+}
+
+/* Cipher vtable handed to the gnutls crypto registry; the same entry
+ * points serve AES-128/192/256 in CBC mode (the key size chosen at
+ * setkey time selects the variant). */
+static const gnutls_crypto_cipher_st cipher_struct = {
+    .init = aes_cipher_init,
+    .setkey = aes_cipher_setkey,
+    .setiv = aes_setiv,
+    .encrypt = padlock_aes_encrypt,
+    .decrypt = padlock_aes_decrypt,
+    .deinit = aes_deinit,
+};
+
+/* Return non-zero when both PadLock feature bits 6 and 7 reported by
+ * padlock_capability() are set. */
+static int check_padlock(void)
+{
+    const unsigned int required = 0x3 << 6;
+
+    return (padlock_capability() & required) == required;
+}
+
+/* Return 1 when the cpuid leaf-0 vendor string registers (EBX, EDX,
+ * ECX) each read "VIA ", i.e. the CPU identifies as VIA; 0 otherwise. */
+static unsigned check_via(void)
+{
+    unsigned int a, b, c, d;
+
+    cpuid(0, a, b, c, d);
+
+    if (memcmp(&b, "VIA ", 4) != 0 ||
+        memcmp(&d, "VIA ", 4) != 0 ||
+        memcmp(&c, "VIA ", 4) != 0)
+        return 0;
+
+    return 1;
+}
+
+/* Probe the CPU and, when the PadLock AES engine is present, register
+ * the accelerated CBC ciphers with the gnutls crypto layer at
+ * priority 80.  Registration failures are logged via gnutls_assert()
+ * but are not fatal. */
+void register_padlock_crypto(void)
+{
+    static const gnutls_cipher_algorithm_t aes_cbc_algos[] = {
+        GNUTLS_CIPHER_AES_128_CBC,
+#ifdef HAVE_LIBNETTLE
+        /* 192/256-bit keys need nettle for software key expansion */
+        GNUTLS_CIPHER_AES_192_CBC,
+        GNUTLS_CIPHER_AES_256_CBC,
+#endif
+    };
+    unsigned i;
+    int ret;
+
+    if (check_via() == 0)
+        return;
+
+    if (!check_padlock())
+        return;
+
+    _gnutls_debug_log("Padlock AES accelerator was detected\n");
+
+    for (i = 0; i < sizeof(aes_cbc_algos) / sizeof(aes_cbc_algos[0]); i++) {
+        ret = gnutls_crypto_single_cipher_register(aes_cbc_algos[i],
+                                                   80, &cipher_struct);
+        if (ret < 0) {
+            gnutls_assert();
+        }
+    }
+}
diff --git a/lib/crypto-api.c b/lib/crypto-api.c
index fa1f1bb..ce9d02b 100644
--- a/lib/crypto-api.c
+++ b/lib/crypto-api.c
@@ -29,6 +29,12 @@
 #include <random.h>
 #include <crypto.h>
 
+typedef struct api_cipher_hd_st
+{
+  cipher_hd_st ctx_enc;
+  cipher_hd_st ctx_dec;
+} api_cipher_hd_st;
+
 /**
  * gnutls_cipher_init:
  * @handle: is a #gnutls_cipher_hd_t structure.
@@ -50,14 +56,23 @@ gnutls_cipher_init (gnutls_cipher_hd_t * handle,
                     gnutls_cipher_algorithm_t cipher,
                     const gnutls_datum_t * key, const gnutls_datum_t * iv)
 {
-  *handle = gnutls_malloc (sizeof (cipher_hd_st));
+api_cipher_hd_st * h;
+int ret;
+
+  *handle = gnutls_calloc (1, sizeof (api_cipher_hd_st));
   if (*handle == NULL)
     {
       gnutls_assert ();
       return GNUTLS_E_MEMORY_ERROR;
     }
 
-  return _gnutls_cipher_init (((cipher_hd_st *) * handle), cipher, key, iv);
+  h = *handle;
+  ret = _gnutls_cipher_init (&h->ctx_enc, cipher, key, iv, 1);
+
+  if (ret >= 0 && _gnutls_cipher_is_aead( &h->ctx_enc) == 0) /* AEAD ciphers are stream - so far */
+    ret = _gnutls_cipher_init (&h->ctx_dec, cipher, key, iv, 0);
+
+  return ret;
 }
 
 /**
@@ -77,10 +92,12 @@ gnutls_cipher_init (gnutls_cipher_hd_t * handle,
 int
 gnutls_cipher_tag (gnutls_cipher_hd_t handle, void *tag, size_t tag_size)
 {
-  if (_gnutls_cipher_is_aead( (cipher_hd_st*)handle)==0)
+api_cipher_hd_st * h = handle;
+
+  if (_gnutls_cipher_is_aead( &h->ctx_enc)==0)
     return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
 
-  _gnutls_cipher_tag( (cipher_hd_st*)handle, tag, tag_size);
+  _gnutls_cipher_tag( &h->ctx_enc, tag, tag_size);
   
   return 0;
 }
@@ -103,10 +120,12 @@ gnutls_cipher_tag (gnutls_cipher_hd_t handle, void *tag, size_t tag_size)
 int
 gnutls_cipher_add_auth (gnutls_cipher_hd_t handle, const void *text, size_t text_size)
 {
-  if (_gnutls_cipher_is_aead( (cipher_hd_st*)handle)==0)
+api_cipher_hd_st * h = handle;
+
+  if (_gnutls_cipher_is_aead( &h->ctx_enc)==0)
     return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
 
-  _gnutls_cipher_auth( (cipher_hd_st*)handle, text, text_size);
+  _gnutls_cipher_auth( &h->ctx_enc, text, text_size);
   
   return 0;
 }
@@ -125,7 +144,12 @@ gnutls_cipher_add_auth (gnutls_cipher_hd_t handle, const void *text, size_t text
 void
 gnutls_cipher_set_iv (gnutls_cipher_hd_t handle, void *iv, size_t ivlen)
 {
-  _gnutls_cipher_setiv((cipher_hd_st *)handle, iv, ivlen);
+api_cipher_hd_st * h = handle;
+
+  _gnutls_cipher_setiv( &h->ctx_enc, iv, ivlen);
+
+  if (_gnutls_cipher_is_aead( &h->ctx_enc)==0)
+    _gnutls_cipher_setiv( &h->ctx_dec, iv, ivlen);
 }
 
 /**
@@ -144,7 +168,9 @@ gnutls_cipher_set_iv (gnutls_cipher_hd_t handle, void *iv, size_t ivlen)
 int
 gnutls_cipher_encrypt (gnutls_cipher_hd_t handle, void *text, size_t textlen)
 {
-  return _gnutls_cipher_encrypt ((cipher_hd_st *) handle, text, textlen);
+api_cipher_hd_st * h = handle;
+
+  return _gnutls_cipher_encrypt (&h->ctx_enc, text, textlen);
 }
 
 /**
@@ -164,8 +190,12 @@ int
 gnutls_cipher_decrypt (gnutls_cipher_hd_t handle, void *ciphertext,
                        size_t ciphertextlen)
 {
-  return _gnutls_cipher_decrypt ((cipher_hd_st *) handle, ciphertext,
-                                 ciphertextlen);
+api_cipher_hd_st * h = handle;
+
+  if (_gnutls_cipher_is_aead( &h->ctx_enc)!=0)
+    return _gnutls_cipher_decrypt (&h->ctx_enc, ciphertext, ciphertextlen);
+  else
+    return _gnutls_cipher_decrypt (&h->ctx_dec, ciphertext, ciphertextlen);
 }
 
 /**
@@ -187,7 +217,9 @@ int
 gnutls_cipher_encrypt2 (gnutls_cipher_hd_t handle, const void *text, size_t textlen,
                         void *ciphertext, size_t ciphertextlen)
 {
-  return _gnutls_cipher_encrypt2 ((cipher_hd_st *) handle, text, textlen,
+api_cipher_hd_st * h = handle;
+
+  return _gnutls_cipher_encrypt2 (&h->ctx_enc, text, textlen,
                                   ciphertext, ciphertextlen);
 }
 
@@ -210,7 +242,9 @@ int
 gnutls_cipher_decrypt2 (gnutls_cipher_hd_t handle, const void *ciphertext,
                         size_t ciphertextlen, void *text, size_t textlen)
 {
-  return _gnutls_cipher_decrypt2 ((cipher_hd_st *) handle, ciphertext,
+api_cipher_hd_st * h = handle;
+
+  return _gnutls_cipher_decrypt2 (&h->ctx_dec, ciphertext,
                                   ciphertextlen, text, textlen);
 }
 
@@ -226,7 +260,11 @@ gnutls_cipher_decrypt2 (gnutls_cipher_hd_t handle, const void *ciphertext,
 void
 gnutls_cipher_deinit (gnutls_cipher_hd_t handle)
 {
-  _gnutls_cipher_deinit ((cipher_hd_st *) handle);
+api_cipher_hd_st * h = handle;
+
+  _gnutls_cipher_deinit (&h->ctx_enc);
+  if (_gnutls_cipher_is_aead( &h->ctx_enc)==0)
+    _gnutls_cipher_deinit (&h->ctx_dec);
   gnutls_free (handle);
 }
 
diff --git a/lib/crypto-backend.h b/lib/crypto-backend.h
index d5e1ded..5be0dd9 100644
--- a/lib/crypto-backend.h
+++ b/lib/crypto-backend.h
@@ -31,7 +31,7 @@
 
   typedef struct
   {
-    int (*init) (gnutls_cipher_algorithm_t, void **ctx);
+    int (*init) (gnutls_cipher_algorithm_t, void **ctx, int enc);
     int (*setkey) (void *ctx, const void *key, size_t keysize);
     int (*setiv) (void *ctx, const void *iv, size_t ivsize);
     int (*encrypt) (void *ctx, const void *plain, size_t plainsize,
diff --git a/lib/ext/session_ticket.c b/lib/ext/session_ticket.c
index c756f5e..a08881d 100644
--- a/lib/ext/session_ticket.c
+++ b/lib/ext/session_ticket.c
@@ -147,7 +147,7 @@ decrypt_ticket (gnutls_session_t session, session_ticket_ext_st * priv,
   IV.data = ticket->IV;
   IV.size = IV_SIZE;
   ret =
-    _gnutls_cipher_init (&cipher_hd, GNUTLS_CIPHER_AES_128_CBC, &key, &IV);
+    _gnutls_cipher_init (&cipher_hd, GNUTLS_CIPHER_AES_128_CBC, &key, &IV, 0);
   if (ret < 0)
     {
       gnutls_assert ();
@@ -222,7 +222,7 @@ encrypt_ticket (gnutls_session_t session, session_ticket_ext_st * priv,
   IV.data = priv->session_ticket_IV;
   IV.size = IV_SIZE;
   ret =
-    _gnutls_cipher_init (&cipher_hd, GNUTLS_CIPHER_AES_128_CBC, &key, &IV);
+    _gnutls_cipher_init (&cipher_hd, GNUTLS_CIPHER_AES_128_CBC, &key, &IV, 1);
   if (ret < 0)
     {
       gnutls_assert ();
diff --git a/lib/gnutls_cipher_int.c b/lib/gnutls_cipher_int.c
index 73cb043..d61d2c8 100644
--- a/lib/gnutls_cipher_int.c
+++ b/lib/gnutls_cipher_int.c
@@ -36,7 +36,7 @@
 
 int
 _gnutls_cipher_init (cipher_hd_st * handle, gnutls_cipher_algorithm_t cipher,
-                     const gnutls_datum_t * key, const gnutls_datum_t * iv)
+                     const gnutls_datum_t * key, const gnutls_datum_t * iv, int enc)
 {
   int ret = GNUTLS_E_INTERNAL_ERROR;
   const gnutls_crypto_cipher_st *cc = NULL;
@@ -57,7 +57,7 @@ _gnutls_cipher_init (cipher_hd_st * handle, gnutls_cipher_algorithm_t cipher,
       handle->tag = cc->tag;
       handle->setiv = cc->setiv;
 
-      SR (cc->init (cipher, &handle->handle), cc_cleanup);
+      SR (cc->init (cipher, &handle->handle, enc), cc_cleanup);
       SR (cc->setkey( handle->handle, key->data, key->size), cc_cleanup);
       if (iv)
         {
@@ -76,7 +76,7 @@ _gnutls_cipher_init (cipher_hd_st * handle, gnutls_cipher_algorithm_t cipher,
 
   /* otherwise use generic cipher interface
    */
-  ret = _gnutls_cipher_ops.init (cipher, &handle->handle);
+  ret = _gnutls_cipher_ops.init (cipher, &handle->handle, enc);
   if (ret < 0)
     {
       gnutls_assert ();
@@ -118,13 +118,13 @@ int _gnutls_auth_cipher_init (auth_cipher_hd_st * handle,
   const gnutls_datum_t * iv,
   gnutls_mac_algorithm_t mac,
   const gnutls_datum_t * mac_key,
-  int ssl_hmac)
+  int ssl_hmac, int enc)
 {
 int ret;
 
   memset(handle, 0, sizeof(*handle));
 
-  ret = _gnutls_cipher_init(&handle->cipher, cipher, cipher_key, iv);
+  ret = _gnutls_cipher_init(&handle->cipher, cipher, cipher_key, iv, enc);
   if (ret < 0)
     {
       gnutls_assert();
diff --git a/lib/gnutls_cipher_int.h b/lib/gnutls_cipher_int.h
index 1a5717f..bd2b68d 100644
--- a/lib/gnutls_cipher_int.h
+++ b/lib/gnutls_cipher_int.h
@@ -56,7 +56,7 @@ typedef struct
 
 int _gnutls_cipher_init (cipher_hd_st *, gnutls_cipher_algorithm_t cipher,
                          const gnutls_datum_t * key,
-                         const gnutls_datum_t * iv);
+                         const gnutls_datum_t * iv, int enc);
 
 inline static void _gnutls_cipher_setiv (const cipher_hd_st * handle, 
     const void *iv, int ivlen)
@@ -151,7 +151,7 @@ int _gnutls_auth_cipher_init (auth_cipher_hd_st * handle,
   const gnutls_datum_t * cipher_key,
   const gnutls_datum_t * iv,
   gnutls_mac_algorithm_t mac,
-  const gnutls_datum_t * mac_key, int ssl_hmac);
+  const gnutls_datum_t * mac_key, int ssl_hmac, int enc);
 
 int _gnutls_auth_cipher_add_auth (auth_cipher_hd_st * handle, const void *text,
                              int textlen);
diff --git a/lib/gnutls_constate.c b/lib/gnutls_constate.c
index 2b3308a..d747cc3 100644
--- a/lib/gnutls_constate.c
+++ b/lib/gnutls_constate.c
@@ -289,7 +289,7 @@ _gnutls_set_keys (gnutls_session_t session, record_parameters_st * params,
 }
 
 static int
-_gnutls_init_record_state (record_parameters_st * params, gnutls_protocol_t ver, int d,
+_gnutls_init_record_state (record_parameters_st * params, gnutls_protocol_t ver, int read,
                            record_state_st * state)
 {
   int ret;
@@ -302,12 +302,12 @@ _gnutls_init_record_state (record_parameters_st * params, gnutls_protocol_t ver,
 
   ret = _gnutls_auth_cipher_init (&state->cipher_state,
     params->cipher_algorithm, &state->key, iv,
-    params->mac_algorithm, &state->mac_secret, (ver==GNUTLS_SSL3)?1:0);
+    params->mac_algorithm, &state->mac_secret, (ver==GNUTLS_SSL3)?1:0, 1-read/*1==encrypt*/);
   if (ret < 0 && params->cipher_algorithm != GNUTLS_CIPHER_NULL)
     return gnutls_assert_val (ret);
 
   state->compression_state =
-    _gnutls_comp_init (params->compression_algorithm, d);
+    _gnutls_comp_init (params->compression_algorithm, read/*1==decompress*/);
 
   if (state->compression_state == GNUTLS_COMP_FAILED)
     return gnutls_assert_val (GNUTLS_E_UNKNOWN_COMPRESSION_ALGORITHM);
diff --git a/lib/gnutls_extensions.c b/lib/gnutls_extensions.c
index 2ec5c15..99238a0 100644
--- a/lib/gnutls_extensions.c
+++ b/lib/gnutls_extensions.c
@@ -297,6 +297,7 @@ _gnutls_gen_extensions (gnutls_session_t session, gnutls_buffer_st * extdata,
   
   if ( size > 0)
     _gnutls_write_uint16(size, &extdata->data[pos]);
+  else if (size == 0) extdata->length = 0;
 
   return size;
 }
diff --git a/lib/gnutls_x509.c b/lib/gnutls_x509.c
index b336795..1d4ed53 100644
--- a/lib/gnutls_x509.c
+++ b/lib/gnutls_x509.c
@@ -1400,7 +1400,8 @@ gnutls_certificate_set_x509_trust_mem (gnutls_certificate_credentials_t res,
  * a certificate request is sent. This can be disabled using
  * gnutls_certificate_send_x509_rdn_sequence().
  *
- * Returns: %GNUTLS_E_SUCCESS (0) on success, or a negative error code.
+ * Returns: the number of certificates processed or a negative error code
+ * on error.
  *
  * Since: 2.4.0
  **/
diff --git a/lib/includes/gnutls/crypto.h b/lib/includes/gnutls/crypto.h
index 52081f8..9946ed2 100644
--- a/lib/includes/gnutls/crypto.h
+++ b/lib/includes/gnutls/crypto.h
@@ -28,7 +28,7 @@ extern "C"
 {
 #endif
 
-  typedef struct cipher_hd_st *gnutls_cipher_hd_t;
+  typedef struct api_cipher_hd_st *gnutls_cipher_hd_t;
 
   int gnutls_cipher_init (gnutls_cipher_hd_t * handle,
                           gnutls_cipher_algorithm_t cipher,
diff --git a/lib/nettle/cipher.c b/lib/nettle/cipher.c
index 006cb2b..014a549 100644
--- a/lib/nettle/cipher.c
+++ b/lib/nettle/cipher.c
@@ -58,68 +58,12 @@ stream_encrypt (void *ctx, nettle_crypt_func func, unsigned block_size,
   func (ctx, length, dst, src);
 }
 
-struct aes_bidi_ctx
-{
-  struct aes_ctx encrypt;
-  struct aes_ctx decrypt;
-};
-
-static void
-aes_bidi_setkey (struct aes_bidi_ctx *ctx, unsigned length,
-                 const uint8_t * key)
-{
-  aes_set_encrypt_key (&ctx->encrypt, length, key);
-  aes_invert_key (&ctx->decrypt, &ctx->encrypt);
-}
-
-static void
-aes_bidi_encrypt (struct aes_bidi_ctx *ctx,
-                  unsigned length, uint8_t * dst, const uint8_t * src)
-{
-  aes_encrypt (&ctx->encrypt, length, dst, src);
-}
-
-static void
-aes_bidi_decrypt (struct aes_bidi_ctx *ctx,
-                  unsigned length, uint8_t * dst, const uint8_t * src)
-{
-  aes_decrypt (&ctx->decrypt, length, dst, src);
-}
-
-struct camellia_bidi_ctx
-{
-  struct camellia_ctx encrypt;
-  struct camellia_ctx decrypt;
-};
-
-static void
-camellia_bidi_setkey (struct camellia_bidi_ctx *ctx, unsigned length,
-                      const uint8_t * key)
-{
-  camellia_set_encrypt_key (&ctx->encrypt, length, key);
-  camellia_invert_key (&ctx->decrypt, &ctx->encrypt);
-}
-
-static void
-camellia_bidi_encrypt (struct camellia_bidi_ctx *ctx,
-                       unsigned length, uint8_t * dst, const uint8_t * src)
-{
-  camellia_crypt (&ctx->encrypt, length, dst, src);
-}
-
-static void
-camellia_bidi_decrypt (struct camellia_bidi_ctx *ctx,
-                       unsigned length, uint8_t * dst, const uint8_t * src)
-{
-  camellia_crypt (&ctx->decrypt, length, dst, src);
-}
-
 struct nettle_cipher_ctx
 {
   union
   {
-    struct aes_bidi_ctx aes_bidi;
-    struct camellia_bidi_ctx camellia_bidi;
+    struct aes_ctx aes;
+    struct camellia_ctx camellia;
     struct arcfour_ctx arcfour;
     struct arctwo_ctx arctwo;
     struct des3_ctx des3;
@@ -136,6 +80,7 @@ struct nettle_cipher_ctx
   decrypt_func decrypt;
   auth_func auth;
   tag_func tag;
+  int enc;
 };
 
 #define GCM_DEFAULT_NONCE_SIZE 12
@@ -157,7 +102,7 @@ static void _gcm_decrypt(void *_ctx, nettle_crypt_func f,
 }
 
 static int
-wrap_nettle_cipher_init (gnutls_cipher_algorithm_t algo, void **_ctx)
+wrap_nettle_cipher_init (gnutls_cipher_algorithm_t algo, void **_ctx, int enc)
 {
   struct nettle_cipher_ctx *ctx;
 
@@ -169,6 +114,7 @@ wrap_nettle_cipher_init (gnutls_cipher_algorithm_t algo, void **_ctx)
     }
 
   ctx->algo = algo;
+  ctx->enc = enc;
 
   switch (algo)
     {
@@ -176,7 +122,7 @@ wrap_nettle_cipher_init (gnutls_cipher_algorithm_t algo, void **_ctx)
     case GNUTLS_CIPHER_AES_256_GCM:
       ctx->encrypt = _gcm_encrypt;
       ctx->decrypt = _gcm_decrypt;
-      ctx->i_encrypt = (nettle_crypt_func*) aes_bidi_encrypt;
+      ctx->i_encrypt = (nettle_crypt_func*) aes_encrypt;
       ctx->auth = (auth_func)gcm_aes_update;
       ctx->tag = (tag_func)gcm_aes_digest;
       ctx->ctx_ptr = &ctx->ctx.aes_gcm;
@@ -186,9 +132,9 @@ wrap_nettle_cipher_init (gnutls_cipher_algorithm_t algo, void **_ctx)
     case GNUTLS_CIPHER_CAMELLIA_256_CBC:
       ctx->encrypt = cbc_encrypt;
       ctx->decrypt = cbc_decrypt;
-      ctx->i_encrypt = (nettle_crypt_func *) camellia_bidi_encrypt;
-      ctx->i_decrypt = (nettle_crypt_func *) camellia_bidi_decrypt;
-      ctx->ctx_ptr = &ctx->ctx.camellia_bidi;
+      ctx->i_encrypt = (nettle_crypt_func*)camellia_crypt;
+      ctx->i_decrypt = (nettle_crypt_func*)camellia_crypt;
+      ctx->ctx_ptr = &ctx->ctx.camellia;
       ctx->block_size = CAMELLIA_BLOCK_SIZE;
       break;
     case GNUTLS_CIPHER_AES_128_CBC:
@@ -196,9 +142,9 @@ wrap_nettle_cipher_init (gnutls_cipher_algorithm_t algo, void **_ctx)
     case GNUTLS_CIPHER_AES_256_CBC:
       ctx->encrypt = cbc_encrypt;
       ctx->decrypt = cbc_decrypt;
-      ctx->i_encrypt = (nettle_crypt_func *) aes_bidi_encrypt;
-      ctx->i_decrypt = (nettle_crypt_func *) aes_bidi_decrypt;
-      ctx->ctx_ptr = &ctx->ctx.aes_bidi;
+      ctx->i_encrypt = (nettle_crypt_func*)aes_encrypt;
+      ctx->i_decrypt = (nettle_crypt_func*)aes_decrypt;
+      ctx->ctx_ptr = &ctx->ctx.aes;
       ctx->block_size = AES_BLOCK_SIZE;
       break;
     case GNUTLS_CIPHER_3DES_CBC:
@@ -259,11 +205,17 @@ wrap_nettle_cipher_setkey (void *_ctx, const void *key, size_t keysize)
     case GNUTLS_CIPHER_AES_128_CBC:
     case GNUTLS_CIPHER_AES_192_CBC:
     case GNUTLS_CIPHER_AES_256_CBC:
-      aes_bidi_setkey (ctx->ctx_ptr, keysize, key);
+      if (ctx->enc)
+        aes_set_encrypt_key (ctx->ctx_ptr, keysize, key);
+      else
+        aes_set_decrypt_key (ctx->ctx_ptr, keysize, key);
       break;
     case GNUTLS_CIPHER_CAMELLIA_128_CBC:
     case GNUTLS_CIPHER_CAMELLIA_256_CBC:
-      camellia_bidi_setkey (ctx->ctx_ptr, keysize, key);
+      if (ctx->enc)
+        camellia_set_encrypt_key (ctx->ctx_ptr, keysize, key);
+      else
+        camellia_set_decrypt_key (ctx->ctx_ptr, keysize, key);
       break;
     case GNUTLS_CIPHER_3DES_CBC:
       if (keysize != DES3_KEY_SIZE)
diff --git a/lib/x509/privkey_pkcs8.c b/lib/x509/privkey_pkcs8.c
index 20b1366..106a13f 100644
--- a/lib/x509/privkey_pkcs8.c
+++ b/lib/x509/privkey_pkcs8.c
@@ -1674,7 +1674,7 @@ decrypt_data (schema_id schema, ASN1_TYPE pkcs8_asn,
 
   d_iv.data = (opaque *) enc_params->iv;
   d_iv.size = enc_params->iv_size;
-  result = _gnutls_cipher_init (&ch, enc_params->cipher, &dkey, &d_iv);
+  result = _gnutls_cipher_init (&ch, enc_params->cipher, &dkey, &d_iv, 0);
 
   gnutls_free (key);
   key = NULL;
@@ -2162,7 +2162,7 @@ encrypt_data (const gnutls_datum_t * plain,
 
   d_iv.data = (opaque *) enc_params->iv;
   d_iv.size = enc_params->iv_size;
-  result = _gnutls_cipher_init (&ch, enc_params->cipher, key, &d_iv);
+  result = _gnutls_cipher_init (&ch, enc_params->cipher, key, &d_iv, 1);
 
   if (result < 0)
     {


hooks/post-receive
-- 
GNU gnutls



reply via email to

[Prev in Thread] Current Thread [Next in Thread]