2013-08-10 19:23:38 +00:00
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
|
#
|
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
#
|
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
|
|
|
# implied.
|
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
2014-03-08 16:32:56 +00:00
|
|
|
from __future__ import absolute_import, division, print_function
|
|
|
|
|
|
2014-04-13 12:58:02 +00:00
|
|
|
import binascii
|
2014-01-27 18:08:35 +00:00
|
|
|
import collections
|
2014-04-13 12:58:02 +00:00
|
|
|
import re
|
2014-04-20 21:24:41 +00:00
|
|
|
from contextlib import contextmanager
|
2014-03-19 16:37:17 +00:00
|
|
|
|
2014-04-30 18:28:28 +00:00
|
|
|
from pyasn1.codec.der import encoder
|
2014-04-30 16:13:17 +00:00
|
|
|
from pyasn1.type import namedtype, univ
|
|
|
|
|
|
2014-03-14 20:03:12 +00:00
|
|
|
import pytest
|
|
|
|
|
|
2014-03-19 15:49:32 +00:00
|
|
|
import six
|
2013-12-25 05:55:24 +00:00
|
|
|
|
2014-03-22 22:09:34 +00:00
|
|
|
from cryptography.exceptions import UnsupportedAlgorithm
|
2014-04-05 18:22:07 +00:00
|
|
|
|
2014-03-14 20:03:12 +00:00
|
|
|
import cryptography_vectors
|
2014-03-12 21:54:43 +00:00
|
|
|
|
2013-12-25 05:55:24 +00:00
|
|
|
|
2014-01-27 18:08:35 +00:00
|
|
|
# A single parsed hash test case: raw message bytes and the expected digest.
HashVector = collections.namedtuple("HashVector", ["message", "digest"])
# Same as HashVector, plus the HMAC key used to compute the digest.
KeyedHashVector = collections.namedtuple(
    "KeyedHashVector", ["message", "digest", "key"]
)
|
|
|
|
|
|
|
|
|
|
|
2014-01-18 15:22:21 +00:00
|
|
|
def select_backends(names, backend_list):
    """Filter ``backend_list`` down to the comma-separated ``names``.

    ``names`` of ``None`` means no filtering: the list is returned as-is.
    The original backend objects are kept (not copied) so any metadata
    pytest associated with them is preserved.

    Raises ValueError when no backend matches any requested name.
    """
    if names is None:
        return backend_list

    requested = [part.strip() for part in names.split(',')]
    chosen = [backend for backend in backend_list if backend.name in requested]

    if not chosen:
        raise ValueError(
            "No backend selected. Tried to select: {0}".format(requested)
        )
    return chosen
|
2014-01-14 02:52:08 +00:00
|
|
|
|
|
|
|
|
|
2013-12-25 05:55:24 +00:00
|
|
|
def check_for_iface(name, iface, item):
    """Skip ``item`` when its backend does not implement ``iface``.

    Only acts when the test is marked with ``name`` and actually received
    a ``backend`` funcarg; otherwise it is a no-op.
    """
    if name not in item.keywords or "backend" not in item.funcargs:
        return
    backend = item.funcargs["backend"]
    if not isinstance(backend, iface):
        pytest.skip("{0} backend does not support {1}".format(backend, name))
|
2013-08-09 04:32:30 +00:00
|
|
|
|
|
|
|
|
|
2013-12-27 02:19:34 +00:00
|
|
|
def check_backend_support(item):
    """Skip ``item`` when its ``supported`` marker rejects the backend.

    Evaluates the marker's ``only_if`` predicate against the ``backend``
    funcarg.  Raises ValueError if the marker is applied to a test that
    does not take a ``backend`` argument.
    """
    supported = item.keywords.get("supported")
    if not supported:
        return
    if "backend" not in item.funcargs:
        raise ValueError("This mark is only available on methods that take a "
                         "backend")
    backend = item.funcargs["backend"]
    if not supported.kwargs["only_if"](backend):
        pytest.skip("{0} ({1})".format(
            supported.kwargs["skip_message"], backend
        ))
|
2013-12-27 02:13:45 +00:00
|
|
|
|
|
|
|
|
|
2014-03-22 22:09:34 +00:00
|
|
|
@contextmanager
def raises_unsupported_algorithm(reason):
    """Context manager asserting the body raises UnsupportedAlgorithm.

    Additionally checks that the exception's ``_reason`` is the given
    ``reason`` (identity comparison — reasons are singleton constants).
    """
    with pytest.raises(UnsupportedAlgorithm) as exc_info:
        yield exc_info

    assert exc_info.value._reason is reason
|
2014-03-22 22:09:34 +00:00
|
|
|
|
|
|
|
|
|
2014-04-30 17:12:50 +00:00
|
|
|
class _DSSSigValue(univ.Sequence):
    """ASN.1 structure of a DSA signature: SEQUENCE { r INTEGER, s INTEGER }.

    Matches the Dss-Sig-Value layout from RFC 3279; used to DER-encode
    (r, s) pairs for test vectors.
    """
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('r', univ.Integer()),
        namedtype.NamedType('s', univ.Integer())
    )
|
2014-04-30 14:07:27 +00:00
|
|
|
|
|
|
|
|
|
2014-04-30 17:14:48 +00:00
|
|
|
def der_encode_dsa_signature(r, s):
    """Return the DER encoding of the DSA/ECDSA signature integers (r, s)."""
    sig = _DSSSigValue()
    sig.setComponentByName('r', r)
    sig.setComponentByName('s', s)
    return encoder.encode(sig)
|
2014-04-30 14:07:27 +00:00
|
|
|
|
|
|
|
|
|
2013-11-12 02:43:52 +00:00
|
|
|
def load_vectors_from_file(filename, loader):
    """Open ``filename`` from the cryptography_vectors package and return
    the result of parsing it with ``loader``.
    """
    with cryptography_vectors.open_vector_file(filename) as vector_file:
        return loader(vector_file)
|
2013-11-12 02:43:52 +00:00
|
|
|
|
|
|
|
|
|
2013-11-11 22:46:20 +00:00
|
|
|
def load_nist_vectors(vector_data):
    """Parse a NIST "Key = Value" response file into a list of dicts.

    Each ``COUNT`` token opens a new test-case dict; a bare ``FAIL`` line
    flags the current case as an expected failure.  Every other value is
    stored under its lower-cased name as ASCII bytes.
    """
    data = []
    current = None

    for raw_line in vector_data:
        stripped = raw_line.strip()

        # Blank lines, comments, and [section] headers carry no data.
        if not stripped or stripped.startswith("#"):
            continue
        if stripped.startswith("[") and stripped.endswith("]"):
            continue

        if stripped == "FAIL":
            current["fail"] = True
            continue

        # Everything else is a "Name = Value" pair.
        name, value = [part.strip() for part in stripped.split("=")]

        # Some vectors (PBKDF2) spell a NUL byte as the two characters
        # backslash-zero; turn that into a real null character.
        value = value.replace("\\0", "\0")

        if name.upper() == "COUNT":
            # COUNT starts a fresh block of data.
            current = {}
            data.append(current)
        else:
            current[name.lower()] = value.encode("ascii")

    return data
|
2013-08-09 04:32:30 +00:00
|
|
|
|
|
|
|
|
|
2013-09-15 17:05:43 +00:00
|
|
|
def load_cryptrec_vectors(vector_data):
    """Parse CRYPTREC "K : / P : / C :" style cipher vectors.

    A key line (K) applies to every following plaintext/ciphertext pair;
    a completed {key, plaintext, ciphertext} dict is emitted whenever a
    ciphertext (C) line is seen.  Raises ValueError for any unrecognized
    non-comment line.
    """
    results = []

    for raw_line in vector_data:
        entry = raw_line.strip()

        # Blank lines and comments are ignored
        if not entry or entry.startswith("#"):
            continue

        if entry.startswith("K"):
            key = entry.split(" : ")[1].replace(" ", "").encode("ascii")
        elif entry.startswith("P"):
            pt = entry.split(" : ")[1].replace(" ", "").encode("ascii")
        elif entry.startswith("C"):
            ct = entry.split(" : ")[1].replace(" ", "").encode("ascii")
            # A C line completes a K+P+C triple; many P+C pairs may share
            # a single K.
            results.append({
                "key": key,
                "plaintext": pt,
                "ciphertext": ct
            })
        else:
            raise ValueError("Invalid line in file '{}'".format(entry))

    return results
|
|
|
|
|
|
|
|
|
|
|
2013-10-18 22:28:39 +00:00
|
|
|
def load_hash_vectors(vector_data):
    """Parse NIST hash / HMAC vector files into (Keyed)HashVector tuples.

    Plain hash vectors yield HashVector(message, digest); HMAC vectors,
    which additionally carry a "Key" attribute, yield KeyedHashVector.
    """
    vectors = []
    key = msg = md = None

    for raw_line in vector_data:
        entry = raw_line.strip()

        # Skip blanks, comments, and [section] headers.
        if not entry or entry.startswith(("#", "[")):
            continue

        if entry.startswith("Len"):
            length = int(entry.split(" = ")[1])
        elif entry.startswith("Key"):
            # Only HMAC vectors carry a key attribute.
            key = entry.split(" = ")[1].encode("ascii")
        elif entry.startswith("Msg"):
            # NIST spells the empty message as hex "00", which is not
            # actually empty; the declared Len disambiguates.
            msg = entry.split(" = ")[1].encode("ascii") if length > 0 else b""
        elif entry.startswith("MD"):
            md = entry.split(" = ")[1]
            # MD completes a vector (keyed if a Key was seen).
            if key is None:
                vectors.append(HashVector(msg, md))
            else:
                vectors.append(KeyedHashVector(msg, md, key))
            key = msg = md = None
        else:
            raise ValueError("Unknown line in hash vector")

    return vectors
|
2014-02-02 19:30:03 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_pkcs1_vectors(vector_data):
    """
    Loads data out of RSA PKCS #1 vector files.

    Returns a list of (private_key_vector, public_key_vector) dict pairs.
    Each private key dict also carries an "examples" list holding the
    PSS/OAEP/PKCS#1 v1.5 example dicts parsed under that key pair.
    """
    private_key_vector = None
    public_key_vector = None
    # ``attr`` names the field whose hex lines are being accumulated;
    # ``key`` points at whichever key dict is currently being filled.
    # NOTE: ``key`` is also reused as the loop variable of the
    # six.iteritems() loops below and reset to None afterwards.
    attr = None
    key = None
    example_vector = None
    examples = []
    vectors = []
    for line in vector_data:
        # A new example header finalizes any example in progress: the
        # accumulated hex chunks are joined into one ascii-bytes string.
        if (
            line.startswith("# PSS Example") or
            line.startswith("# OAEP Example") or
            line.startswith("# PKCS#1 v1.5")
        ):
            if example_vector:
                for key, value in six.iteritems(example_vector):
                    hex_str = "".join(value).replace(" ", "").encode("ascii")
                    example_vector[key] = hex_str
                examples.append(example_vector)

            attr = None
            example_vector = collections.defaultdict(list)

        # Inside an example block, comment headers select which attribute
        # the following hex lines belong to.
        if line.startswith("# Message"):
            attr = "message"
            continue
        elif line.startswith("# Salt"):
            attr = "salt"
            continue
        elif line.startswith("# Seed"):
            attr = "seed"
            continue
        elif line.startswith("# Signature"):
            attr = "signature"
            continue
        elif line.startswith("# Encryption"):
            attr = "encryption"
            continue
        elif (
            example_vector and
            line.startswith("# =============================================")
        ):
            # The separator closes the last example of the current pair.
            for key, value in six.iteritems(example_vector):
                hex_str = "".join(value).replace(" ", "").encode("ascii")
                example_vector[key] = hex_str
            examples.append(example_vector)
            example_vector = None
            attr = None
        elif example_vector and line.startswith("#"):
            continue
        else:
            # Data line within an example: accumulate and stop here.
            # Lines not belonging to an example fall through to the key
            # parsing logic below.
            if attr is not None and example_vector is not None:
                example_vector[attr].append(line.strip())
                continue

        # "# Example" and the separator also delimit key pairs: finish
        # the previous pair (converting hex chunks to ints) and start
        # fresh accumulators.
        if (
            line.startswith("# Example") or
            line.startswith("# =============================================")
        ):
            if key:
                assert private_key_vector
                assert public_key_vector

                for key, value in six.iteritems(public_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    public_key_vector[key] = int(hex_str, 16)

                for key, value in six.iteritems(private_key_vector):
                    hex_str = "".join(value).replace(" ", "")
                    private_key_vector[key] = int(hex_str, 16)

                private_key_vector["examples"] = examples
                examples = []

                # Sanity check: both halves must describe the same key.
                assert (
                    private_key_vector['public_exponent'] ==
                    public_key_vector['public_exponent']
                )

                assert (
                    private_key_vector['modulus'] ==
                    public_key_vector['modulus']
                )

                vectors.append(
                    (private_key_vector, public_key_vector)
                )

            public_key_vector = collections.defaultdict(list)
            private_key_vector = collections.defaultdict(list)
            key = None
            attr = None

        if private_key_vector is None or public_key_vector is None:
            continue

        # Key-material headers select the target dict and attribute.
        if line.startswith("# Private key"):
            key = private_key_vector
        elif line.startswith("# Public key"):
            key = public_key_vector
        elif line.startswith("# Modulus:"):
            attr = "modulus"
        elif line.startswith("# Public exponent:"):
            attr = "public_exponent"
        elif line.startswith("# Exponent:"):
            # "Exponent" is ambiguous: it is the public exponent inside a
            # public key block and the private exponent otherwise.
            if key is public_key_vector:
                attr = "public_exponent"
            else:
                assert key is private_key_vector
                attr = "private_exponent"
        elif line.startswith("# Prime 1:"):
            attr = "p"
        elif line.startswith("# Prime 2:"):
            attr = "q"
        elif line.startswith("# Prime exponent 1:"):
            attr = "dmp1"
        elif line.startswith("# Prime exponent 2:"):
            attr = "dmq1"
        elif line.startswith("# Coefficient:"):
            attr = "iqmp"
        elif line.startswith("#"):
            attr = None
        else:
            if key is not None and attr is not None:
                key[attr].append(line.strip())
    return vectors
|
2014-03-11 03:30:28 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_rsa_nist_vectors(vector_data):
    """Parse NIST RSA (FIPS 186 style) signature vector files.

    Handles both verification files (modulus and exponent declared up
    front, one dict per SHAAlg block, optional PSS salt length and a
    Result flag) and generation-style files where p/q/e/d accompany each
    case.  Unrecognized values are stored as ASCII bytes under their
    lower-cased names.
    """
    data = []
    current = None
    p = None
    salt_length = None

    for raw_line in vector_data:
        entry = raw_line.strip()

        # Blank lines and [bracketed] section headers carry no data.
        if not entry or entry.startswith("["):
            continue

        if entry.startswith("# Salt len:"):
            salt_length = int(entry.split(":")[1].strip())
            continue
        elif entry.startswith("#"):
            continue

        # Remaining lines are "Name = Value" pairs.
        name, value = [piece.strip() for piece in entry.split("=")]

        if name == "n":
            n = int(value, 16)
        elif name == "e" and p is None:
            e = int(value, 16)
        elif name == "p":
            p = int(value, 16)
        elif name == "q":
            q = int(value, 16)
        elif name == "SHAAlg":
            # SHAAlg opens a new test case using whatever key material
            # has been seen so far.
            if p is None:
                current = {
                    "modulus": n,
                    "public_exponent": e,
                    "salt_length": salt_length,
                    "algorithm": value,
                    "fail": False
                }
            else:
                current = {
                    "modulus": n,
                    "p": p,
                    "q": q,
                    "algorithm": value
                }
                if salt_length is not None:
                    current["salt_length"] = salt_length
            data.append(current)
        elif name == "e" and p is not None:
            current["public_exponent"] = int(value, 16)
        elif name == "d":
            current["private_exponent"] = int(value, 16)
        elif name == "Result":
            current["fail"] = value.startswith("F")
        else:
            current[name.lower()] = value.encode("ascii")

    return data
|
2014-03-12 14:07:21 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_fips_dsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS DSA KeyPair vector files.

    Key blocks under the [mod = L=2048, N=224] header are skipped because
    that parameter size is unsupported.
    """
    vectors = []
    # Toggled off while inside the unsupported N=224 section so its key
    # blocks are not collected.
    collecting = True

    for raw_line in vector_data:
        entry = raw_line.strip()

        if not entry or entry.startswith("#"):
            continue
        if entry.startswith("[mod = L=2048, N=224"):
            collecting = False
            continue
        if entry.startswith("[mod = L=2048, N=256"):
            collecting = True
            continue
        if entry.startswith(("[mod = L=1024", "[mod = L=3072")):
            continue
        if not collecting:
            continue

        if entry.startswith("P"):
            # P opens a new parameter set / key.
            vectors.append({'p': int(entry.split("=")[1], 16)})
        elif entry.startswith("Q"):
            vectors[-1]['q'] = int(entry.split("=")[1], 16)
        elif entry.startswith("G"):
            vectors[-1]['g'] = int(entry.split("=")[1], 16)
        elif entry.startswith("X"):
            if 'x' not in vectors[-1]:
                vectors[-1]['x'] = int(entry.split("=")[1], 16)
            else:
                # A second X starts another key pair sharing the same
                # domain parameters (p, q, g).
                vectors.append({'p': vectors[-1]['p'],
                                'q': vectors[-1]['q'],
                                'g': vectors[-1]['g'],
                                'x': int(entry.split("=")[1], 16)
                                })
        elif entry.startswith("Y"):
            vectors[-1]['y'] = int(entry.split("=")[1], 16)

    return vectors
|
2014-04-12 11:48:59 +00:00
|
|
|
|
|
|
|
|
|
2014-04-22 12:24:44 +00:00
|
|
|
def load_fips_dsa_sig_vectors(vector_data):
    """
    Loads data out of the FIPS DSA SigVer vector files.

    The digest named in each [mod = ...] header is attached to every
    vector parsed under it.  The unsupported N=224 sections are skipped.
    """
    vectors = []
    sha_regex = re.compile(
        r"\[mod = L=...., N=..., SHA-(?P<sha>1|224|256|384|512)\]"
    )
    # Toggled off while inside the unsupported N=224 sections.
    collecting = True

    for raw_line in vector_data:
        entry = raw_line.strip()

        if not entry or entry.startswith("#"):
            continue

        header = sha_regex.match(entry)
        if header:
            digest_algorithm = "SHA-{0}".format(header.group("sha"))

        if entry.startswith("[mod = L=2048, N=224"):
            collecting = False
            continue
        elif entry.startswith("[mod = L=2048, N=256"):
            collecting = True
            continue

        if not collecting or entry.startswith("[mod"):
            continue

        name, value = [piece.strip() for piece in entry.split("=")]

        if name == "P":
            # P opens a new vector inheriting the current digest.
            vectors.append({'p': int(value, 16),
                            'digest_algorithm': digest_algorithm})
        elif name == "Q":
            vectors[-1]['q'] = int(value, 16)
        elif name == "G":
            vectors[-1]['g'] = int(value, 16)
        elif name == "Msg":
            hexmsg = value.strip().encode("ascii")
            if 'msg' not in vectors[-1]:
                vectors[-1]['msg'] = binascii.unhexlify(hexmsg)
            else:
                # A second Msg starts a new vector sharing the same
                # domain parameters and digest.
                vectors.append({'p': vectors[-1]['p'],
                                'q': vectors[-1]['q'],
                                'g': vectors[-1]['g'],
                                'digest_algorithm':
                                vectors[-1]['digest_algorithm'],
                                'msg': binascii.unhexlify(hexmsg)})
        elif name == "X":
            vectors[-1]['x'] = int(value, 16)
        elif name == "Y":
            vectors[-1]['y'] = int(value, 16)
        elif name == "R":
            vectors[-1]['r'] = int(value, 16)
        elif name == "S":
            vectors[-1]['s'] = int(value, 16)
        elif name == "Result":
            vectors[-1]['result'] = value.split("(")[0].strip()

    return vectors
|
|
|
|
|
|
|
|
|
|
|
2014-04-19 08:44:26 +00:00
|
|
|
# http://tools.ietf.org/html/rfc4492#appendix-A
# Maps NIST curve names, as they appear in CAVP vector section headers,
# to the equivalent SECG curve names.
_ECDSA_CURVE_NAMES = {
    "P-192": "secp192r1",
    "P-224": "secp224r1",
    "P-256": "secp256r1",
    "P-384": "secp384r1",
    "P-521": "secp521r1",

    "K-163": "sect163k1",
    "K-233": "sect233k1",
    "K-283": "sect283k1",
    "K-409": "sect409k1",
    "K-571": "sect571k1",

    "B-163": "sect163r2",
    "B-233": "sect233r1",
    "B-283": "sect283r1",
    "B-409": "sect409r1",
    "B-571": "sect571r1",
}
|
|
|
|
|
|
|
|
|
|
|
2014-04-12 11:48:59 +00:00
|
|
|
def load_fips_ecdsa_key_pair_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA KeyPair vector files.

    Section headers such as [P-256] set the curve for the keys that
    follow; each "d" line opens a new key dict.
    """
    vectors = []
    current = None

    for raw_line in vector_data:
        entry = raw_line.strip()

        if not entry or entry.startswith("#"):
            continue

        if entry[1:-1] in _ECDSA_CURVE_NAMES:
            # A [curve] section header; applies to all following keys.
            curve_name = _ECDSA_CURVE_NAMES[entry[1:-1]]
        elif entry.startswith("d = "):
            # d opens a new key; flush the previous one first.
            if current is not None:
                vectors.append(current)

            current = {
                "curve": curve_name,
                "d": int(entry.split("=")[1], 16)
            }
        elif current is not None:
            if entry.startswith("Qx = "):
                current["x"] = int(entry.split("=")[1], 16)
            elif entry.startswith("Qy = "):
                current["y"] = int(entry.split("=")[1], 16)

    # Flush the final key.
    if current is not None:
        vectors.append(current)

    return vectors
|
2014-04-13 12:58:02 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_fips_ecdsa_signing_vectors(vector_data):
    """
    Loads data out of the FIPS ECDSA SigGen vector files.

    Headers like [P-256,SHA-256] set the curve and digest for the cases
    that follow; each "Msg" line opens a new case dict.
    """
    vectors = []

    header_rx = re.compile(
        r"\[(?P<curve>[PKB]-[0-9]{3}),SHA-(?P<sha>1|224|256|384|512)\]"
    )

    current = None
    for raw_line in vector_data:
        entry = raw_line.strip()

        if not entry or entry.startswith("#"):
            continue

        header = header_rx.match(entry)
        if header:
            curve_name = _ECDSA_CURVE_NAMES[header.group("curve")]
            digest_name = "SHA-{0}".format(header.group("sha"))
        elif entry.startswith("Msg = "):
            # Msg opens a new case; flush the previous one first.
            if current is not None:
                vectors.append(current)

            hexmsg = entry.split("=")[1].strip().encode("ascii")

            current = {
                "curve": curve_name,
                "digest_algorithm": digest_name,
                "message": binascii.unhexlify(hexmsg)
            }
        elif current is not None:
            if entry.startswith("Qx = "):
                current["x"] = int(entry.split("=")[1], 16)
            elif entry.startswith("Qy = "):
                current["y"] = int(entry.split("=")[1], 16)
            elif entry.startswith("R = "):
                current["r"] = int(entry.split("=")[1], 16)
            elif entry.startswith("S = "):
                current["s"] = int(entry.split("=")[1], 16)
            elif entry.startswith("d = "):
                current["d"] = int(entry.split("=")[1], 16)
            elif entry.startswith("Result = "):
                current["fail"] = entry.split("=")[1].strip()[0] == "F"

    # Flush the final case.
    if current is not None:
        vectors.append(current)
    return vectors
|