author    Rasmus Dahlberg <rasmus.dahlberg@kau.se>  2020-11-17 15:34:33 +0100
committer Rasmus Dahlberg <rasmus.dahlberg@kau.se>  2020-11-17 15:34:33 +0100
commit    c11006fb89bcac7fdfc2278990b2b1a3a3553ba9 (patch)
tree      7dc1547912ba1a018b56e8e1dc4e3fe27d4cbff9
parent    f02d9ad52b4b70fc1af8224201cf993faa82eaee (diff)
added type tests
As a result these changes were made:

- Simplified a few New* functions.
- Allowed empty inclusion and consistency proofs, which previously could not
  be marshaled due to `tls:"minlen:1"`. For example, an inclusion proof is
  empty for a Merkle tree of size 1 (see the sketch below).
- Disallowed empty signatures and chains in a leaf's Appendix.
- Removed unnecessary examples.
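To illustrate the empty-proof case, here is a minimal sketch (not part of the commit; it only assumes the exported constructors shown in the diff below and that the package is importable as github.com/system-transparency/stfe) that builds and serializes an inclusion proof for a tree of size 1:

package main

import (
	"fmt"

	"github.com/system-transparency/stfe"
)

func main() {
	logId := make([]byte, 32) // log identifiers are fixed at 32 bytes

	// A Merkle tree of size 1 has no sibling hashes, so the inclusion
	// proof for its only leaf is the empty path; with minlen:0 this
	// now marshals instead of failing.
	item := stfe.NewInclusionProofV1(logId, 1, 0, [][]byte{})
	b, err := item.Marshal()
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Printf("serialized inclusion_proof_v1: %d bytes\n", len(b))
}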
-rw-r--r--  handler.go          6
-rw-r--r--  trillian_test.go   11
-rw-r--r--  type.go            27
-rw-r--r--  type_test.go      342
4 files changed, 342 insertions, 44 deletions
diff --git a/handler.go b/handler.go
index d77379d..839a310 100644
--- a/handler.go
+++ b/handler.go
@@ -138,7 +138,7 @@ func getProofByHash(ctx context.Context, i *Instance, w http.ResponseWriter, r *
return status, fmt.Errorf("bad GetInclusionProofByHashResponse: %v", errInner)
}
- rsp, err := NewInclusionProofV1(i.LogParameters.LogId, uint64(req.TreeSize), trsp.Proof[0]).MarshalB64()
+ rsp, err := NewInclusionProofV1(i.LogParameters.LogId, uint64(req.TreeSize), uint64(trsp.Proof[0].LeafIndex), trsp.Proof[0].Hashes).MarshalB64()
if err != nil {
return http.StatusInternalServerError, err
}
@@ -165,7 +165,7 @@ func getConsistencyProof(ctx context.Context, i *Instance, w http.ResponseWriter
return status, fmt.Errorf("bad GetConsistencyProofResponse: %v", errInner)
}
- rsp, err := NewConsistencyProofV1(i.LogParameters.LogId, req.First, req.Second, trsp.Proof).MarshalB64()
+ rsp, err := NewConsistencyProofV1(i.LogParameters.LogId, uint64(req.First), uint64(req.Second), trsp.Proof.Hashes).MarshalB64()
if err != nil {
return http.StatusInternalServerError, err
}
@@ -186,7 +186,7 @@ func getSth(ctx context.Context, i *Instance, w http.ResponseWriter, _ *http.Req
return status, fmt.Errorf("bad GetLatestSignedLogRootResponse: %v", errInner)
}
- sth, err := i.LogParameters.genV1Sth(NewTreeHeadV1(i.LogParameters, &lr))
+ sth, err := i.LogParameters.genV1Sth(NewTreeHeadV1(&lr))
if err != nil {
return http.StatusInternalServerError, fmt.Errorf("failed creating signed tree head: %v", err)
}
diff --git a/trillian_test.go b/trillian_test.go
index 7b26bb9..174fa13 100644
--- a/trillian_test.go
+++ b/trillian_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/google/trillian"
+ "github.com/google/trillian/types"
"github.com/system-transparency/stfe/server/testdata"
"google.golang.org/grpc/codes"
@@ -112,3 +113,13 @@ func makeTrillianGetLeavesByRangeResponse(t *testing.T, start, end int64, name,
SignedLogRoot: testdata.NewGetLatestSignedLogRootResponse(t, 0, uint64(end)+1, make([]byte, 32)).SignedLogRoot,
}
}
+
+func makeTrillianLogRoot(t *testing.T, timestamp, size uint64, hash []byte) *types.LogRootV1 {
+ return &types.LogRootV1{
+ TreeSize: size,
+ RootHash: hash,
+ TimestampNanos: timestamp,
+ Revision: 0, // not used by stfe
+ Metadata: nil, // not used by stfe
+ }
+}
diff --git a/type.go b/type.go
index 7105eff..564e185 100644
--- a/type.go
+++ b/type.go
@@ -8,7 +8,6 @@ import (
"encoding/base64"
"github.com/google/certificate-transparency-go/tls"
- "github.com/google/trillian"
"github.com/google/trillian/types"
)
@@ -54,7 +53,7 @@ type ConsistencyProofV1 struct {
LogId []byte `tls:"minlen:32,maxlen:32"`
TreeSize1 uint64
TreeSize2 uint64
- ConsistencyPath []NodeHash `tls:"minlen:1,maxlen:65535"`
+ ConsistencyPath []NodeHash `tls:"minlen:0,maxlen:65535"`
}
// InclusionProofV1 is an inclusion proof as defined by RFC 6962/bis, ยง4.12
@@ -62,7 +61,7 @@ type InclusionProofV1 struct {
LogId []byte `tls:"minlen:32,maxlen:32"`
TreeSize uint64
LeafIndex uint64
- InclusionPath []NodeHash `tls:"minlen:1,maxlen:65535"`
+ InclusionPath []NodeHash `tls:"minlen:0,maxlen:65535"`
}
// ChecksumV1 associates a leaf type as defined by markdown/api.md
@@ -91,9 +90,9 @@ type RawCertificate struct {
// Appendix is extra leaf data that is not stored in the log's Merkle tree
type Appendix struct {
- Signature []byte `tls:"minlen:0,maxlen:16383"`
+ Signature []byte `tls:"minlen:1,maxlen:16383"`
SignatureScheme uint16
- Chain []RawCertificate `tls:"minlen:0,maxlen:65535"`
+ Chain []RawCertificate `tls:"minlen:1,maxlen:65535"`
}
func (f StFormat) String() string {
@@ -249,21 +248,21 @@ func NewSignedDebugInfoV1(logId, message, signature []byte) *StItem {
}
// NewInclusionProofV1 creates a new StItem of type inclusion_proof_v1
-func NewInclusionProofV1(logID []byte, treeSize uint64, proof *trillian.Proof) *StItem {
- inclusionPath := make([]NodeHash, 0, len(proof.Hashes))
- for _, hash := range proof.Hashes {
- inclusionPath = append(inclusionPath, NodeHash{Data: hash})
+func NewInclusionProofV1(logID []byte, treeSize, index uint64, proof [][]byte) *StItem {
+ path := make([]NodeHash, 0, len(proof))
+ for _, hash := range proof {
+ path = append(path, NodeHash{Data: hash})
}
return &StItem{
Format: StFormatInclusionProofV1,
- InclusionProofV1: &InclusionProofV1{logID, treeSize, uint64(proof.LeafIndex), inclusionPath},
+ InclusionProofV1: &InclusionProofV1{logID, treeSize, index, path},
}
}
// NewConsistencyProofV1 creates a new StItem of type consistency_proof_v1
-func NewConsistencyProofV1(logId []byte, first, second int64, proof *trillian.Proof) *StItem {
- path := make([]NodeHash, 0, len(proof.Hashes))
- for _, hash := range proof.Hashes {
+func NewConsistencyProofV1(logId []byte, first, second uint64, proof [][]byte) *StItem {
+ path := make([]NodeHash, 0, len(proof))
+ for _, hash := range proof {
path = append(path, NodeHash{Data: hash})
}
return &StItem{
@@ -282,7 +281,7 @@ func NewChecksumV1(identifier []byte, checksum []byte) *StItem {
// NewTreeHead creates a new TreeHeadV1 from a Trillian-signed log root without
// verifying any signature. In other words, Trillian <-> STFE must be trusted.
-func NewTreeHeadV1(lp *LogParameters, lr *types.LogRootV1) *TreeHeadV1 {
+func NewTreeHeadV1(lr *types.LogRootV1) *TreeHeadV1 {
return &TreeHeadV1{
uint64(lr.TimestampNanos / 1000 / 1000),
uint64(lr.TreeSize),
diff --git a/type_test.go b/type_test.go
index c6fa687..1389467 100644
--- a/type_test.go
+++ b/type_test.go
@@ -1,41 +1,329 @@
package stfe
import (
- "fmt"
+ "testing"
- "crypto/sha256"
-)
+ "crypto/tls"
-func ExampleNewChecksumV1() {
- name := []byte("foobar-1.2.3")
- hasher := sha256.New()
- hasher.Write([]byte(name))
- checksum := hasher.Sum(nil) // hash of package name
+ "github.com/system-transparency/stfe/server/testdata"
+ "github.com/system-transparency/stfe/x509util"
+)
- item := NewChecksumV1(name, checksum)
- fmt.Printf("%s\n", item)
- // Output: Format(checksum_v1): Package(foobar-1.2.3) Checksum(UOeWe84malBvj2FLtQlr66WA0gUEa5GPR9I7LsYm114=)
-}
+var (
+ testLogId = make([]byte, 32)
+ testSignature = make([]byte, 32)
+ testNodeHash = make([]byte, 32)
+ testMessage = []byte("test message")
+ testPackage = []byte("foobar")
+ testChecksum = make([]byte, 32)
+ testTreeSize = uint64(128)
+ testTreeSizeLarger = uint64(256)
+ testTimestamp = uint64(0)
+ testProof = [][]byte{
+ make([]byte, 32),
+ make([]byte, 32),
+ }
+ testIndex = uint64(0)
+ testSignatureScheme = tls.Ed25519
+)
-func ExampleMarshalChecksumV1() {
- item := NewChecksumV1([]byte("foobar-1.2.3"), make([]byte, 32))
- b, err := item.Marshal()
+// TestEncDecAppendix tests that valid appendices can be (un)marshaled, and that
+// invalid ones in fact fail.
+//
+// TODO: max limits for certificate chains are not tested.
+func TestEncDecAppendix(t *testing.T) {
+ chain, err := x509util.NewCertificateList(testdata.PemChain)
if err != nil {
- fmt.Printf("%v", err)
- return
+ t.Fatalf("must decode certificate chain: %v", err)
+ }
+
+ signatureMin := 1
+ signatureMax := 16383
+ for _, table := range []struct {
+ description string
+ appendix *Appendix
+ wantErr bool
+ }{
+ {
+ description: "too short signature",
+ appendix: NewAppendix(chain, make([]byte, signatureMin-1), uint16(testSignatureScheme)),
+ wantErr: true,
+ },
+ {
+ description: "too large signature",
+ appendix: NewAppendix(chain, make([]byte, signatureMax+1), uint16(testSignatureScheme)),
+ wantErr: true,
+ },
+ {
+ description: "ok signature: min",
+ appendix: NewAppendix(chain, make([]byte, signatureMin), uint16(testSignatureScheme)),
+ },
+ {
+ description: "ok signature: max",
+ appendix: NewAppendix(chain, make([]byte, signatureMax), uint16(testSignatureScheme)),
+ },
+ {
+ description: "too short chain",
+ appendix: NewAppendix(nil, testSignature, uint16(testSignatureScheme)),
+ wantErr: true,
+ },
+ } {
+ b, err := table.appendix.Marshal()
+ if err != nil && !table.wantErr {
+ t.Errorf("failed marshaling Appendix for %q: %v", table.description, err)
+ } else if err == nil && table.wantErr {
+ t.Errorf("succeeded marshaling Appendix but wanted error for %q", table.description)
+ }
+ if err != nil || table.wantErr {
+ continue // nothing to unmarshal
+ }
+
+ var appendix Appendix
+ if err := appendix.Unmarshal(b); err != nil {
+ t.Errorf("failed unmarshaling Appendix: %v", err)
+ }
}
- fmt.Printf("%v\n", b)
- // Output: [0 5 12 102 111 111 98 97 114 45 49 46 50 46 51 32 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
}
-func ExampleUnmarshalChecksumV1() {
- b := []byte{0, 5, 12, 102, 111, 111, 98, 97, 114, 45, 49, 46, 50, 46, 51, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+// TestEncDecStItem tests that valid StItems can be (un)marshaled, and that
+// invalid ones in fact fail.
+//
+// TODO: max limits for inclusion and consistency proofs are not tested.
+// Note: TreeHeadV1 extensions are not tested (not used by stfe)
+func TestEncDecStItem(t *testing.T) {
+ logIdSize := 32
+ signatureMin := 1
+ signatureMax := 65535
+ messageMax := 65535
+ nodeHashMin := 32
+ nodeHashMax := 255
+ packageMin := 1
+ packageMax := 255
+ checksumMin := 1
+ checksumMax := 64
+ for _, table := range []struct {
+ description string
+ item *StItem
+ wantErr bool
+ }{
+ // signed_tree_head_v1
+ {
+ description: "too short log id",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), make([]byte, logIdSize-1), testSignature),
+ wantErr: true,
+ },
+ {
+ description: "too large log id",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), make([]byte, logIdSize+1), testSignature),
+ wantErr: true,
+ },
+ {
+ description: "ok log id: min and max",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), testLogId, testSignature),
+ },
+ {
+ description: "too short signature",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), testLogId, make([]byte, signatureMin-1)),
+ wantErr: true,
+ },
+ {
+ description: "too large signature",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), testLogId, make([]byte, signatureMax+1)),
+ wantErr: true,
+ },
+ {
+ description: "ok signature: min",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), testLogId, make([]byte, signatureMin)),
+ },
+ {
+ description: "ok signature: max",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, testNodeHash)), testLogId, make([]byte, signatureMax)),
+ },
+ {
+ description: "too short root hash",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, make([]byte, nodeHashMin-1))), testLogId, testSignature),
+ wantErr: true,
+ },
+ {
+ description: "too large root hash",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, make([]byte, nodeHashMax+1))), testLogId, testSignature),
+ wantErr: true,
+ },
+ {
+ description: "ok root hash: min",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, make([]byte, nodeHashMin))), testLogId, testSignature),
+ },
+ {
+ description: "ok root hash: max",
+ item: NewSignedTreeHeadV1(NewTreeHeadV1(makeTrillianLogRoot(t, testTimestamp, testTreeSize, make([]byte, nodeHashMax))), testLogId, testSignature),
+ },
+ // signed_debug_info_v1
+ {
+ description: "too short log id",
+ item: NewSignedDebugInfoV1(make([]byte, logIdSize-1), testMessage, testSignature),
+ wantErr: true,
+ },
+ {
+ description: "too large log id",
+ item: NewSignedDebugInfoV1(make([]byte, logIdSize+1), testMessage, testSignature),
+ wantErr: true,
+ },
+ {
+ description: "ok log id: min and max",
+ item: NewSignedDebugInfoV1(testLogId, testMessage, testSignature),
+ },
+ {
+ description: "too large message",
+ item: NewSignedDebugInfoV1(testLogId, make([]byte, messageMax+1), testSignature),
+ wantErr: true,
+ },
+ {
+ description: "ok message: max",
+ item: NewSignedDebugInfoV1(testLogId, make([]byte, messageMax), testSignature),
+ },
+ {
+ description: "too short signature",
+ item: NewSignedDebugInfoV1(testLogId, testMessage, make([]byte, signatureMin-1)),
+ wantErr: true,
+ },
+ {
+ description: "too large signature",
+ item: NewSignedDebugInfoV1(testLogId, testMessage, make([]byte, signatureMax+1)),
+ wantErr: true,
+ },
+ {
+ description: "ok signature: min",
+ item: NewSignedDebugInfoV1(testLogId, testMessage, make([]byte, signatureMin)),
+ },
+ {
+ description: "ok signature: max",
+ item: NewSignedDebugInfoV1(testLogId, testMessage, make([]byte, signatureMax)),
+ },
+ // consistency_proof_v1
+ {
+ description: "too short log id",
+ item: NewConsistencyProofV1(make([]byte, logIdSize-1), testTreeSize, testTreeSizeLarger, testProof),
+ wantErr: true,
+ },
+ {
+ description: "too large log id",
+ item: NewConsistencyProofV1(make([]byte, logIdSize+1), testTreeSize, testTreeSizeLarger, testProof),
+ wantErr: true,
+ },
+ {
+ description: "ok log id: min and max",
+ item: NewConsistencyProofV1(testLogId, testTreeSize, testTreeSizeLarger, testProof),
+ },
+ {
+ description: "too small node hash in proof",
+ item: NewConsistencyProofV1(testLogId, testTreeSize, testTreeSizeLarger, [][]byte{make([]byte, nodeHashMin-1)}),
+ wantErr: true,
+ },
+ {
+ description: "too large node hash in proof",
+ item: NewConsistencyProofV1(testLogId, testTreeSize, testTreeSizeLarger, [][]byte{make([]byte, nodeHashMax+1)}),
+ wantErr: true,
+ },
+ {
+ description: "ok proof: min node hash",
+ item: NewConsistencyProofV1(testLogId, testTreeSize, testTreeSizeLarger, [][]byte{make([]byte, nodeHashMin)}),
+ },
+ {
+ description: "ok proof: max node hash",
+ item: NewConsistencyProofV1(testLogId, testTreeSize, testTreeSizeLarger, [][]byte{make([]byte, nodeHashMax)}),
+ },
+ {
+ description: "ok proof: empty",
+ item: NewConsistencyProofV1(testLogId, testTreeSize, testTreeSizeLarger, [][]byte{}),
+ },
+ // inclusion_proof_v1
+ {
+ description: "too short log id",
+ item: NewInclusionProofV1(make([]byte, logIdSize-1), testTreeSize, testIndex, testProof),
+ wantErr: true,
+ },
+ {
+ description: "too large log id",
+ item: NewInclusionProofV1(make([]byte, logIdSize+1), testTreeSize, testIndex, testProof),
+ wantErr: true,
+ },
+ {
+ description: "ok log id: min and max",
+ item: NewInclusionProofV1(testLogId, testTreeSize, testIndex, testProof),
+ },
+ {
+ description: "too short node hash in proof",
+ item: NewInclusionProofV1(testLogId, testTreeSize, testIndex, [][]byte{make([]byte, nodeHashMin-1)}),
+ wantErr: true,
+ },
+ {
+ description: "too large node hash in proof",
+ item: NewInclusionProofV1(testLogId, testTreeSize, testIndex, [][]byte{make([]byte, nodeHashMax+1)}),
+ wantErr: true,
+ },
+ {
+ description: "ok proof: min node hash",
+ item: NewInclusionProofV1(testLogId, testTreeSize, testIndex, [][]byte{make([]byte, nodeHashMin)}),
+ },
+ {
+ description: "ok proof: max node hash",
+ item: NewInclusionProofV1(testLogId, testTreeSize, testIndex, [][]byte{make([]byte, nodeHashMax)}),
+ },
+ {
+ description: "ok proof: empty",
+ item: NewInclusionProofV1(testLogId, testTreeSize, testIndex, [][]byte{}),
+ },
+ // checksum_v1
+ {
+ description: "too short package",
+ item: NewChecksumV1(make([]byte, packageMin-1), testChecksum),
+ wantErr: true,
+ },
+ {
+ description: "too large package",
+ item: NewChecksumV1(make([]byte, packageMax+1), testChecksum),
+ wantErr: true,
+ },
+ {
+ description: "ok package: min",
+ item: NewChecksumV1(make([]byte, packageMin), testChecksum),
+ },
+ {
+ description: "ok package: max",
+ item: NewChecksumV1(make([]byte, packageMax), testChecksum),
+ },
+ {
+ description: "too short checksum",
+ item: NewChecksumV1(testPackage, make([]byte, checksumMin-1)),
+ wantErr: true,
+ },
+ {
+ description: "too large checksum",
+ item: NewChecksumV1(testPackage, make([]byte, checksumMax+1)),
+ wantErr: true,
+ },
+ {
+ description: "ok checksum: min",
+ item: NewChecksumV1(testPackage, make([]byte, checksumMin)),
+ },
+ {
+ description: "ok checksum: max",
+ item: NewChecksumV1(testPackage, make([]byte, checksumMax)),
+ },
+ } {
+ b, err := table.item.Marshal()
+ if err != nil && !table.wantErr {
+ t.Errorf("failed marshaling StItem(%s) in test %q: %v", table.item.Format, table.description, err)
+ } else if err == nil && table.wantErr {
+ t.Errorf("succeeded marshaling StItem(%s) in test %q but want failure", table.item.Format, table.description)
+ }
+ if err != nil || table.wantErr {
+ continue // nothing to unmarshal
+ }
- var item StItem
- if err := item.Unmarshal(b); err != nil {
- fmt.Printf("%v", err)
- return
+ var item StItem
+ if err := item.Unmarshal(b); err != nil {
+ t.Errorf("failed unmarshaling StItem(%s) in test %q: %v", table.item.Format, table.description, err)
+ }
}
- fmt.Printf("%v\n", item)
- // Output: Format(checksum_v1): Package(foobar-1.2.3) Checksum(AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=)
}
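
As a usage note (a hedged sketch, not part of the commit), the round trip that the table-driven tests above exercise looks roughly as follows with the updated constructor signatures; the 32-byte values are placeholders:

package main

import (
	"fmt"

	"github.com/system-transparency/stfe"
)

func main() {
	logId := make([]byte, 32)
	proof := [][]byte{make([]byte, 32), make([]byte, 32)} // placeholder node hashes

	// first=128, second=256 mirrors testTreeSize / testTreeSizeLarger above.
	item := stfe.NewConsistencyProofV1(logId, 128, 256, proof)
	b, err := item.Marshal()
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	var out stfe.StItem
	if err := out.Unmarshal(b); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("round trip ok:", out.Format)
}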