author    Linus Nordberg <linus@nordberg.se>    2021-05-25 11:29:29 +0200
committer Linus Nordberg <linus@nordberg.se>    2021-05-25 11:29:29 +0200
commit    f9aae584c787950e84cf3b098290a0c73330d8ac (patch)
tree      d3a018493954529794de3c21c9f4f6b0c846a925
parent    533f683ef1ae999c2fdc0086cbc3de4e675d1e33 (diff)
parent    6a20aec8e8a93ce11f8b940659f49c889f94aef1 (diff)
Merge branch 'design' of github.com:system-transparency/stfe into design
-rw-r--r--  doc/claimant.md                   71
-rw-r--r--  types/cmd/new-namespace/main.go   56
-rw-r--r--  types/http.go                    188
-rw-r--r--  types/http_test.go               331
-rw-r--r--  types/namespace.go                91
-rw-r--r--  types/namespace_pool.go           69
-rw-r--r--  types/namespace_pool_test.go      91
-rw-r--r--  types/namespace_test.go          200
-rw-r--r--  types/serialize.go                50
-rw-r--r--  types/serialize_test.go          736
-rw-r--r--  types/stitem.go                  192
-rw-r--r--  types/stitem_test.go              64
-rw-r--r--  types/trunnel.go                  57
-rw-r--r--  types/trunnel_test.go            114
-rw-r--r--  types/types.go                    73
15 files changed, 834 insertions, 1549 deletions
diff --git a/doc/claimant.md b/doc/claimant.md
new file mode 100644
index 0000000..6728fef
--- /dev/null
+++ b/doc/claimant.md
@@ -0,0 +1,71 @@
+# Claimant model
+## **System<sup>CHECKSUM</sup>**
+System<sup>CHECKSUM</sup> is about the claims made by a data publisher.
+* **Claim<sup>CHECKSUM</sup>**:
+ _I, data publisher, claim that the data_:
+ 1. has cryptographic hash X
+ 2. is produced by no-one but myself
+* **Statement<sup>CHECKSUM</sup>**: signed checksum<br>
+* **Claimant<sup>CHECKSUM</sup>**: data publisher<br>
+ The data publisher is a party that wants to publish some data.
+* **Believer<sup>CHECKSUM</sup>**: end-user<br>
+ The end-user is a party that wants to use some published data.
+* **Verifier<sup>CHECKSUM</sup>**: data publisher<br>
+ Only the data publisher can verify the above claims.
+* **Arbiter<sup>CHECKSUM</sup>**:<br>
+ There's no official body. Invalidated claims would affect reputation.
+
+System<sup>CHECKSUM\*</sup> can be defined to make more specific claims. Below
+is a reproducible builds example.
+
+### **System<sup>CHECKSUM-RB</sup>**
+System<sup>CHECKSUM-RB</sup> is about the claims made by a _software publisher_
+that makes reproducible builds available.
+* **Claim<sup>CHECKSUM-RB</sup>**:
+ _I, software publisher, claim that the data_:
+ 1. has cryptographic hash X
+ 2. is the output of a reproducible build for which the source can be located
+ using X as an identifier
+* **Statement<sup>CHECKSUM-RB</sup>**: Statement<sup>CHECKSUM</sup>
+* **Claimant<sup>CHECKSUM-RB</sup>**: software publisher<br>
+ The software publisher is a party that wants to publish the output of a
+ reproducible build.
+* **Believer<sup>CHECKSUM-RB</sup>**: end-user<br>
+  The end-user is a party that wants to run an executable binary that was
+  built reproducibly.
+* **Verifier<sup>CHECKSUM-RB</sup>**: any interested party<br>
+ These parties try to verify the above claims. For example:
+ * the software publisher itself (_"has my identity been compromised?"_)
+ * rebuilders that check for locatability and reproducibility
+* **Arbiter<sup>CHECKSUM-RB</sup>**:<br>
+ There's no official body. Invalidated claims would affect reputation.
+
+## **System<sup>CHECKSUM-LOG</sup>**
+System<sup>CHECKSUM-LOG</sup> is about the claims made by a _log operator_.
+It adds _discoverability_ into System<sup>CHECKSUM\*</sup>. Discoverability
+means that Verifier<sup>CHECKSUM\*</sup> can see all
+Statement<sup>CHECKSUM</sup> that Believer<sup>CHECKSUM\*</sup> accept.
+
+* **Claim<sup>CHECKSUM-LOG</sup>**:
+ _I, log operator, make available:_
+ 1. a globally consistent append-only log of Statement<sup>CHECKSUM</sup>
+* **Statement<sup>CHECKSUM-LOG</sup>**: signed tree head
+* **Claimant<sup>CHECKSUM-LOG</sup>**: log operator<br>
+  Possible operators include:
+ * a small subset of data publishers
+ * members of relevant consortia
+* **Believer<sup>CHECKSUM-LOG</sup>**:
+ * Believer<sup>CHECKSUM\*</sup>
+ * Verifier<sup>CHECKSUM\*</sup><br>
+* **Verifier<sup>CHECKSUM-LOG</sup>**: third parties<br>
+ These parties verify the above claims. Examples include:
+ * members of relevant consortia
+ * non-profits and other reputable organizations
+ * security enthusiasts and researchers
+ * log operators (cross-ecosystem)
+ * monitors (cross-ecosystem)
+ * a small subset of data publishers (cross-ecosystem)
+* **Arbiter<sup>CHECKSUM-LOG</sup>**:<br>
+ There is no official body. The ecosystem at large should stop using an
+ instance of System<sup>CHECKSUM-LOG</sup> if cryptographic proofs of log
+  misbehavior are presented by some Verifier<sup>CHECKSUM-LOG</sup>.
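To make the roles above concrete, here is a minimal, hypothetical Go sketch of System<sup>CHECKSUM</sup>: the Claimant signs the hash of its data to produce a Statement<sup>CHECKSUM</sup>, and anyone holding the public key can check it. The actual stfe leaf statement carries more fields (for example a shard hint, see types/http.go further down), so the message signed here is illustrative only and not the stfe wire format.

```go
// Hypothetical illustration of System^CHECKSUM, not the stfe API: the data
// publisher (Claimant) signs the hash of its data, producing a statement
// that a Believer relies on and that the publisher can later re-verify.
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// The data publisher's key pair.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Claim: "the data has cryptographic hash X and I produced it".
	data := []byte("some published data")
	checksum := sha256.Sum256(data)

	// Statement^CHECKSUM: a signature over the checksum.
	statement := ed25519.Sign(priv, checksum[:])

	// Anyone with the public key can check the statement; only the
	// publisher can say whether the key signed data it never produced.
	fmt.Println("statement verifies:", ed25519.Verify(pub, checksum[:], statement))
}
```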
diff --git a/types/cmd/new-namespace/main.go b/types/cmd/new-namespace/main.go
deleted file mode 100644
index e338d7c..0000000
--- a/types/cmd/new-namespace/main.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Package main outputs the private and public parts of a new namespace
-package main
-
-import (
- "flag"
- "fmt"
-
- "crypto/ed25519"
- "crypto/rand"
- "encoding/base64"
-
- "github.com/golang/glog"
- "github.com/system-transparency/stfe/types"
-)
-
-var (
- format = flag.String("format", string(types.NamespaceFormatEd25519V1), "namespace format")
-)
-
-func main() {
- flag.Parse()
- defer glog.Flush()
-
- switch *format {
- case string(types.NamespaceFormatEd25519V1):
- glog.Infof("generating new ed25519_v1 namespace")
- sk, vk, namespace, err := genEd25519V1Namespace()
- if err != nil {
- glog.Errorf("genEd25519V1Namespace: %v", err)
- break
- }
- fmt.Printf("sk: %s\n", base64.StdEncoding.EncodeToString(sk))
- fmt.Printf("vk: %s\n", base64.StdEncoding.EncodeToString(vk))
- fmt.Printf("ed25519_v1: %s\n", base64.StdEncoding.EncodeToString(namespace))
- default:
- glog.Errorf("unsupported namespace format: %s", format)
- }
-}
-
-// genEd25519V1Namespace generates an Ed25519 secret key, verification key, and
-// serialized ed25519_v1 namespace.
-func genEd25519V1Namespace() ([]byte, []byte, []byte, error) {
- vk, sk, err := ed25519.GenerateKey(rand.Reader)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("ed25519.GenerateKey: %v", err)
- }
- namespace, err := types.NewNamespaceEd25519V1(vk[:])
- if err != nil {
- return nil, nil, nil, fmt.Errorf("types.NewNamespaceEd25519V1: %v", err)
- }
- serialized, err := types.Marshal(*namespace)
- if err != nil {
- fmt.Errorf("types.Marshal: %v", err)
- }
- return sk, vk, serialized, nil
-}
diff --git a/types/http.go b/types/http.go
new file mode 100644
index 0000000..8bbe26d
--- /dev/null
+++ b/types/http.go
@@ -0,0 +1,188 @@
+package types
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+const (
+ // HeaderPrefix is the start of every ST log HTTP header key
+ HeaderPrefix = "stlog-"
+
+ // New leaf
+ HeaderShardHint = HeaderPrefix + "shard_hint"
+ HeaderChecksum = HeaderPrefix + "checksum"
+ HeaderSignatureOverMessage = HeaderPrefix + "signature_over_message"
+ HeaderVerificationKey = HeaderPrefix + "verification_key"
+ HeaderDomainHint = HeaderPrefix + "domain_hint"
+
+ // Inclusion proof
+ HeaderLeafHash = HeaderPrefix + "leaf_hash"
+ HeaderLeafIndex = HeaderPrefix + "leaf_index"
+ HeaderInclusionPath = HeaderPrefix + "inclusion_path"
+
+ // Consistency proof
+ HeaderNewSize = HeaderPrefix + "new_size"
+ HeaderOldSize = HeaderPrefix + "old_size"
+ HeaderConsistencyPath = HeaderPrefix + "consistency_path"
+
+ // Range of leaves
+ HeaderStartSize = HeaderPrefix + "start_size"
+ HeaderEndSize = HeaderPrefix + "end_size"
+
+ // Tree head
+ HeaderTimestamp = HeaderPrefix + "timestamp"
+ HeaderTreeSize = HeaderPrefix + "tree_size"
+ HeaderRootHash = HeaderPrefix + "root_hash"
+
+ // Signature and signer identity
+ HeaderSignature = HeaderPrefix + "signature"
+ HeaderKeyHash = HeaderPrefix + "key_hash"
+)
+
+// ToHTTP returns a signed tree-head as HTTP key-value pairs
+func (sth *SignedTreeHead) ToHTTP() ([]byte, error) {
+ hdr := http.Header{}
+ hdr.Add(HeaderTimestamp, strconv.FormatUint(sth.Timestamp, 10))
+ hdr.Add(HeaderTreeSize, strconv.FormatUint(sth.TreeSize, 10))
+ hdr.Add(HeaderRootHash, hex.EncodeToString(sth.RootHash[:]))
+ for _, sigident := range sth.SigIdent {
+ hdr.Add(HeaderSignature, hex.EncodeToString(sigident.Signature[:]))
+ hdr.Add(HeaderKeyHash, hex.EncodeToString(sigident.KeyHash[:]))
+ }
+ return headerToBytes(hdr)
+}
+
+// ToHTTP returns a consistency proof as HTTP key-value pairs
+func (p *ConsistencyProof) ToHTTP() ([]byte, error) {
+ hdr := http.Header{}
+ hdr.Add(HeaderNewSize, strconv.FormatUint(p.NewSize, 10))
+ hdr.Add(HeaderOldSize, strconv.FormatUint(p.OldSize, 10))
+ for _, hash := range p.Path {
+ hdr.Add(HeaderConsistencyPath, hex.EncodeToString(hash[:]))
+ }
+ return headerToBytes(hdr)
+}
+
+// ToHTTP returns an inclusion proof as HTTP key-value pairs
+func (p *InclusionProof) ToHTTP() ([]byte, error) {
+ hdr := http.Header{}
+ hdr.Add(HeaderTreeSize, strconv.FormatUint(p.TreeSize, 10))
+ hdr.Add(HeaderLeafIndex, strconv.FormatUint(p.LeafIndex, 10))
+ for _, hash := range p.Path {
+ hdr.Add(HeaderInclusionPath, hex.EncodeToString(hash[:]))
+ }
+ return headerToBytes(hdr)
+}
+
+// ToHTTP returns a leaf as HTTP key-value pairs
+func (l *Leaf) ToHTTP() ([]byte, error) {
+ hdr := http.Header{}
+ hdr.Add(HeaderShardHint, strconv.FormatUint(l.ShardHint, 10))
+ hdr.Add(HeaderChecksum, hex.EncodeToString(l.Checksum[:]))
+ hdr.Add(HeaderSignature, hex.EncodeToString(l.Signature[:]))
+ hdr.Add(HeaderKeyHash, hex.EncodeToString(l.KeyHash[:]))
+ return headerToBytes(hdr)
+}
+
+// SignedTreeHeadFromHTTP parses a signed tree head from HTTP key-value pairs
+func SignedTreeHeadFromHTTP(buf []byte) (*SignedTreeHead, error) {
+ hdr, err := headerFromBuf(buf)
+ if err != nil {
+ return nil, fmt.Errorf("headerFromBuf(): %v", err)
+ }
+
+ // TreeHead
+ var sth SignedTreeHead
+ sth.Timestamp, err = strconv.ParseUint(hdr.Get(HeaderTimestamp), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid timestamp: %v", err)
+ }
+ sth.TreeSize, err = strconv.ParseUint(hdr.Get(HeaderTreeSize), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid tree size: %v", err)
+ }
+ if err := decodeHex(hdr.Get(HeaderRootHash), sth.RootHash[:]); err != nil {
+ return nil, fmt.Errorf("decodeHex(): %v", err)
+ }
+
+ // SigIdent
+ signatures := hdr.Values(HeaderSignature)
+ keyHashes := hdr.Values(HeaderKeyHash)
+ if len(signatures) == 0 {
+ return nil, fmt.Errorf("no signer")
+ }
+ if len(signatures) != len(keyHashes) {
+ return nil, fmt.Errorf("mismatched signature-signer count")
+ }
+ for i := 0; i < len(signatures); i++ {
+ var sigident SigIdent
+ if err := decodeHex(signatures[i], sigident.Signature[:]); err != nil {
+ return nil, fmt.Errorf("decodeHex(): %v", err)
+ }
+ if err := decodeHex(keyHashes[i], sigident.KeyHash[:]); err != nil {
+ return nil, fmt.Errorf("decodeHex(): %v", err)
+ }
+ sth.SigIdent = append(sth.SigIdent, sigident)
+ }
+ return &sth, nil
+}
+
+// ConsistencyProofFromHTTP parses a consistency proof from HTTP key-value pairs
+func ConsistencyProofFromHTTP(buf []byte) (*ConsistencyProof, error) {
+ return nil, nil // TODO
+}
+
+// InclusionProofFromHTTP parses an inclusion proof from HTTP key-value pairs
+func InclusionProofFromHTTP(buf []byte) (*InclusionProof, error) {
+ return nil, nil // TODO
+}
+
+// LeavesFromHTTP parses a list of leaves from HTTP key-value pairs
+func LeavesFromHTTP(buf []byte) ([]*Leaf, error) {
+ return nil, nil // TODO
+}
+
+// headerFromBuf parses ST log HTTP header key-value pairs from a response body
+func headerFromBuf(buf []byte) (http.Header, error) {
+ hdr := http.Header{}
+ lines := strings.Split(string(buf), "\r\n")
+ lines = lines[:len(lines)-1] // skip the final empty line
+ for _, line := range lines {
+ split := strings.Split(line, ":")
+ if len(split) != 2 {
+ return nil, fmt.Errorf("invalid ST log HTTP header: %s", line)
+ }
+ if !strings.HasPrefix(strings.ToLower(split[0]), HeaderPrefix) {
+ return nil, fmt.Errorf("invalid ST log HTTP header prefix: %s", line)
+ }
+ hdr.Add(split[0], strings.TrimSpace(split[1]))
+ }
+ return hdr, nil
+}
+
+// decodeHex decodes a hex-encoded string into a fixed-size output slice
+func decodeHex(str string, out []byte) error {
+ buf, err := hex.DecodeString(str)
+ if err != nil {
+ return fmt.Errorf("hex.DecodeString(): %v", err)
+ }
+ if len(buf) != len(out) {
+ return fmt.Errorf("invalid length: %v", len(buf))
+ }
+ copy(out, buf)
+ return nil
+}
+
+// headerToBytes encodes a header as HTTP key-value pairs
+func headerToBytes(hdr http.Header) ([]byte, error) {
+ buf := bytes.NewBuffer(nil)
+ if err := hdr.Write(buf); err != nil {
+ return nil, fmt.Errorf("hdr.Write(): %v", err) // should not happen
+ }
+ return buf.Bytes(), nil
+}
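As a rough usage sketch of the new encoding: assuming the types package at github.com/system-transparency/stfe/types (the import path used by the deleted main.go above) and a hand-written header literal rather than a real log response, the stlog-* key-value pairs can be parsed and re-serialized like this.

```go
// Minimal sketch of the stlog-* key-value encoding, using the header keys
// defined in types/http.go above.  The header literal is made up for
// illustration; a real client would read it from a log response body.
package main

import (
	"fmt"
	"strings"

	"github.com/system-transparency/stfe/types"
)

func main() {
	zero32 := strings.Repeat("00", 32)
	zero64 := strings.Repeat("00", 64)
	buf := []byte("stlog-timestamp: 0\r\n" +
		"stlog-tree_size: 4711\r\n" +
		"stlog-root_hash: " + zero32 + "\r\n" +
		"stlog-signature: " + zero64 + "\r\n" +
		"stlog-key_hash: " + zero32 + "\r\n")

	// Parse the key-value pairs into a signed tree head.
	sth, err := types.SignedTreeHeadFromHTTP(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("tree size:", sth.TreeSize, "signers:", len(sth.SigIdent))

	// Round-trip: serialize the parsed tree head back to key-value pairs.
	again, err := sth.ToHTTP()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", again)
}
```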
diff --git a/types/http_test.go b/types/http_test.go
new file mode 100644
index 0000000..527bcdf
--- /dev/null
+++ b/types/http_test.go
@@ -0,0 +1,331 @@
+package types
+
+import (
+ "bytes"
+ "encoding/hex"
+ "net/http"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+var (
+ testZeroBuffer32 = [32]byte{}
+ testZeroBuffer64 = [64]byte{}
+)
+
+func TestSignedTreeHeadToHTTP(t *testing.T) {
+ description := "valid: cosigned tree head with two signatures"
+ sth := &SignedTreeHead{
+ TreeHead: TreeHead{
+ Timestamp: 0,
+ TreeSize: 0,
+ RootHash: testBuffer32,
+ },
+ SigIdent: []SigIdent{
+ SigIdent{
+ Signature: testZeroBuffer64,
+ KeyHash: testZeroBuffer32,
+ },
+ SigIdent{
+ Signature: testBuffer64,
+ KeyHash: testBuffer32,
+ },
+ },
+ }
+ want := map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{
+ hex.EncodeToString(testZeroBuffer64[:]),
+ hex.EncodeToString(testBuffer64[:]),
+ },
+ HeaderKeyHash: []string{
+ hex.EncodeToString(testZeroBuffer32[:]),
+ hex.EncodeToString(testBuffer32[:]),
+ },
+ }
+ buf, err := sth.ToHTTP()
+ if err != nil {
+ t.Fatalf("sth.ToHTTP: %v", err)
+ }
+ hdr, err := headerFromBuf(buf)
+ if err != nil {
+ t.Fatalf("headerFromBuf: %v", err)
+ }
+ compareHeaderWithMap(t, description, hdr, want)
+}
+
+func TestConsistencyProofToHTTP(t *testing.T) { // TODO
+}
+
+func TestInclusionProofToHTTP(t *testing.T) { // TODO
+}
+
+func TestLeafToHTTP(t *testing.T) { // TODO
+}
+
+func TestSignedTreeHeadFromHTTP(t *testing.T) {
+ for _, table := range []struct {
+ description string
+ buf []byte
+ wantErr bool
+ wantSth *SignedTreeHead
+ }{
+ {
+ description: "invalid: not ST log HTTP header",
+ buf: newHeaderBuf(t, map[string][]string{
+ "user-agent": []string{"secret"},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: timestamp",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{hex.EncodeToString(testBuffer64[:])},
+ HeaderKeyHash: []string{hex.EncodeToString(testBuffer32[:])},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: tree size",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{hex.EncodeToString(testBuffer64[:])},
+ HeaderKeyHash: []string{hex.EncodeToString(testBuffer32[:])},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: root hash",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderSignature: []string{hex.EncodeToString(testBuffer64[:])},
+ HeaderKeyHash: []string{hex.EncodeToString(testBuffer32[:])},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: signature",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderKeyHash: []string{hex.EncodeToString(testBuffer32[:])},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: key hash",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{hex.EncodeToString(testBuffer64[:])},
+ HeaderKeyHash: []string{hex.EncodeToString(testBuffer64[:])},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: sigident count",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{hex.EncodeToString(testBuffer64[:])},
+ HeaderKeyHash: []string{
+ hex.EncodeToString(testZeroBuffer32[:]),
+ hex.EncodeToString(testBuffer32[:]),
+ },
+ }),
+ wantErr: true,
+ },
+ {
+ description: "invalid: no signer",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ }),
+ wantErr: true,
+ },
+ {
+ description: "valid: cosigned tree head with two signatures",
+ buf: newHeaderBuf(t, map[string][]string{
+ HeaderTimestamp: []string{"0"},
+ HeaderTreeSize: []string{"0"},
+ HeaderRootHash: []string{hex.EncodeToString(testBuffer32[:])},
+ HeaderSignature: []string{
+ hex.EncodeToString(testZeroBuffer64[:]),
+ hex.EncodeToString(testBuffer64[:]),
+ },
+ HeaderKeyHash: []string{
+ hex.EncodeToString(testZeroBuffer32[:]),
+ hex.EncodeToString(testBuffer32[:]),
+ },
+ }),
+ wantSth: &SignedTreeHead{
+ TreeHead: TreeHead{
+ Timestamp: 0,
+ TreeSize: 0,
+ RootHash: testBuffer32,
+ },
+ SigIdent: []SigIdent{
+ SigIdent{
+ Signature: testZeroBuffer64,
+ KeyHash: testZeroBuffer32,
+ },
+ SigIdent{
+ Signature: testBuffer64,
+ KeyHash: testBuffer32,
+ },
+ },
+ },
+ },
+ } {
+ sth, err := SignedTreeHeadFromHTTP(table.buf)
+ if got, want := err != nil, table.wantErr; got != want {
+ t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
+ }
+ if err != nil {
+ continue // nothing more to check on error
+ }
+ if got, want := sth, table.wantSth; !reflect.DeepEqual(got, want) {
+ t.Errorf("got signed tree head\n\t%v\nbut wanted\n\t%v\nin test %q", got, want, table.description)
+ }
+ }
+}
+
+func TestHeaderFromBuf(t *testing.T) {
+ for _, table := range []struct {
+ description string
+ buf []byte
+ wantErr bool
+ wantMap map[string][]string
+ }{
+ {
+ description: "invalid: split",
+ buf: []byte(HeaderPrefix + "k1: v1:v2\r\n"),
+ wantErr: true,
+ },
+ {
+ description: "invalid: prefix",
+ buf: []byte("user-agent: secret\r\n"),
+ wantErr: true,
+ },
+ {
+ description: "valid: one key with funky case",
+ buf: []byte(funkyCase(t, HeaderPrefix) + "k1: v1\r\n"),
+ wantMap: map[string][]string{
+ HeaderPrefix + "k1": []string{"v1"},
+ },
+ },
+ {
+ description: "valid: two keys where one has multiple values",
+ buf: []byte(
+ HeaderPrefix + "k1: v1 \r\n" +
+ HeaderPrefix + "k2: v2\r\n" +
+ HeaderPrefix + "k2: v3\r\n",
+ ),
+ wantMap: map[string][]string{
+ HeaderPrefix + "k1": []string{"v1"},
+ HeaderPrefix + "k2": []string{"v2", "v3"},
+ },
+ },
+ } {
+ hdr, err := headerFromBuf(table.buf)
+ if got, want := err != nil, table.wantErr; got != want {
+ t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
+ }
+ if err != nil {
+ continue // nothing more to check on error
+ }
+ compareHeaderWithMap(t, table.description, hdr, table.wantMap)
+ }
+}
+
+func TestDecodeHex(t *testing.T) {
+ for _, table := range []struct {
+ description string
+ hex string
+ wantErr bool
+ wantBuf [4]byte
+ }{
+ {
+ description: "invalid: too short input",
+ hex: "000102",
+ wantErr: true,
+ },
+ {
+ description: "invalid: too large input",
+ hex: "0001020304",
+ wantErr: true,
+ },
+ {
+ description: "invalid: not hex (1/2)",
+ hex: "000102030",
+ wantErr: true,
+ },
+ {
+ description: "invalid: not hex (2/2)",
+ hex: "0001020q",
+ wantErr: true,
+ },
+ {
+ description: "valid",
+ hex: "00010203",
+ wantBuf: [4]byte{0, 1, 2, 3},
+ },
+ } {
+ var buf [4]byte
+ err := decodeHex(table.hex, buf[:])
+ if got, want := err != nil, table.wantErr; got != want {
+ t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
+ }
+ if err != nil {
+ continue // nothing more to check on error
+ }
+ if got, want := buf[:], table.wantBuf[:]; !bytes.Equal(got, want) {
+ t.Errorf("got buf %v but wanted %v in test %q", got, want, table.description)
+ }
+ }
+}
+
+func newHeaderBuf(t *testing.T, kv map[string][]string) []byte {
+ t.Helper()
+ hdr := http.Header{}
+ for key, values := range kv {
+ for _, value := range values {
+ hdr.Add(key, value)
+ }
+ }
+ buf := bytes.NewBuffer(nil)
+ if err := hdr.Write(buf); err != nil {
+ t.Fatalf("hdr.Write(): %v", err)
+ }
+ return buf.Bytes()
+}
+
+func compareHeaderWithMap(t *testing.T, description string, hdr http.Header, wantMap map[string][]string) {
+ t.Helper()
+ if got, want := len(hdr), len(wantMap); got != want {
+ t.Errorf("got %d keys but wanted %d in test %q", got, want, description)
+ }
+ for key, value := range wantMap {
+ if got, want := hdr.Values(key), value; !reflect.DeepEqual(got, want) {
+ t.Errorf("got value %v but wanted %v for key %v in test %q", got, want, key, description)
+ }
+ }
+}
+
+func funkyCase(t *testing.T, str string) string {
+ t.Helper()
+ splitIndex := len(str) / 2
+ return strings.ToLower(str[:splitIndex]) + strings.ToUpper(str[splitIndex:])
+}
diff --git a/types/namespace.go b/types/namespace.go
deleted file mode 100644
index 376ebcd..0000000
--- a/types/namespace.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package types
-
-import (
- "fmt"
-
- "crypto/ed25519"
-
- "github.com/google/certificate-transparency-go/tls"
-)
-
-// NamespaceFormat defines a particular namespace type that is versioned
-type NamespaceFormat tls.Enum
-
-const (
- NamespaceFormatReserved NamespaceFormat = 0
- NamespaceFormatEd25519V1 NamespaceFormat = 1
-
- NamespaceFingerprintSize = 32
-)
-
-// Namespace references a versioned namespace based on a given format specifier
-type Namespace struct {
- Format NamespaceFormat `tls:"maxval:65535"`
- Ed25519V1 *Ed25519V1 `tls:"selector:Format,val:1"`
-}
-
-// Ed25519V1 uses an Ed25519 verification key as namespace. Encoding,
-// signing, and verification operations are defined by RFC 8032.
-type Ed25519V1 struct {
- Namespace [32]byte
-}
-
-func (f NamespaceFormat) String() string {
- switch f {
- case NamespaceFormatReserved:
- return "reserved"
- case NamespaceFormatEd25519V1:
- return "ed25519_v1"
- default:
- return fmt.Sprintf("unknown NamespaceFormat: %d", f)
- }
-}
-
-func (n Namespace) String() string {
- switch n.Format {
- case NamespaceFormatReserved:
- return fmt.Sprintf("Format(%s)", n.Format)
- case NamespaceFormatEd25519V1:
- return fmt.Sprintf("Format(%s): %+v", n.Format, n.Ed25519V1)
- default:
- return fmt.Sprintf("unknown Namespace: %v", n.Format)
- }
-}
-
-// Fingerprint returns a fixed-size namespace fingerprint that is unique.
-func (n *Namespace) Fingerprint() (*[NamespaceFingerprintSize]byte, error) {
- switch n.Format {
- case NamespaceFormatEd25519V1:
- return &n.Ed25519V1.Namespace, nil
- default:
- return nil, fmt.Errorf("unsupported NamespaceFormat: %v", n.Format)
- }
-}
-
-// Verify checks that signature is valid over message for this namespace
-func (ns *Namespace) Verify(message, signature []byte) error {
- switch ns.Format {
- case NamespaceFormatEd25519V1:
- if !ed25519.Verify(ed25519.PublicKey(ns.Ed25519V1.Namespace[:]), message, signature) {
- return fmt.Errorf("ed25519 signature verification failed")
- }
- default:
- return fmt.Errorf("namespace not supported: %v", ns.Format)
- }
- return nil
-}
-
-// NewNamespaceEd25519V1 returns a new Ed25519V1 namespace based on a
-// verification key.
-func NewNamespaceEd25519V1(vk []byte) (*Namespace, error) {
- if len(vk) != 32 {
- return nil, fmt.Errorf("invalid verification key: must be 32 bytes")
- }
-
- var ed25519v1 Ed25519V1
- copy(ed25519v1.Namespace[:], vk)
- return &Namespace{
- Format: NamespaceFormatEd25519V1,
- Ed25519V1: &ed25519v1,
- }, nil
-}
diff --git a/types/namespace_pool.go b/types/namespace_pool.go
deleted file mode 100644
index 1e9e8f6..0000000
--- a/types/namespace_pool.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package types
-
-import (
- "fmt"
-)
-
-// NamespacePool is a pool of namespaces that contain complete verification keys
-type NamespacePool struct {
- pool map[[NamespaceFingerprintSize]byte]*Namespace
- list []*Namespace
- // If we need to update this structure without a restart => add mutex.
-}
-
-// NewNamespacePool creates a new namespace pool from a list of namespaces. An
-// error is returned if there are duplicate namespaces or namespaces without a
-// complete verification key. The latter is determined by namespaceWithKey().
-func NewNamespacePool(namespaces []*Namespace) (*NamespacePool, error) {
- np := &NamespacePool{
- pool: make(map[[NamespaceFingerprintSize]byte]*Namespace),
- list: make([]*Namespace, 0),
- }
- for _, namespace := range namespaces {
- if !namespaceWithKey(namespace.Format) {
- return nil, fmt.Errorf("need verification key in namespace pool: %v", namespace.Format)
- }
- fpr, err := namespace.Fingerprint()
- if err != nil {
- return nil, fmt.Errorf("need fingerprint in namespace pool: %v", err)
- }
- if _, ok := np.pool[*fpr]; ok {
- return nil, fmt.Errorf("duplicate namespace: %v", namespace.String())
- }
- np.pool[*fpr] = namespace
- np.list = append(np.list, namespace)
- }
- return np, nil
-}
-
-// Find checks if namespace is a member of the namespace pool.
-func (np *NamespacePool) Find(namespace *Namespace) (*Namespace, bool) {
- fpr, err := namespace.Fingerprint()
- if err != nil {
- return nil, false
- }
- if _, ok := np.pool[*fpr]; !ok {
- return nil, false
- }
- // If the passed namespace is a key fingerprint the actual key needs to be
- // attached before returning. Not applicable for Ed25519. Docdoc later.
- return namespace, true
-}
-
-// List returns a copied list of namespaces that is used by this pool.
-func (np *NamespacePool) List() []*Namespace {
- namespaces := make([]*Namespace, len(np.list))
- copy(namespaces, np.list)
- return namespaces
-}
-
-// namespaceWithKey returns true if a namespace format contains a complete
-// verification key. I.e., some formats might have a key fingerprint instead.
-func namespaceWithKey(format NamespaceFormat) bool {
- switch format {
- case NamespaceFormatEd25519V1:
- return true
- default:
- return false
- }
-}
diff --git a/types/namespace_pool_test.go b/types/namespace_pool_test.go
deleted file mode 100644
index f5810a2..0000000
--- a/types/namespace_pool_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package types
-
-import (
- "bytes"
- "reflect"
- "testing"
-)
-
-func TestNewNamespacePool(t *testing.T) {
- ns1 := mustInitNamespaceEd25519V1(t, 0x00)
- ns2 := mustInitNamespaceEd25519V1(t, 0xff)
- nsr := &Namespace{Format: NamespaceFormatReserved}
- for _, table := range []struct {
- description string
- namespaces []*Namespace
- wantErr bool
- }{
- {
- description: "invalid: duplicate namespace",
- namespaces: []*Namespace{ns1, ns1, ns2},
- wantErr: true,
- },
- {
- description: "invalid: namespace without key",
- namespaces: []*Namespace{ns1, nsr, ns2},
- wantErr: true,
- },
- {
- description: "valid: empty",
- namespaces: []*Namespace{},
- },
- {
- description: "valid: one namespace",
- namespaces: []*Namespace{ns1},
- },
- {
- description: "valid: two namespaces",
- namespaces: []*Namespace{ns1, ns2},
- },
- } {
- _, err := NewNamespacePool(table.namespaces)
- if got, want := err != nil, table.wantErr; got != want {
- t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
- }
- }
-}
-
-func TestFind(t *testing.T) {
- ns1 := mustInitNamespaceEd25519V1(t, 0x00)
- ns2 := mustInitNamespaceEd25519V1(t, 0xff)
-
- // Empty pool
- pool, err := NewNamespacePool(nil)
- if err != nil {
- t.Fatalf("must create new namespace pool: %v", err)
- }
- if _, ok := pool.Find(ns1); ok {
- t.Errorf("found namespace in empty pool")
- }
-
- // Pool with one namespace
- pool, err = NewNamespacePool([]*Namespace{ns1})
- if err != nil {
- t.Fatalf("must create new namespace pool: %v", err)
- }
- if ns, ok := pool.Find(ns1); !ok {
- t.Errorf("could not find namespace that is a member of the pool")
- } else if !reflect.DeepEqual(ns, ns1) {
- t.Errorf("found namespace but it is wrong")
- }
- if _, ok := pool.Find(ns2); ok {
- t.Errorf("found namespace although it is not a member of the pool")
- }
-}
-
-func TestList(t *testing.T) {
- ns1 := mustInitNamespaceEd25519V1(t, 0x00)
- ns2 := mustInitNamespaceEd25519V1(t, 0xff)
- namespaces := []*Namespace{ns1, ns2}
- pool, err := NewNamespacePool(namespaces)
- if err != nil {
- t.Fatalf("must create new namespace pool: %v", err)
- }
- if got, want := len(pool.List()), len(namespaces); got != want {
- t.Errorf("got len %v but wanted %v", got, want)
- }
- pool.List()[0] = ns2
- if got, want := pool.List()[0].Ed25519V1.Namespace[:], ns1.Ed25519V1.Namespace[:]; !bytes.Equal(got, want) {
- t.Errorf("returned list is not a copy")
- }
-}
diff --git a/types/namespace_test.go b/types/namespace_test.go
deleted file mode 100644
index a5847ef..0000000
--- a/types/namespace_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package types
-
-import (
- "bytes"
- "strings"
- "testing"
-
- "crypto/ed25519"
-)
-
-// TestNamespaceString checks that the String() function prints the right
-// format, and that the body is printed without a nil-pointer panic.
-func TestNamespaceString(t *testing.T) {
- wantPrefix := map[NamespaceFormat]string{
- NamespaceFormatReserved: "Format(reserved)",
- NamespaceFormatEd25519V1: "Format(ed25519_v1): &{Namespace",
- NamespaceFormat(1<<16 - 1): "unknown Namespace: unknown NamespaceFormat: 65535",
- }
- tests := append(test_cases_namespace(t), testCaseSerialize{
- description: "valid: unknown Namespace",
- item: Namespace{
- Format: NamespaceFormat(1<<16 - 1),
- },
- })
- for _, table := range tests {
- namespace, ok := table.item.(Namespace)
- if !ok {
- t.Fatalf("must cast to Namespace in test %q", table.description)
- }
-
- prefix, ok := wantPrefix[namespace.Format]
- if !ok {
- t.Fatalf("must have prefix for StFormat %v in test %q", namespace.Format, table.description)
- }
- if got, want := namespace.String(), prefix; !strings.HasPrefix(got, want) {
- t.Errorf("got %q but wanted prefix %q in test %q", got, want, table.description)
- }
- }
-}
-
-func TestFingerprint(t *testing.T) {
- for _, table := range []struct {
- description string
- namespace *Namespace
- wantErr bool
- wantFpr [NamespaceFingerprintSize]byte
- }{
- {
- description: "invalid: no fingerprint for type",
- namespace: &Namespace{
- Format: NamespaceFormatReserved,
- },
- wantErr: true,
- },
- {
- description: "valid: ed25519_v1",
- namespace: mustInitNamespaceEd25519V1(t, 0xaf),
- wantFpr: func() (ret [NamespaceFingerprintSize]byte) {
- for i, _ := range ret {
- ret[i] = 0xaf
- }
- return
- }(),
- },
- } {
- fpr, err := table.namespace.Fingerprint()
- if got, want := err != nil, table.wantErr; got != want {
- t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
- }
- if err != nil {
- continue
- }
- if got, want := *fpr, table.wantFpr; !bytes.Equal(got[:], want[:]) {
- t.Errorf("got fpr %v but wanted %v in test %q", got, want, table.description)
- }
- }
-}
-
-func TestVerify(t *testing.T) {
- var tests []testCaseNamespace
- tests = append(tests, test_cases_verify(t)...)
- tests = append(tests, test_cases_verify_ed25519v1(t)...)
- for _, table := range tests {
- err := table.namespace.Verify(table.msg, table.sig)
- if got, want := err != nil, table.wantErr; got != want {
- t.Errorf("got error=%v but wanted %v in test %q: %v", got, want, table.description, err)
- }
- }
-}
-
-func TestNewNamespaceEd25519V1(t *testing.T) {
- size := 32 // verification key size
- for _, table := range []struct {
- description string
- vk []byte
- wantErr bool
- }{
- {
- description: "invalid",
- vk: make([]byte, size+1),
- wantErr: true,
- },
- {
- description: "valid",
- vk: make([]byte, size),
- },
- } {
- n, err := NewNamespaceEd25519V1(table.vk)
- if got, want := err != nil, table.wantErr; got != want {
- t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
- }
- if err != nil {
- continue
- }
- if got, want := n.Format, NamespaceFormatEd25519V1; got != want {
- t.Errorf("got namespace format %v but wanted %v in test %q", got, want, table.description)
- continue
- }
- if got, want := n.Ed25519V1.Namespace[:], table.vk; !bytes.Equal(got, want) {
- t.Errorf("got namespace %X but wanted %X in test %q", got, want, table.description)
- }
- }
-}
-
-// testCaseNamespace is a common test case used for Namespace.Verify() tests
-type testCaseNamespace struct {
- description string
- namespace *Namespace
- msg, sig []byte
- wantErr bool
-}
-
-// test_cases_verify returns basic namespace.Verify() tests
-func test_cases_verify(t *testing.T) []testCaseNamespace {
- return []testCaseNamespace{
- {
- description: "test_cases_verify: invalid: unsupported namespace",
- namespace: &Namespace{Format: NamespaceFormatReserved},
- msg: []byte("msg"),
- sig: []byte("sig"),
- wantErr: true,
- },
- }
-}
-
-// test_cases_verify_ed25519v1 returns ed25519_v1 Namespace.Verify() tests
-func test_cases_verify_ed25519v1(t *testing.T) []testCaseNamespace {
- testEd25519Sk := [64]byte{230, 122, 195, 152, 194, 195, 147, 153, 80, 120, 153, 79, 102, 27, 52, 187, 136, 218, 150, 234, 107, 9, 167, 4, 92, 21, 11, 113, 42, 29, 129, 69, 75, 60, 249, 150, 229, 93, 75, 32, 103, 126, 244, 37, 53, 182, 68, 82, 249, 109, 49, 94, 10, 19, 146, 244, 58, 191, 169, 107, 78, 37, 45, 210}
- testEd25519Vk := [32]byte{75, 60, 249, 150, 229, 93, 75, 32, 103, 126, 244, 37, 53, 182, 68, 82, 249, 109, 49, 94, 10, 19, 146, 244, 58, 191, 169, 107, 78, 37, 45, 210}
- return []testCaseNamespace{
- {
- description: "test_cases_verify_ed25519v1: invalid: sk signed message, but vk is not for sk",
- namespace: &Namespace{
- Format: NamespaceFormatEd25519V1,
- Ed25519V1: &Ed25519V1{
- Namespace: [32]byte{},
- },
- },
- msg: []byte("message"),
- sig: ed25519.Sign(ed25519.PrivateKey(testEd25519Sk[:]), []byte("message")),
- wantErr: true,
- },
- {
- description: "test_cases_verify_ed25519v1: invalid: vk is for sk, but sk did not sign message",
- namespace: &Namespace{
- Format: NamespaceFormatEd25519V1,
- Ed25519V1: &Ed25519V1{
- Namespace: testEd25519Vk,
- },
- },
- msg: []byte("some message"),
- sig: ed25519.Sign(ed25519.PrivateKey(testEd25519Sk[:]), []byte("another message")),
- wantErr: true,
- },
- {
- description: "test_cases_verify_ed25519v1: valid",
- namespace: &Namespace{
- Format: NamespaceFormatEd25519V1,
- Ed25519V1: &Ed25519V1{
- Namespace: testEd25519Vk,
- },
- },
- msg: []byte("message"),
- sig: ed25519.Sign(ed25519.PrivateKey(testEd25519Sk[:]), []byte("message")),
- },
- }
-}
-
-func mustInitNamespaceEd25519V1(t *testing.T, initByte byte) *Namespace {
- t.Helper()
- buf := make([]byte, 32)
- for i := 0; i < len(buf); i++ {
- buf[i] = initByte
- }
- ns, err := NewNamespaceEd25519V1(buf)
- if err != nil {
- t.Fatalf("must make Ed25519v1 namespace: %v", err)
- }
- return ns
-}
diff --git a/types/serialize.go b/types/serialize.go
deleted file mode 100644
index fd93336..0000000
--- a/types/serialize.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package types
-
-import (
- "fmt"
-
- "github.com/google/certificate-transparency-go/tls"
-)
-
-const (
- HashSizeV1 = 32
-)
-
-// GetProofByHashV1 is a serializable get-proof-by-hash request
-type GetProofByHashV1 struct {
- Hash [HashSizeV1]byte
- TreeSize uint64
-}
-
-// GetConsistencyProofV1 is a serializable get-consistency-proof request
-type GetConsistencyProofV1 struct {
- First uint64
- Second uint64
-}
-
-// GetEntriesV1 is a serializable get-entries request
-type GetEntriesV1 struct {
- Start uint64
- End uint64
-}
-
-// Marshal marshals a TLS-encodable structure
-func Marshal(item interface{}) ([]byte, error) {
- serialized, err := tls.Marshal(item)
- if err != nil {
- return nil, fmt.Errorf("tls.Marshal: %v", err)
- }
- return serialized, nil
-}
-
-// Unmarshal unmarshals a TLS-encoded structure
-func Unmarshal(serialized []byte, out interface{}) error {
- extra, err := tls.Unmarshal(serialized, out)
- if err != nil {
- return fmt.Errorf("tls.Unmarshal: %v", err)
- }
- if len(extra) > 0 {
- return fmt.Errorf("tls.Unmarshal: extra data: %X", extra)
- }
- return nil
-}
diff --git a/types/serialize_test.go b/types/serialize_test.go
deleted file mode 100644
index a06effe..0000000
--- a/types/serialize_test.go
+++ /dev/null
@@ -1,736 +0,0 @@
-package types
-
-import (
- "bytes"
- "testing"
-
- "encoding/binary"
-)
-
-// testCaseSerialize is a common test case used for ST log types
-type testCaseSerialize struct {
- description string
- item interface{}
- wantErr bool
- wantBytes []byte // only used if no error and not equal to nil
-}
-
-// TestMarshalUnmarshal tests that valid ST log structures can be marshalled and
-// then unmarshalled without error, and that invalid ST log structures cannot be
-// marshalled. If wantBytes is non-nil the marshalled result must also match.
-func TestMarshalUnmarshal(t *testing.T) {
- var tests []testCaseSerialize
- tests = append(tests, test_cases_stitemlist(t)...)
- tests = append(tests, test_cases_stitem(t)...)
- tests = append(tests, test_cases_sthv1(t)...)
- tests = append(tests, test_cases_costhv1(t)...)
- tests = append(tests, test_cases_cpv1(t)...)
- tests = append(tests, test_cases_ipv1(t)...)
- tests = append(tests, test_cases_signed_checksumv1(t)...)
- tests = append(tests, test_cases_checksumv1(t)...)
- tests = append(tests, test_cases_thv1(t)...)
- tests = append(tests, test_cases_nh(t)...)
- tests = append(tests, test_cases_sigv1(t)...)
- tests = append(tests, test_cases_namespace(t)...)
- tests = append(tests, test_cases_ed25519v1(t)...)
- tests = append(tests, test_cases_requests(t)...)
- for _, table := range tests {
- b, err := Marshal(table.item)
- if got, want := err != nil, table.wantErr; got != want {
- t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
- }
- if err != nil {
- continue // nothing to unmarshal
- }
- if got, want := b, table.wantBytes; want != nil && !bytes.Equal(got, want) {
- t.Errorf("got bytes \n%v\n\tbut wanted\n%v\n\t in test %q: %v", got, want, table.description, err)
- }
-
- switch table.item.(type) {
- case StItemList:
- var item StItemList
- err = Unmarshal(b, &item)
- case StItem:
- var item StItem
- err = Unmarshal(b, &item)
- case SignedTreeHeadV1:
- var item SignedTreeHeadV1
- err = Unmarshal(b, &item)
- case CosignedTreeHeadV1:
- var item CosignedTreeHeadV1
- err = Unmarshal(b, &item)
- case ConsistencyProofV1:
- var item ConsistencyProofV1
- err = Unmarshal(b, &item)
- case InclusionProofV1:
- var item InclusionProofV1
- err = Unmarshal(b, &item)
- case SignedChecksumV1:
- var item SignedChecksumV1
- err = Unmarshal(b, &item)
- case ChecksumV1:
- var item ChecksumV1
- err = Unmarshal(b, &item)
- case TreeHeadV1:
- var item TreeHeadV1
- err = Unmarshal(b, &item)
- case NodeHash:
- var item NodeHash
- err = Unmarshal(b, &item)
- case SignatureV1:
- var item SignatureV1
- err = Unmarshal(b, &item)
- case Namespace:
- var item Namespace
- err = Unmarshal(b, &item)
- case Ed25519V1:
- var item Ed25519V1
- err = Unmarshal(b, &item)
- case GetProofByHashV1:
- var item GetProofByHashV1
- err = Unmarshal(b, &item)
- case GetConsistencyProofV1:
- var item GetConsistencyProofV1
- err = Unmarshal(b, &item)
- case GetEntriesV1:
- var item GetEntriesV1
- err = Unmarshal(b, &item)
- default:
- t.Errorf("unhandled type in test %q", table.description)
- }
- if err != nil {
- t.Errorf("unmarshal failed but wanted success in test %q: %v", table.description, err)
- }
- }
-}
-
-// TestUnmarshalStItem tests that invalid StItems cannot be unmarshalled
-func TestUnmarshalStItem(t *testing.T) {
- tests := test_cases_stitem(t)[1:] // skip reserved type
- for _, table := range tests {
- description := table.description[7:] // skip "valid: " prefix
- b, err := Marshal(table.item)
- if err != nil {
- t.Fatalf("must marshal in test %q: %v", description, err)
- }
-
- var item StItem
- if err := Unmarshal(append(b[:], []byte{0}...), &item); err == nil {
- t.Errorf("unmarshal suceeded with one extra byte in test %q", description)
- }
- if err := Unmarshal(b[:len(b)-1], &item); err == nil {
- t.Errorf("unmarshal suceeded with one byte short in test %q", description)
- }
- if err := Unmarshal(append(b[:], b[:]...), &item); err == nil {
- t.Errorf("unmarshal succeeded with appended StItem in test %q", description)
- }
- if err := Unmarshal([]byte{0}, &item); err == nil {
- t.Errorf("unmarshal succeeded with a single byte in test %q", description)
- }
- }
-}
-
-// test_cases_stitemlist returns test cases for the StItemList type
-func test_cases_stitemlist(t *testing.T) []testCaseSerialize {
- t.Helper()
- return []testCaseSerialize{
- testCaseSerialize{
- description: "test_cases_stitemlist: valid: StItemList: empty",
- item: StItemList{},
- wantBytes: []byte{0x00, 0x00, 0x00, 0x00},
- }, // skip max len check because it is huge
- testCaseSerialize{
- description: "test_cases_stitemlist: valid: mixed content",
- item: testStItemList,
- wantBytes: testStItemListBytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_stitem returns test cases for the different StItem types
-func test_cases_stitem(t *testing.T) []testCaseSerialize {
- t.Helper()
- return []testCaseSerialize{
- {
- description: "invalid: StItem: reserved",
- item: testStItemReserved,
- wantErr: true,
- },
- {
- description: "valid: StItem: signed_tree_head_v1",
- item: testStItemSignedTreeHeadV1,
- wantBytes: testStItemSignedTreeHeadV1Bytes,
- },
- {
- description: "valid: StItem: cosigned_tree_head_v1",
- item: testStItemCosignedTreeHeadV1,
- wantBytes: testStItemCosignedTreeHeadV1Bytes,
- },
- {
- description: "valid: StItem: consistency_proof_v1",
- item: testStItemConsistencyProofV1,
- wantBytes: testStItemConsistencyProofV1Bytes,
- },
- {
- description: "valid: StItem: inclusion_proof_v1",
- item: testStItemInclusionProofV1,
- wantBytes: testStItemInclusionProofV1Bytes,
- },
- {
- description: "valid: StItem: signed_checksum_v1",
- item: testStItemSignedChecksumV1,
- wantBytes: testStItemSignedChecksumV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_sthv1 returns test cases for the SignedTreeHeadV1 structure
-func test_cases_sthv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- return []testCaseSerialize{
- {
- description: "valid: testSignedTreeHeadV1",
- item: testSignedTreeHeadV1,
- wantBytes: testSignedTreeHeadV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_costhv1 returns test cases for the CosignedTreeHeadV1 structure
-func test_cases_costhv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- return []testCaseSerialize{
- {
- description: "test_cases_costhv1: valid: min",
- item: CosignedTreeHeadV1{
- SignedTreeHead: testSignedTreeHeadV1,
- Cosignatures: make([]SignatureV1, 0),
- },
- }, // skipping "valid: max" because it is huge
- {
- description: "test_cases_costhv1: testCosignedTreeHeadV1",
- item: testCosignedTreeHeadV1,
- wantBytes: testCosignedTreeHeadV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_cpv1 returns test cases for the ConsistencyProofV1 structure
-func test_cases_cpv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- max := 65535 // max consistency proof
- return []testCaseSerialize{
- {
- description: "test_cases_cpv1: invalid: >max",
- item: ConsistencyProofV1{
- LogId: testNamespace,
- TreeSize1: 0,
- TreeSize2: 0,
- ConsistencyPath: func() []NodeHash {
- var path []NodeHash
- for sum := 0; sum < max+1; sum += 1 + len(testNodeHash.Data) {
- path = append(path, testNodeHash)
- }
- return path
- }(),
- },
- wantErr: true,
- },
- {
- description: "test_cases_cpv1: valid: min",
- item: ConsistencyProofV1{
- LogId: testNamespace,
- TreeSize1: 0,
- TreeSize2: 0,
- ConsistencyPath: make([]NodeHash, 0),
- },
- },
- {
- description: "test_cases_cpv1: valid: testConsistencyProofV1",
- item: testConsistencyProofV1,
- wantBytes: testConsistencyProofV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_ipv1 returns test cases for the InclusionProofV1 structure
-func test_cases_ipv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- max := 65535 // max inclusion proof
- return []testCaseSerialize{
- {
- description: "test_cases_ipv1: invalid: >max",
- item: InclusionProofV1{
- LogId: testNamespace,
- TreeSize: 0,
- LeafIndex: 0,
- InclusionPath: func() []NodeHash {
- var path []NodeHash
- for sum := 0; sum < max+1; sum += 1 + len(testNodeHash.Data) {
- path = append(path, testNodeHash)
- }
- return path
- }(),
- },
- wantErr: true,
- },
- {
- description: "test_cases_ipv1: valid: min",
- item: InclusionProofV1{
- LogId: testNamespace,
- TreeSize: 0,
- LeafIndex: 0,
- InclusionPath: make([]NodeHash, 0),
- },
- },
- {
- description: "test_cases_ipv1: valid: testInclusionProofV1",
- item: testInclusionProofV1,
- wantBytes: testInclusionProofV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_signed_checksumv1 returns test cases for the SignedChecksumV1 structure
-func test_cases_signed_checksumv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- return []testCaseSerialize{
- {
- description: "test_cases_signed_checksumv1: valid: testSignedChecksumV1",
- item: testSignedChecksumV1,
- wantBytes: testSignedChecksumV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_checksumv1 returns test cases for the ChecksumV1 structure
-func test_cases_checksumv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- minIdentifier, maxIdentifier, identifier := 1, 128, []byte("foobar-1-2-3")
- minChecksum, maxChecksum, checksum := 1, 64, make([]byte, 32)
- return []testCaseSerialize{
- {
- description: "test_cases_checksumv1: invalid: identifier: min",
- item: ChecksumV1{
- Identifier: make([]byte, minIdentifier-1),
- Checksum: checksum,
- },
- wantErr: true,
- },
- {
- description: "test_cases_checksumv1: invalid: identifier: max",
- item: ChecksumV1{
- Identifier: make([]byte, maxIdentifier+1),
- Checksum: checksum,
- },
- wantErr: true,
- },
- {
- description: "test_cases_checksumv1: invalid: checksum: min",
- item: ChecksumV1{
- Identifier: identifier,
- Checksum: make([]byte, minChecksum-1),
- },
- wantErr: true,
- },
- {
- description: "test_cases_checksumv1: invalid: checksum: max",
- item: ChecksumV1{
- Identifier: identifier,
- Checksum: make([]byte, maxChecksum+1),
- },
- wantErr: true,
- },
- {
- description: "test_cases_checksumv1: valid: testChecksumV1",
- item: testChecksumV1,
- wantBytes: testChecksumV1Bytes,
- },
- }
-}
-
-// test_cases_thv1 returns test cases for the TreeHeadV1 structure
-func test_cases_thv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- min, max := 0, 1<<16-1 // extensions min and max
- return []testCaseSerialize{
- {
- description: "test_cases_thv1: invalid: max",
- item: TreeHeadV1{
- Timestamp: 0,
- TreeSize: 0,
- RootHash: testNodeHash,
- Extension: make([]byte, max+1),
- },
- wantErr: true,
- },
- {
- description: "test_cases_thv1: valid: min",
- item: TreeHeadV1{
- Timestamp: 0,
- TreeSize: 0,
- RootHash: testNodeHash,
- Extension: make([]byte, min),
- },
- },
- {
- description: "test_cases_thv1: valid: max",
- item: TreeHeadV1{
- Timestamp: 0,
- TreeSize: 0,
- RootHash: testNodeHash,
- Extension: make([]byte, max),
- },
- },
- {
- description: "test_cases_thv1: valid: testTreeHeadV1",
- item: testTreeHeadV1,
- wantBytes: testTreeHeadV1Bytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_nh returns test cases for the NodeHash structure
-func test_cases_nh(t *testing.T) []testCaseSerialize {
- t.Helper()
- min, max := 32, 1<<8-1 // NodeHash min and max
- return []testCaseSerialize{
- {
- description: "test_cases_nh: invalid: min",
- item: NodeHash{make([]byte, min-1)},
- wantErr: true,
- },
- {
- description: "test_cases_nh: invalid: max",
- item: NodeHash{make([]byte, max+1)},
- wantErr: true,
- },
- {
- description: "test_cases_nh: valid: min",
- item: NodeHash{make([]byte, min)},
- },
- {
- description: "test_cases_nh: valid: max",
- item: NodeHash{make([]byte, max)},
- },
- {
- description: "test_cases_nh: valid: testNodeHash",
- item: testNodeHash,
- wantBytes: testNodeHashBytes,
- }, // other invalid bounds are already tested in subtypes
- }
-}
-
-// test_cases_sigv1 returns test cases for the SignatureV1 structure
-func test_cases_sigv1(t *testing.T) []testCaseSerialize {
- t.Helper()
- min, max := 1, 1<<16-1 // signature min and max
- return []testCaseSerialize{
- {
- description: "test_cases_sigv1: invalid: min",
- item: SignatureV1{
- Namespace: testNamespace,
- Signature: make([]byte, min-1),
- },
- wantErr: true,
- },
- {
- description: "test_cases_sigv1: invalid: max",
- item: SignatureV1{
- Namespace: testNamespace,
- Signature: make([]byte, max+1),
- },
- wantErr: true,
- },
- {
- description: "test_cases_sigv1: valid: min",
- item: SignatureV1{
- Namespace: testNamespace,
- Signature: make([]byte, min),
- },
- },
- {
- description: "test_cases_sigv1: valid: max",
- item: SignatureV1{
- Namespace: testNamespace,
- Signature: make([]byte, max),
- },
- },
- {
- description: "test_cases_sigV1: valid: testSignatureV1",
- item: testSignatureV1,
- wantBytes: testSignatureV1Bytes,
- },
- }
-}
-
-// test_cases_namespace returns test cases for the different Namespace types.
-func test_cases_namespace(t *testing.T) []testCaseSerialize {
- return []testCaseSerialize{
- {
- description: "invalid: Namespace: reserved",
- item: testNamespaceReserved,
- wantErr: true,
- },
- {
- description: "valid: Namespace: ed25519_v1",
- item: testNamespaceEd25519V1,
- wantBytes: testNamespaceEd25519V1Bytes,
- },
- }
-}
-
-// test_cases_ed25519v1 returns test cases for the Ed25519V1 structure
-func test_cases_ed25519v1(t *testing.T) []testCaseSerialize {
- return []testCaseSerialize{
- {
- description: "valid: testNamespaceEd25519V1",
- item: testEd25519V1,
- wantBytes: testEd25519V1Bytes,
- },
- }
-}
-
-// test_cases_requests returns test cases for proof request types
-func test_cases_requests(t *testing.T) []testCaseSerialize {
- return []testCaseSerialize{
- {
- description: "valid: GetProofByHashV1",
- item: GetProofByHashV1{
- Hash: [HashSizeV1]byte{},
- TreeSize: 16909060,
- },
- wantBytes: bytes.Join([][]byte{
- make([]byte, 32), // hash
- []byte{0x00, 0x00, 0x00, 0x00, 0x1, 0x2, 0x3, 0x4}, // tree size
- }, nil),
- },
- {
- description: "valid: GetConsistencyProofV1",
- item: GetConsistencyProofV1{
- First: 0,
- Second: 16909060,
- },
- wantBytes: bytes.Join([][]byte{
- make([]byte, 8), // first
- []byte{0x00, 0x00, 0x00, 0x00, 0x1, 0x2, 0x3, 0x4}, // second
- }, nil),
- },
- {
- description: "valid: GetEntriesV1",
- item: GetEntriesV1{
- Start: 0,
- End: 16909060,
- },
- wantBytes: bytes.Join([][]byte{
- make([]byte, 8), // start
- []byte{0x00, 0x00, 0x00, 0x00, 0x1, 0x2, 0x3, 0x4}, // end
- }, nil),
- },
- }
-}
-
-var (
- // StItemList
- testStItemList = StItemList{
- Items: []StItem{
- testStItemSignedChecksumV1,
- testStItemInclusionProofV1,
- testStItemCosignedTreeHeadV1,
- },
- }
- testStItemListBytes = bytes.Join([][]byte{
- func() []byte {
- sum := uint32(len(testStItemSignedChecksumV1Bytes))
- sum += uint32(len(testStItemInclusionProofV1Bytes))
- sum += uint32(len(testStItemCosignedTreeHeadV1Bytes))
- buf := make([]byte, 4)
- binary.BigEndian.PutUint32(buf, sum)
- return buf
- }(), // length specifier list
- testStItemSignedChecksumV1Bytes, // first StItem
- testStItemInclusionProofV1Bytes, // second StItem
- testStItemCosignedTreeHeadV1Bytes, // third StItem
- }, nil)
-
- // StItem
- testStItemReserved = StItem{
- Format: StFormatReserved,
- }
-
- testStItemSignedTreeHeadV1 = StItem{
- Format: StFormatSignedTreeHeadV1,
- SignedTreeHeadV1: &testSignedTreeHeadV1,
- }
- testStItemSignedTreeHeadV1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x01}, // format signed_tree_head_v1
- testSignedTreeHeadV1Bytes, // SignedTreeHeadV1
- }, nil)
-
- testStItemCosignedTreeHeadV1 = StItem{
- Format: StFormatCosignedTreeHeadV1,
- CosignedTreeHeadV1: &testCosignedTreeHeadV1,
- }
- testStItemCosignedTreeHeadV1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x02}, // format cosigned_tree_head_v1
- testCosignedTreeHeadV1Bytes, // CosignedTreeHeadV1,
- }, nil)
-
- testStItemConsistencyProofV1 = StItem{
- Format: StFormatConsistencyProofV1,
- ConsistencyProofV1: &testConsistencyProofV1,
- }
- testStItemConsistencyProofV1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x03}, // format consistency_proof_v1
- testConsistencyProofV1Bytes, // ConsistencyProofV1
- }, nil)
-
- testStItemInclusionProofV1 = StItem{
- Format: StFormatInclusionProofV1,
- InclusionProofV1: &testInclusionProofV1,
- }
- testStItemInclusionProofV1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x04}, // format inclusion_proof_v1
- testInclusionProofV1Bytes, // InclusionProofV1
- }, nil)
-
- testStItemSignedChecksumV1 = StItem{
- Format: StFormatSignedChecksumV1,
- SignedChecksumV1: &testSignedChecksumV1,
- }
- testStItemSignedChecksumV1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x05}, // format signed_checksum_v1
- testSignedChecksumV1Bytes, // SignedChecksumV1
- }, nil)
-
- // Subtypes used by StItem
- testSignedTreeHeadV1 = SignedTreeHeadV1{
- TreeHead: testTreeHeadV1,
- Signature: testSignatureV1,
- }
- testSignedTreeHeadV1Bytes = bytes.Join([][]byte{
- testTreeHeadV1Bytes, // tree head
- testSignatureV1Bytes, // signature
- }, nil)
-
- testCosignedTreeHeadV1 = CosignedTreeHeadV1{
- SignedTreeHead: testSignedTreeHeadV1,
- Cosignatures: []SignatureV1{
- testSignatureV1,
- },
- }
- testCosignedTreeHeadV1Bytes = bytes.Join([][]byte{
- testSignedTreeHeadV1Bytes, // signed tree head
- []byte{0x00, 0x00, 0x00, byte(len(testSignatureV1Bytes))}, // cosignature length specifier
- testSignatureV1Bytes, // the only cosignature in this list
- }, nil)
-
- testConsistencyProofV1 = ConsistencyProofV1{
- LogId: testNamespace,
- TreeSize1: 16909060,
- TreeSize2: 16909060,
- ConsistencyPath: []NodeHash{
- testNodeHash,
- },
- }
- testConsistencyProofV1Bytes = bytes.Join([][]byte{
- testNamespaceBytes, // log id
- []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04}, // tree size 1
- []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04}, // tree size 2
- []byte{0x00, byte(len(testNodeHashBytes))}, // consistency path length specifier
- testNodeHashBytes, // the only node hash in this proof
- }, nil)
-
- testInclusionProofV1 = InclusionProofV1{
- LogId: testNamespace,
- TreeSize: 16909060,
- LeafIndex: 16909060,
- InclusionPath: []NodeHash{
- testNodeHash,
- },
- }
- testInclusionProofV1Bytes = bytes.Join([][]byte{
- testNamespaceBytes, // log id
- []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04}, // tree size
- []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04}, // leaf index
- []byte{0x00, byte(len(testNodeHashBytes))}, // inclusion path length specifier
- testNodeHashBytes, // the only node hash in this proof
- }, nil)
-
- testSignedChecksumV1 = SignedChecksumV1{
- Data: testChecksumV1,
- Signature: testSignatureV1,
- }
- testSignedChecksumV1Bytes = bytes.Join([][]byte{
- testChecksumV1Bytes, // data
- testSignatureV1Bytes, // signature
- }, nil)
-
- // Additional subtypes
- testChecksumV1 = ChecksumV1{
- Identifier: []byte("foobar-1-2-3"),
- Checksum: make([]byte, 32),
- }
- testChecksumV1Bytes = bytes.Join([][]byte{
- []byte{12}, // identifier length specifier
- []byte("foobar-1-2-3"), // identifier
- []byte{32}, // checksum length specifier
- make([]byte, 32), // checksum
- }, nil)
-
- testTreeHeadV1 = TreeHeadV1{
- Timestamp: 16909060,
- TreeSize: 16909060,
- RootHash: testNodeHash,
- Extension: make([]byte, 0),
- }
- testTreeHeadV1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04}, // timestamp
- []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04}, // tree size
- testNodeHashBytes, // root hash
- []byte{0x00, 0x00}, // extension length specifier
- // no extension
- }, nil)
-
- testNodeHash = NodeHash{
- Data: make([]byte, 32),
- }
- testNodeHashBytes = bytes.Join([][]byte{
- []byte{32}, // node hash length specifier
- make([]byte, 32),
- }, nil)
-
- testSignatureV1 = SignatureV1{
- Namespace: testNamespace,
- Signature: make([]byte, 64),
- }
- testSignatureV1Bytes = bytes.Join([][]byte{
- testNamespaceBytes, // namespace field
- []byte{0, 64}, // signature length specifier
- make([]byte, 64), // signature
- }, nil)
-
- // Namespace
- testNamespaceReserved = Namespace{
- Format: NamespaceFormatReserved,
- }
-
- testNamespace = testNamespaceEd25519V1
- testNamespaceBytes = testNamespaceEd25519V1Bytes
- testNamespaceEd25519V1 = Namespace{
- Format: NamespaceFormatEd25519V1,
- Ed25519V1: &testEd25519V1,
- }
- testNamespaceEd25519V1Bytes = bytes.Join([][]byte{
- []byte{0x00, 0x01}, // format ed25519_v1
- testEd25519V1Bytes, // Ed25519V1
- }, nil)
-
- // Subtypes used by Namespace
- testEd25519V1 = Ed25519V1{
- Namespace: [32]byte{},
- }
- testEd25519V1Bytes = bytes.Join([][]byte{
- make([]byte, 32), // namespace, no length specifier because fixed size
- }, nil)
-)
diff --git a/types/stitem.go b/types/stitem.go
deleted file mode 100644
index 447cad0..0000000
--- a/types/stitem.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package types
-
-import (
- "fmt"
-
- "github.com/google/certificate-transparency-go/tls"
-)
-
-// StFormat defines a particular StItem type that is versioned
-type StFormat tls.Enum
-
-const (
- StFormatReserved StFormat = 0
- StFormatSignedTreeHeadV1 StFormat = 1
- StFormatCosignedTreeHeadV1 StFormat = 2
- StFormatConsistencyProofV1 StFormat = 3
- StFormatInclusionProofV1 StFormat = 4
- StFormatSignedChecksumV1 StFormat = 5
-)
-
-// StItem references a versioned item based on a given format specifier
-type StItem struct {
- Format StFormat `tls:"maxval:65535"`
- SignedTreeHeadV1 *SignedTreeHeadV1 `tls:"selector:Format,val:1"`
- CosignedTreeHeadV1 *CosignedTreeHeadV1 `tls:"selector:Format,val:2"`
- ConsistencyProofV1 *ConsistencyProofV1 `tls:"selector:Format,val:3"`
- InclusionProofV1 *InclusionProofV1 `tls:"selector:Format,val:4"`
- SignedChecksumV1 *SignedChecksumV1 `tls:"selector:Format,val:5"`
-}
-
-type StItemList struct {
- Items []StItem `tls:"minlen:0,maxlen:4294967295"`
-}
-
-type SignedTreeHeadV1 struct {
- TreeHead TreeHeadV1
- Signature SignatureV1
-}
-
-type CosignedTreeHeadV1 struct {
- SignedTreeHead SignedTreeHeadV1
- Cosignatures []SignatureV1 `tls:"minlen:0,maxlen:4294967295"`
-}
-
-type ConsistencyProofV1 struct {
- LogId Namespace
- TreeSize1 uint64
- TreeSize2 uint64
- ConsistencyPath []NodeHash `tls:"minlen:0,maxlen:65535"`
-}
-
-type InclusionProofV1 struct {
- LogId Namespace
- TreeSize uint64
- LeafIndex uint64
- InclusionPath []NodeHash `tls:"minlen:0,maxlen:65535"`
-}
-
-type SignedChecksumV1 struct {
- Data ChecksumV1
- Signature SignatureV1
-}
-
-type ChecksumV1 struct {
- Identifier []byte `tls:"minlen:1,maxlen:128"`
- Checksum []byte `tls:"minlen:1,maxlen:64"`
-}
-
-type TreeHeadV1 struct {
- Timestamp uint64
- TreeSize uint64
- RootHash NodeHash
- Extension []byte `tls:"minlen:0,maxlen:65535"`
-}
-
-type NodeHash struct {
- Data []byte `tls:"minlen:32,maxlen:255"`
-}
-
-type SignatureV1 struct {
- Namespace Namespace
- Signature []byte `tls:"minlen:1,maxlen:65535"`
-}
-
-func (f StFormat) String() string {
- switch f {
- case StFormatReserved:
- return "reserved"
- case StFormatSignedTreeHeadV1:
- return "signed_tree_head_v1"
- case StFormatCosignedTreeHeadV1:
- return "cosigned_tree_head_v1"
- case StFormatConsistencyProofV1:
- return "consistency_proof_v1"
- case StFormatInclusionProofV1:
- return "inclusion_proof_v1"
- case StFormatSignedChecksumV1:
- return "signed_checksum_v1"
- default:
- return fmt.Sprintf("unknown StFormat: %d", f)
- }
-}
-
-func (i StItem) String() string {
- switch i.Format {
- case StFormatReserved:
- return fmt.Sprintf("Format(%s)", i.Format)
- case StFormatSignedTreeHeadV1:
- return fmt.Sprintf("Format(%s): %+v", i.Format, i.SignedTreeHeadV1)
- case StFormatCosignedTreeHeadV1:
- return fmt.Sprintf("Format(%s): %+v", i.Format, i.CosignedTreeHeadV1)
- case StFormatConsistencyProofV1:
- return fmt.Sprintf("Format(%s): %+v", i.Format, i.ConsistencyProofV1)
- case StFormatInclusionProofV1:
- return fmt.Sprintf("Format(%s): %+v", i.Format, i.InclusionProofV1)
- case StFormatSignedChecksumV1:
- return fmt.Sprintf("Format(%s): %+v", i.Format, i.SignedChecksumV1)
- default:
- return fmt.Sprintf("unknown StItem: %v", i.Format)
- }
-}
-
-func NewSignedTreeHeadV1(th *TreeHeadV1, sig *SignatureV1) *StItem {
- return &StItem{
- Format: StFormatSignedTreeHeadV1,
- SignedTreeHeadV1: &SignedTreeHeadV1{
- TreeHead: *th,
- Signature: *sig,
- },
- }
-}
-
-func NewCosignedTreeHeadV1(sth *SignedTreeHeadV1, cosig []SignatureV1) *StItem {
- if cosig == nil {
- cosig = make([]SignatureV1, 0)
- }
- return &StItem{
- Format: StFormatCosignedTreeHeadV1,
- CosignedTreeHeadV1: &CosignedTreeHeadV1{
- SignedTreeHead: *sth,
- Cosignatures: cosig,
- },
- }
-}
-
-func NewConsistencyProofV1(id *Namespace, size1, size2 uint64, path []NodeHash) *StItem {
- return &StItem{
- Format: StFormatConsistencyProofV1,
- ConsistencyProofV1: &ConsistencyProofV1{
- LogId: *id,
- TreeSize1: size1,
- TreeSize2: size2,
- ConsistencyPath: path,
- },
- }
-}
-
-func NewInclusionProofV1(id *Namespace, size, index uint64, path []NodeHash) *StItem {
- return &StItem{
- Format: StFormatInclusionProofV1,
- InclusionProofV1: &InclusionProofV1{
- LogId: *id,
- TreeSize: size,
- LeafIndex: index,
- InclusionPath: path,
- },
- }
-}
-
-func NewSignedChecksumV1(data *ChecksumV1, sig *SignatureV1) *StItem {
- return &StItem{
- Format: StFormatSignedChecksumV1,
- SignedChecksumV1: &SignedChecksumV1{
- Data: *data,
- Signature: *sig,
- },
- }
-}
-
-func NewTreeHeadV1(timestamp, size uint64, hash, extension []byte) *TreeHeadV1 {
- if extension == nil {
- extension = make([]byte, 0)
- }
- return &TreeHeadV1{
- Timestamp: timestamp,
- TreeSize: size,
- RootHash: NodeHash{
- Data: hash,
- },
- Extension: extension,
- }
-}
diff --git a/types/stitem_test.go b/types/stitem_test.go
deleted file mode 100644
index 90d6808..0000000
--- a/types/stitem_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package types
-
-import (
- "strings"
- "testing"
-)
-
-// TestStItemString checks that the String() function prints the right format,
-// and that the body is printed without a nil-pointer panic.
-func TestStItemString(t *testing.T) {
- wantPrefix := map[StFormat]string{
- StFormatReserved: "Format(reserved)",
- StFormatSignedTreeHeadV1: "Format(signed_tree_head_v1): &{TreeHead",
- StFormatCosignedTreeHeadV1: "Format(cosigned_tree_head_v1): &{SignedTreeHead",
- StFormatConsistencyProofV1: "Format(consistency_proof_v1): &{LogId",
- StFormatInclusionProofV1: "Format(inclusion_proof_v1): &{LogId",
- StFormatSignedChecksumV1: "Format(signed_checksum_v1): &{Data",
- StFormat(1<<16 - 1): "unknown StItem: unknown StFormat: 65535",
- }
- tests := append(test_cases_stitem(t), testCaseSerialize{
- description: "valid: unknown StItem",
- item: StItem{
- Format: StFormat(1<<16 - 1),
- },
- })
- for _, table := range tests {
- item, ok := table.item.(StItem)
- if !ok {
- t.Fatalf("must cast to StItem in test %q", table.description)
- }
-
- prefix, ok := wantPrefix[item.Format]
- if !ok {
- t.Fatalf("must have prefix for StFormat %v in test %q", item.Format, table.description)
- }
- if got, want := item.String(), prefix; !strings.HasPrefix(got, want) {
- t.Errorf("got %q but wanted prefix %q in test %q", got, want, table.description)
- }
- }
-}
-
-// TODO: TestNewSignedTreeHeadV1
-func TestNewSignedTreeHeadV1(t *testing.T) {
-}
-
-// TODO: TestNewCosignedTreeHeadV1
-func TestNewCosignedTreeHeadV1(t *testing.T) {
-}
-
-// TODO: TestNewConsistencyProofV1
-func TestNewConsistencyProofV1(t *testing.T) {
-}
-
-// TODO: TestNewInclusionProofV1
-func TestNewInclusionProofV1(t *testing.T) {
-}
-
-// TODO: TestNewSignedChecksumV1
-func TestNewSignedChecksumV1(t *testing.T) {
-}
-
-// TODO: TestNewTreeHeadV1
-func TestNewTreeHeadV1(t *testing.T) {
-}
diff --git a/types/trunnel.go b/types/trunnel.go
new file mode 100644
index 0000000..72ae68d
--- /dev/null
+++ b/types/trunnel.go
@@ -0,0 +1,57 @@
+package types
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+const (
+ // MessageSize is the number of bytes in a Trunnel-encoded leaf message
+ MessageSize = 8 + HashSize
+ // LeafSize is the number of bytes in a Trunnel-encoded leaf
+ LeafSize = MessageSize + SignatureSize + HashSize
+)
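+
+// With HashSize = sha256.Size (32 bytes) and SignatureSize =
+// ed25519.SignatureSize (64 bytes), as defined in types.go, this yields
+// MessageSize = 8 + 32 = 40 bytes and LeafSize = 40 + 64 + 32 = 136 bytes.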
+
+// Marshal returns a Trunnel-encoded message
+func (m *Message) Marshal() []byte {
+ buf := make([]byte, MessageSize)
+ binary.BigEndian.PutUint64(buf, m.ShardHint)
+ copy(buf[8:], m.Checksum[:])
+ return buf
+}
+
+// Marshal returns a Trunnel-encoded leaf
+func (l *Leaf) Marshal() []byte {
+ buf := l.Message.Marshal()
+ buf = append(buf, l.SigIdent.Signature[:]...)
+ buf = append(buf, l.SigIdent.KeyHash[:]...)
+ return buf
+}
+
+// Marshal returns a Trunnel-encoded tree head
+func (th *TreeHead) Marshal() []byte {
+ buf := make([]byte, 8+8+HashSize)
+ binary.BigEndian.PutUint64(buf[0:8], th.Timestamp)
+ binary.BigEndian.PutUint64(buf[8:16], th.TreeSize)
+ copy(buf[16:], th.RootHash[:])
+ return buf
+}
+
+// Unmarshal parses the Trunnel-encoded buffer as a leaf
+func (l *Leaf) Unmarshal(buf []byte) error {
+ if len(buf) != LeafSize {
+ return fmt.Errorf("invalid leaf size: %v", len(buf))
+ }
+ // Shard hint
+ l.ShardHint = binary.BigEndian.Uint64(buf)
+ offset := 8
+ // Checksum
+ copy(l.Checksum[:], buf[offset:offset+HashSize])
+ offset += HashSize
+ // Signature
+ copy(l.Signature[:], buf[offset:offset+SignatureSize])
+ offset += SignatureSize
+ // KeyHash
+ copy(l.KeyHash[:], buf[offset:])
+ return nil
+}
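+
+// A minimal round-trip sketch (illustration only; checksum, sig and keyHash
+// are placeholder [32]byte/[64]byte values):
+//
+//	leaf := Leaf{
+//		Message:  Message{ShardHint: 1, Checksum: checksum},
+//		SigIdent: SigIdent{Signature: sig, KeyHash: keyHash},
+//	}
+//	b := leaf.Marshal() // exactly LeafSize (136) bytes
+//
+//	var parsed Leaf
+//	if err := parsed.Unmarshal(b); err != nil {
+//		// only rejected if len(b) != LeafSize
+//	}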
diff --git a/types/trunnel_test.go b/types/trunnel_test.go
new file mode 100644
index 0000000..0fa7656
--- /dev/null
+++ b/types/trunnel_test.go
@@ -0,0 +1,114 @@
+package types
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+var (
+ testBuffer32 = [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
+ testBuffer64 = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}
+)
+
+func TestMarshalMessage(t *testing.T) {
+ description := "valid: shard hint 72623859790382856, checksum 0x00,0x01,..."
+ message := &Message{
+ ShardHint: 72623859790382856,
+ Checksum: testBuffer32,
+ }
+ want := bytes.Join([][]byte{
+ []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ testBuffer32[:],
+ }, nil)
+ if got := message.Marshal(); !bytes.Equal(got, want) {
+ t.Errorf("got message\n\t%v\nbut wanted\n\t%v\nin test %q\n", got, want, description)
+ }
+}
+
+func TestMarshalLeaf(t *testing.T) {
+ description := "valid: shard hint 72623859790382856, buffers 0x00,0x01,..."
+ leaf := &Leaf{
+ Message: Message{
+ ShardHint: 72623859790382856,
+ Checksum: testBuffer32,
+ },
+ SigIdent: SigIdent{
+ Signature: testBuffer64,
+ KeyHash: testBuffer32,
+ },
+ }
+ want := bytes.Join([][]byte{
+ []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ testBuffer32[:], testBuffer64[:], testBuffer32[:],
+ }, nil)
+ if got := leaf.Marshal(); !bytes.Equal(got, want) {
+ t.Errorf("got leaf\n\t%v\nbut wanted\n\t%v\nin test %q\n", got, want, description)
+ }
+}
+
+func TestMarshalTreeHead(t *testing.T) {
+ description := "valid: timestamp 16909060, tree size 72623859790382856, root hash 0x00,0x01,..."
+ th := &TreeHead{
+ Timestamp: 16909060,
+ TreeSize: 72623859790382856,
+ RootHash: testBuffer32,
+ }
+ want := bytes.Join([][]byte{
+ []byte{0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04},
+ []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ testBuffer32[:],
+ }, nil)
+ if got := th.Marshal(); !bytes.Equal(got, want) {
+ t.Errorf("got tree head\n\t%v\nbut wanted\n\t%v\nin test %q\n", got, want, description)
+ }
+}
+
+func TestUnmarshalLeaf(t *testing.T) {
+ for _, table := range []struct {
+ description string
+ serialized []byte
+ wantErr bool
+ want *Leaf
+ }{
+ {
+ description: "invalid: not enough bytes",
+ serialized: make([]byte, LeafSize-1),
+ wantErr: true,
+ },
+ {
+ description: "invalid: too many bytes",
+ serialized: make([]byte, LeafSize+1),
+ wantErr: true,
+ },
+ {
+ description: "valid: shard hint 72623859790382856, buffers 0x00,0x01,...",
+ serialized: bytes.Join([][]byte{
+ []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ testBuffer32[:], testBuffer64[:], testBuffer32[:],
+ }, nil),
+ want: &Leaf{
+ Message: Message{
+ ShardHint: 72623859790382856,
+ Checksum: testBuffer32,
+ },
+ SigIdent: SigIdent{
+ Signature: testBuffer64,
+ KeyHash: testBuffer32,
+ },
+ },
+ },
+ } {
+ var leaf Leaf
+ err := leaf.Unmarshal(table.serialized)
+ if got, want := err != nil, table.wantErr; got != want {
+ t.Errorf("got error %v but wanted %v in test %q: %v", got, want, table.description, err)
+ }
+ if err != nil {
+ continue // nothing more to check on error
+ }
+ if got, want := &leaf, table.want; !reflect.DeepEqual(got, want) {
+ t.Errorf("got leaf\n\t%v\nbut wanted\n\t%v\nin test %q\n", got, want, table.description)
+ }
+ }
+}
diff --git a/types/types.go b/types/types.go
new file mode 100644
index 0000000..483dac0
--- /dev/null
+++ b/types/types.go
@@ -0,0 +1,73 @@
+package types
+
+import (
+ "crypto/ed25519"
+ "crypto/sha256"
+)
+
+const (
+ HashSize = sha256.Size
+ SignatureSize = ed25519.SignatureSize
+)
+
+// Leaf is the log's Merkle tree leaf.
+//
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#merkle-tree-leaf
+type Leaf struct {
+ Message
+ SigIdent
+}
+
+// Message is composed of a shard hint and a checksum. The submitter selects
+// these values to fit the log's shard interval and the opaque data in question.
+type Message struct {
+ ShardHint uint64
+ Checksum [HashSize]byte
+}
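+
+// For example, a submitter publishing some opaque blob might construct
+// (sketch; shardHint and blob are placeholders):
+//
+//	msg := Message{
+//		ShardHint: shardHint,           // chosen within the log's shard interval
+//		Checksum:  sha256.Sum256(blob), // checksum of the opaque data
+//	}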
+
+// SigIdent is composed of a signature-signer pair. The signature is computed
+// over the Trunnel-serialized leaf message. KeyHash identifies the signer.
+type SigIdent struct {
+ Signature [SignatureSize]byte
+ KeyHash [HashSize]byte
+}
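+
+// A SigIdent can, for instance, be produced with crypto/ed25519 (sketch; priv
+// and pub are a placeholder key pair, and the key hash is assumed here to be
+// the SHA-256 hash of the public verification key):
+//
+//	var si SigIdent
+//	copy(si.Signature[:], ed25519.Sign(priv, msg.Marshal()))
+//	si.KeyHash = sha256.Sum256(pub)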
+
+// SignedTreeHead is composed of a tree head and a list of signature-signer
+// pairs. Each signature is computed over the Trunnel-serialized tree head.
+//
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#get-tree-head-cosigned
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#get-tree-head-to-sign
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#get-tree-head-latest
+type SignedTreeHead struct {
+ TreeHead
+ SigIdent []SigIdent
+}
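+
+// A cosignature is computed the same way, but over TreeHead.Marshal()
+// (sketch; priv and keyHash are placeholders):
+//
+//	var cosig SigIdent
+//	copy(cosig.Signature[:], ed25519.Sign(priv, sth.TreeHead.Marshal()))
+//	cosig.KeyHash = keyHash
+//	sth.SigIdent = append(sth.SigIdent, cosig)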
+
+// TreeHead is the log's tree head.
+//
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#merkle-tree-head
+type TreeHead struct {
+ Timestamp uint64
+ TreeSize uint64
+ RootHash [HashSize]byte
+}
+
+// ConsistencyProof proves that the log is append-only, i.e., that the tree of
+// size OldSize is consistent with the tree of size NewSize.
+//
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#get-consistency-proof
+type ConsistencyProof struct {
+ NewSize uint64
+ OldSize uint64
+ Path [][HashSize]byte
+}
+
+// InclusionProof proves that a leaf is included in the log's tree of size
+// TreeSize.
+//
+// Ref: https://github.com/system-transparency/stfe/blob/design/doc/api.md#get-proof-by-hash
+type InclusionProof struct {
+ TreeSize uint64
+ LeafIndex uint64
+ Path [][HashSize]byte
+}