Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 11 additions & 2 deletions pkg/chain/ethereum/tbtc.go
Original file line number Diff line number Diff line change
Expand Up @@ -1469,13 +1469,22 @@ func (tc *TbtcChain) GetWallet(
)
}

// Fetch wallet registry data on a best-effort basis. Legacy callers
// only use Bridge-sourced fields and never access MembersIDsHash, so a
// registry outage must not block them. The zero value signals that
// registry data is unavailable; downstream consumers that need it
// (e.g. signer_approval_certificate) already guard against this.
var membersIDsHash [32]byte

walletRegistryWallet, err := tc.walletRegistry.GetWallet(wallet.EcdsaWalletID)
if err != nil {
return nil, fmt.Errorf(
logger.Warnf(
"cannot get wallet registry data for wallet [0x%x]: [%v]",
wallet.EcdsaWalletID,
err,
)
} else {
membersIDsHash = walletRegistryWallet.MembersIdsHash
}

walletState, err := parseWalletState(wallet.State)
Expand All @@ -1485,7 +1494,7 @@ func (tc *TbtcChain) GetWallet(

return &tbtc.WalletChainData{
EcdsaWalletID: wallet.EcdsaWalletID,
MembersIDsHash: walletRegistryWallet.MembersIdsHash,
MembersIDsHash: membersIDsHash,
MainUtxoHash: wallet.MainUtxoHash,
PendingRedemptionsValue: wallet.PendingRedemptionsValue,
CreatedAt: time.Unix(int64(wallet.CreatedAt), 0),
Expand Down
50 changes: 49 additions & 1 deletion pkg/covenantsigner/covenantsigner_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@ import (
"context"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/sha256"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"encoding/hex"
"encoding/json"
Expand Down Expand Up @@ -104,6 +104,54 @@ func (fmh *faultingMemoryHandle) Delete(directory string, name string) error {
return fmh.memoryHandle.Delete(directory, name)
}

// faultingDescriptor wraps a memoryDescriptor and returns an injected error
// from Content(), allowing tests to simulate unreadable job files.
type faultingDescriptor struct {
name string
directory string
err error
}

func (fd *faultingDescriptor) Name() string { return fd.name }
func (fd *faultingDescriptor) Directory() string { return fd.directory }
func (fd *faultingDescriptor) Content() ([]byte, error) { return nil, fd.err }

// contentFaultingHandle extends memoryHandle by injecting faulting descriptors
// into the ReadAll channel alongside normal descriptors. This enables testing
// of load() behavior when individual file reads fail.
type contentFaultingHandle struct {
	// Embedded in-memory handle that supplies the regular persistence
	// behavior (Save/Delete and the stored items).
	*memoryHandle
	// Descriptors whose Content() returns an injected error; ReadAll
	// emits them after the regular items.
	faultingDescriptors []*faultingDescriptor
}

// newContentFaultingHandle creates a contentFaultingHandle backed by a
// fresh in-memory handle, with no faulting descriptors registered yet.
func newContentFaultingHandle() *contentFaultingHandle {
	handle := new(contentFaultingHandle)
	handle.memoryHandle = newMemoryHandle()
	return handle
}

// AddFaultingDescriptor registers a descriptor that ReadAll will emit and
// whose Content() call surfaces the given error.
func (cfh *contentFaultingHandle) AddFaultingDescriptor(name, directory string, err error) {
	descriptor := &faultingDescriptor{
		name:      name,
		directory: directory,
		err:       err,
	}
	cfh.faultingDescriptors = append(cfh.faultingDescriptors, descriptor)
}

// ReadAll emits every stored item followed by all registered faulting
// descriptors. The data channel is buffered to hold everything up front,
// so both channels can be closed before returning: consumers drain the
// data without blocking, and the handle itself never reports an error --
// read failures surface only through descriptor.Content().
func (cfh *contentFaultingHandle) ReadAll() (<-chan persistence.DataDescriptor, <-chan error) {
	total := len(cfh.items) + len(cfh.faultingDescriptors)

	dataChan := make(chan persistence.DataDescriptor, total)
	errorChan := make(chan error)

	for _, item := range cfh.items {
		dataChan <- item
	}
	for _, descriptor := range cfh.faultingDescriptors {
		dataChan <- descriptor
	}

	close(dataChan)
	close(errorChan)

	return dataChan, errorChan
}

type scriptedEngine struct {
submit func(*Job) (*Transition, error)
poll func(*Job) (*Transition, error)
Expand Down
50 changes: 40 additions & 10 deletions pkg/covenantsigner/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -169,30 +169,60 @@ func (s *Store) load() error {

content, err := descriptor.Content()
if err != nil {
return err
logger.Warnf(
"skipping unreadable job file [%s]: [%v]",
descriptor.Name(),
err,
)
continue
}

job := &Job{}
if err := json.Unmarshal(content, job); err != nil {
return err
logger.Warnf(
"skipping malformed job file [%s]: [%v]",
descriptor.Name(),
err,
)
continue
}

existingID, ok := s.byRouteKey[routeKey(job.Route, job.RouteRequestID)]
if ok {
existing := s.byRequestID[existingID]
if existing != nil {
key := routeKey(job.Route, job.RouteRequestID)

if existingID, ok := s.byRouteKey[key]; ok {
if existing := s.byRequestID[existingID]; existing != nil {
existingIsNewerOrSame, err := isNewerOrSameJobRevision(existing, job)
if err != nil {
return err
}
if existingIsNewerOrSame {
// When the timestamp comparison fails, prefer
// whichever job has a parseable timestamp. If the
// candidate's timestamp is valid, the failure is on
// the existing job -- replace it. Otherwise skip the
// candidate.
if _, parseErr := time.Parse(time.RFC3339Nano, job.UpdatedAt); parseErr != nil {
logger.Warnf(
"skipping job [%s] with invalid timestamp on duplicate route key [%s/%s]: [%v]",
job.RequestID,
job.Route,
job.RouteRequestID,
err,
)
continue
}
logger.Warnf(
"replacing job [%s] with invalid timestamp on duplicate route key [%s/%s]: [%v]",
existing.RequestID,
job.Route,
job.RouteRequestID,
err,
)
} else if existingIsNewerOrSame {
continue
}
}
}

s.byRequestID[job.RequestID] = job
s.byRouteKey[routeKey(job.Route, job.RouteRequestID)] = job.RequestID
s.byRouteKey[key] = job.RequestID
case err, ok := <-errorChan:
if !ok {
errorChan = nil
Expand Down
180 changes: 174 additions & 6 deletions pkg/covenantsigner/store_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,12 +251,180 @@ func TestStoreLoadFailsOnInvalidUpdatedAtForDuplicateRouteKeys(t *testing.T) {
t.Fatal(err)
}

_, err = NewStore(handle, "")
if err == nil {
t.Fatal("expected invalid UpdatedAt error")
store, err := NewStore(handle, "")
if err != nil {
t.Fatalf(
"expected store to load despite invalid timestamp on duplicate route key, got error: %v",
err,
)
}

loaded, ok, err := store.GetByRouteRequest(TemplateSelfV1, "ors_load_invalid_updated_at")
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("expected valid job to be loaded despite invalid-timestamp sibling")
}
if loaded.RequestID != first.RequestID {
t.Fatalf("expected request ID %s, got %s", first.RequestID, loaded.RequestID)
}
}

// TestStoreLoadSkipsUnreadableFile ensures that a job file whose content
// cannot be read does not abort store loading: the unreadable descriptor
// is skipped and sibling jobs remain available.
func TestStoreLoadSkipsUnreadableFile(t *testing.T) {
	handle := newContentFaultingHandle()

	readableJob := &Job{
		RequestID:       "kcs_self_valid_readable",
		RouteRequestID:  "ors_readable",
		Route:           TemplateSelfV1,
		IdempotencyKey:  "idem_readable",
		FacadeRequestID: "rf_readable",
		RequestDigest:   "0xaaa",
		State:           JobStatePending,
		Detail:          "queued",
		CreatedAt:       "2026-03-09T00:00:00Z",
		UpdatedAt:       "2026-03-09T00:00:00Z",
		Request:         baseRequest(TemplateSelfV1),
	}

	encoded, err := json.Marshal(readableJob)
	if err != nil {
		t.Fatal(err)
	}
	if err := handle.Save(encoded, jobsDirectory, readableJob.RequestID+".json"); err != nil {
		t.Fatal(err)
	}

	// Inject a descriptor whose Content() fails, simulating a disk-level
	// read error on one of the job files.
	handle.AddFaultingDescriptor(
		"corrupted_file.json",
		jobsDirectory,
		errors.New("simulated disk read error"),
	)

	store, err := NewStore(handle, "")
	if err != nil {
		t.Fatalf("expected store to load despite unreadable file, got error: %v", err)
	}

	fetched, found, err := store.GetByRouteRequest(TemplateSelfV1, "ors_readable")
	if err != nil {
		t.Fatal(err)
	}
	if !found {
		t.Fatal("expected valid job to be loaded despite corrupted sibling")
	}
	if fetched.RequestID != readableJob.RequestID {
		t.Fatalf("expected request ID %s, got %s", readableJob.RequestID, fetched.RequestID)
	}
}

// TestStoreLoadSkipsMalformedJSON ensures that a job file containing
// unparseable JSON is skipped during load instead of failing the whole
// store, and that valid sibling jobs are still loaded.
func TestStoreLoadSkipsMalformedJSON(t *testing.T) {
	handle := newMemoryHandle()

	goodJob := &Job{
		RequestID:       "kcs_self_valid_json",
		RouteRequestID:  "ors_valid_json",
		Route:           TemplateSelfV1,
		IdempotencyKey:  "idem_valid_json",
		FacadeRequestID: "rf_valid_json",
		RequestDigest:   "0xbbb",
		State:           JobStatePending,
		Detail:          "queued",
		CreatedAt:       "2026-03-09T00:00:00Z",
		UpdatedAt:       "2026-03-09T00:00:00Z",
		Request:         baseRequest(TemplateSelfV1),
	}

	encoded, err := json.Marshal(goodJob)
	if err != nil {
		t.Fatal(err)
	}
	if err := handle.Save(encoded, jobsDirectory, goodJob.RequestID+".json"); err != nil {
		t.Fatal(err)
	}

	// Persist a sibling file that is not valid JSON at all.
	if err := handle.Save([]byte("not valid json content"), jobsDirectory, "malformed.json"); err != nil {
		t.Fatal(err)
	}

	store, err := NewStore(handle, "")
	if err != nil {
		t.Fatalf("expected store to load despite malformed JSON file, got error: %v", err)
	}

	fetched, found, err := store.GetByRouteRequest(TemplateSelfV1, "ors_valid_json")
	if err != nil {
		t.Fatal(err)
	}
	if !found {
		t.Fatal("expected valid job to be loaded despite malformed sibling")
	}
	if fetched.RequestID != goodJob.RequestID {
		t.Fatalf("expected request ID %s, got %s", goodJob.RequestID, fetched.RequestID)
	}
}

func TestStoreLoadSkipsInvalidTimestampOnDuplicateRouteKey(t *testing.T) {
handle := newMemoryHandle()

validJob := &Job{
RequestID: "kcs_self_valid_ts",
RouteRequestID: "ors_ts_dupe",
Route: TemplateSelfV1,
IdempotencyKey: "idem_valid_ts",
FacadeRequestID: "rf_valid_ts",
RequestDigest: "0xccc",
State: JobStatePending,
Detail: "queued",
CreatedAt: "2026-03-09T00:00:00Z",
UpdatedAt: "2026-03-09T00:00:00Z",
Request: baseRequest(TemplateSelfV1),
}

badTimestampJob := &Job{
RequestID: "kcs_self_bad_ts",
RouteRequestID: "ors_ts_dupe",
Route: TemplateSelfV1,
IdempotencyKey: "idem_bad_ts",
FacadeRequestID: "rf_bad_ts",
RequestDigest: "0xddd",
State: JobStatePending,
Detail: "queued",
CreatedAt: "2026-03-10T00:00:00Z",
UpdatedAt: "invalid-timestamp",
Request: baseRequest(TemplateSelfV1),
}

validPayload, err := json.Marshal(validJob)
if err != nil {
t.Fatal(err)
}
badPayload, err := json.Marshal(badTimestampJob)
if err != nil {
t.Fatal(err)
}

if err := handle.Save(validPayload, jobsDirectory, validJob.RequestID+".json"); err != nil {
t.Fatal(err)
}
if err := handle.Save(badPayload, jobsDirectory, badTimestampJob.RequestID+".json"); err != nil {
t.Fatal(err)
}

store, err := NewStore(handle, "")
if err != nil {
t.Fatalf("expected store to load despite invalid timestamp on duplicate route key, got error: %v", err)
}

loaded, ok, err := store.GetByRequestID("kcs_self_valid_ts")
if err != nil {
t.Fatal(err)
}
if !ok {
t.Fatal("expected valid job to be accessible after skipping bad-timestamp sibling")
}
if !strings.Contains(err.Error(), "cannot parse candidate job updatedAt") &&
!strings.Contains(err.Error(), "cannot parse existing job updatedAt") {
t.Fatalf("unexpected error: %v", err)
if loaded.RequestID != validJob.RequestID {
t.Fatalf("expected request ID %s, got %s", validJob.RequestID, loaded.RequestID)
}
}
Loading
Loading