
Commit

Merge pull request #584 from bnb-chain/disallow_change_payment
fix: disallow update payment address when there are created/updating objects
keefel authored Mar 11, 2024
2 parents 98f40b7 + 8d672ac commit a74e009
Showing 18 changed files with 379 additions and 180 deletions.
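The rule this change enforces is easiest to see in isolation. Below is a minimal, self-contained Go sketch (purely illustrative, with made-up names rather than the actual keeper API) of the behavior that the new TestDisallowChangePaymentAccount test further down exercises: a bucket's payment address can only be updated while the bucket has no created-but-unsealed objects.

package main

import (
	"errors"
	"fmt"
)

// changePaymentAddress is an illustrative stand-in for the keeper-side guard:
// a payment-address update is rejected while the bucket still has objects in
// CREATED (unsealed) status; once everything is sealed, the update goes through.
func changePaymentAddress(current, next string, unsealedObjects int) (string, error) {
	if next == "" || next == current {
		return current, nil // no payment change requested
	}
	if unsealedObjects > 0 {
		return current, errors.New("bucket has unsealed objects, payment address cannot be updated")
	}
	return next, nil
}

func main() {
	_, err := changePaymentAddress("0xOwner", "0xPaymentAccount", 1)
	fmt.Println(err) // rejected: one object is still unsealed
	addr, _ := changePaymentAddress("0xOwner", "0xPaymentAccount", 0)
	fmt.Println(addr) // 0xPaymentAccount: allowed once all objects are sealed
}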
193 changes: 129 additions & 64 deletions e2e/tests/storage_test.go
@@ -28,6 +28,7 @@ import (
"github.com/bnb-chain/greenfield/sdk/types"
storageutils "github.com/bnb-chain/greenfield/testutil/storage"
types2 "github.com/bnb-chain/greenfield/types"
paymenttypes "github.com/bnb-chain/greenfield/x/payment/types"
sptypes "github.com/bnb-chain/greenfield/x/sp/types"
storagetypes "github.com/bnb-chain/greenfield/x/storage/types"
)
@@ -828,56 +829,11 @@ func (s *StorageTestSuite) TestDiscontinueObject_UserDeleted() {

// DiscontinueObject
msgDiscontinueObject := storagetypes.NewMsgDiscontinueObject(sp.GcKey.GetAddr(), bucketName, []sdkmath.Uint{objectId}, "test")
txRes := s.SendTxBlock(sp.GcKey, msgDiscontinueObject)
deleteAt := filterDiscontinueObjectEventFromTx(txRes).DeleteAt
_ = s.SendTxBlock(sp.GcKey, msgDiscontinueObject)

// DeleteObject before discontinue confirm window
msgDeleteObject := storagetypes.NewMsgDeleteObject(user.GetAddr(), bucketName, objectName)
txRes = s.SendTxBlock(user, msgDeleteObject)
event := filterDeleteObjectEventFromTx(txRes)
s.Require().Equal(event.ObjectId, objectId)

// Wait after the delete timestamp
heightBefore := txRes.Height
heightAfter := int64(0)
for {
time.Sleep(200 * time.Millisecond)
statusRes, err := s.TmClient.TmClient.Status(context.Background())
s.Require().NoError(err)
blockTime := statusRes.SyncInfo.LatestBlockTime.Unix()

s.T().Logf("current blockTime: %d, delete blockTime: %d", blockTime, deleteAt)

if blockTime >= deleteAt {
heightAfter = statusRes.SyncInfo.LatestBlockHeight
break
} else {
heightBefore = statusRes.SyncInfo.LatestBlockHeight
}
}

time.Sleep(200 * time.Millisecond)
events := make([]storagetypes.EventDeleteObject, 0)
for heightBefore <= heightAfter {
blockRes, err := s.TmClient.TmClient.BlockResults(context.Background(), &heightBefore)
s.Require().NoError(err)
events = append(events, filterDeleteObjectEventFromBlock(blockRes)...)
heightBefore++
}

// Already deleted by user
found := false
for _, event := range events {
if event.ObjectId.Equal(objectId) {
found = true
}
}
s.Require().True(!found)

time.Sleep(500 * time.Millisecond)
statusRes, err := s.TmClient.TmClient.Status(context.Background())
s.Require().NoError(err)
s.Require().True(statusRes.SyncInfo.LatestBlockHeight > heightAfter)
s.SendTxBlockWithExpectErrorString(msgDeleteObject, user, "is discontined")
}

func (s *StorageTestSuite) TestDiscontinueBucket_Normal() {
@@ -1190,23 +1146,6 @@ func filterDeleteObjectEventFromBlock(blockRes *ctypes.ResultBlockResults) []sto
return events
}

func filterDeleteObjectEventFromTx(txRes *sdk.TxResponse) storagetypes.EventDeleteObject {
objectIdStr := ""
for _, event := range txRes.Events {
if event.Type == "greenfield.storage.EventDeleteObject" {
for _, attr := range event.Attributes {
if string(attr.Key) == "object_id" {
objectIdStr = strings.Trim(string(attr.Value), `"`)
}
}
}
}
objectId := sdkmath.NewUintFromString(objectIdStr)
return storagetypes.EventDeleteObject{
ObjectId: objectId,
}
}

func filterDiscontinueBucketEventFromTx(txRes *sdk.TxResponse) storagetypes.EventDiscontinueBucket {
deleteAtStr := ""
for _, event := range txRes.Logs[0].Events {
@@ -2407,3 +2346,129 @@ func (s *StorageTestSuite) TestDeleteCreateObject_InCreatedStatus() {
_, err = s.Client.HeadObject(ctx, &queryHeadObjectRequest)
s.Require().EqualError(err, "rpc error: code = Unknown desc = No such object: unknown request")
}

func (s *StorageTestSuite) TestDisallowChangePaymentAccount() {
var err error
sp := s.BaseSuite.PickStorageProvider()
gvg, found := sp.GetFirstGlobalVirtualGroup()
s.Require().True(found)
user := s.User
// CreateBucket
bucketName := storageutils.GenRandomBucketName()
msgCreateBucket := storagetypes.NewMsgCreateBucket(
user.GetAddr(), bucketName, storagetypes.VISIBILITY_TYPE_PUBLIC_READ, sp.OperatorKey.GetAddr(),
nil, math.MaxUint, nil, 0)
msgCreateBucket.PrimarySpApproval.GlobalVirtualGroupFamilyId = gvg.FamilyId
msgCreateBucket.PrimarySpApproval.Sig, err = sp.ApprovalKey.Sign(msgCreateBucket.GetApprovalBytes())
s.Require().NoError(err)
s.SendTxBlock(user, msgCreateBucket)

// HeadBucket
ctx := context.Background()
queryHeadBucketRequest := storagetypes.QueryHeadBucketRequest{
BucketName: bucketName,
}
_, err = s.Client.HeadBucket(ctx, &queryHeadBucketRequest)
s.Require().NoError(err)

// create a new payment account
msgCreatePaymentAccount := &paymenttypes.MsgCreatePaymentAccount{
Creator: user.GetAddr().String(),
}
_ = s.SendTxBlock(user, msgCreatePaymentAccount)
// query user's payment accounts
queryGetPaymentAccountsByOwnerRequest := paymenttypes.QueryPaymentAccountsByOwnerRequest{
Owner: user.GetAddr().String(),
}
paymentAccounts, err := s.Client.PaymentAccountsByOwner(ctx, &queryGetPaymentAccountsByOwnerRequest)
s.Require().NoError(err)
s.T().Log(paymentAccounts)
s.Require().Equal(1, len(paymentAccounts.PaymentAccounts))
paymentAccountAddr := sdk.MustAccAddressFromHex(paymentAccounts.PaymentAccounts[0])

msgDeposit := &paymenttypes.MsgDeposit{
Creator: user.GetAddr().String(),
To: paymentAccountAddr.String(),
Amount: types.NewIntFromInt64WithDecimal(2, types.DecimalBNB),
}
_ = s.SendTxBlock(user, msgDeposit)

// UpdateBucketInfo succeeds while the bucket has no created (unsealed) objects
msgUpdateBucketInfo := storagetypes.NewMsgUpdateBucketInfo(
user.GetAddr(), bucketName, nil, paymentAccountAddr, storagetypes.VISIBILITY_TYPE_PRIVATE)
s.Require().NoError(err)
s.SendTxBlock(user, msgUpdateBucketInfo)
s.Require().NoError(err)

// CreateObject
objectName := storageutils.GenRandomObjectName()
// create test buffer
var buffer bytes.Buffer
line := `1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,
1234567890,1234567890,1234567890,123`
// Create 1MiB content where each line contains 1024 characters.
for i := 0; i < 1024; i++ {
buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line))
}
payloadSize := buffer.Len()
checksum := sdk.Keccak256(buffer.Bytes())
expectChecksum := [][]byte{checksum, checksum, checksum, checksum, checksum, checksum, checksum}
contextType := "text/event-stream"
msgCreateObject := storagetypes.NewMsgCreateObject(user.GetAddr(), bucketName, objectName, uint64(payloadSize),
storagetypes.VISIBILITY_TYPE_PRIVATE, expectChecksum, contextType, storagetypes.REDUNDANCY_EC_TYPE, math.MaxUint, nil)
msgCreateObject.PrimarySpApproval.Sig, err = sp.ApprovalKey.Sign(msgCreateObject.GetApprovalBytes())
s.Require().NoError(err)
s.SendTxBlock(user, msgCreateObject)

// HeadObject
queryHeadObjectRequest := storagetypes.QueryHeadObjectRequest{
BucketName: bucketName,
ObjectName: objectName,
}
queryHeadObjectResponse, err := s.Client.HeadObject(ctx, &queryHeadObjectRequest)
s.Require().NoError(err)

// UpdateBucketInfo with a new payment address is rejected while there is a created (unsealed) object
msgUpdateBucketInfo = storagetypes.NewMsgUpdateBucketInfo(
user.GetAddr(), bucketName, nil, user.GetAddr(), storagetypes.VISIBILITY_TYPE_PRIVATE)
s.Require().NoError(err)
s.SendTxBlockWithExpectErrorString(msgUpdateBucketInfo, user, "has unseald objects")

// SealObject
gvgId := gvg.Id
msgSealObject := storagetypes.NewMsgSealObject(sp.SealKey.GetAddr(), bucketName, objectName, gvgId, nil)

secondarySigs := make([][]byte, 0)
secondarySPBlsPubKeys := make([]bls.PublicKey, 0)
blsSignHash := storagetypes.NewSecondarySpSealObjectSignDoc(s.GetChainID(), gvgId, queryHeadObjectResponse.ObjectInfo.Id, storagetypes.GenerateHash(queryHeadObjectResponse.ObjectInfo.Checksums[:])).GetBlsSignHash()
// every secondary sp signs the checksums
for _, spID := range gvg.SecondarySpIds {
sig, err := core.BlsSignAndVerify(s.StorageProviders[spID], blsSignHash)
s.Require().NoError(err)
secondarySigs = append(secondarySigs, sig)
pk, err := bls.PublicKeyFromBytes(s.StorageProviders[spID].BlsKey.PubKey().Bytes())
s.Require().NoError(err)
secondarySPBlsPubKeys = append(secondarySPBlsPubKeys, pk)
}
aggBlsSig, err := core.BlsAggregateAndVerify(secondarySPBlsPubKeys, blsSignHash, secondarySigs)
s.Require().NoError(err)
msgSealObject.SecondarySpBlsAggSignatures = aggBlsSig

s.T().Logf("msg %s", msgSealObject.String())
s.SendTxBlock(sp.SealKey, msgSealObject)

// UpdateBucketInfo succeeds again once there are no created (unsealed) objects left
msgUpdateBucketInfo = storagetypes.NewMsgUpdateBucketInfo(
user.GetAddr(), bucketName, nil, user.GetAddr(), storagetypes.VISIBILITY_TYPE_PRIVATE)
s.Require().NoError(err)
s.SendTxBlock(user, msgUpdateBucketInfo)
s.Require().NoError(err)
}
2 changes: 1 addition & 1 deletion x/payment/keeper/grpc_query_params_by_timestamp.go
@@ -18,7 +18,7 @@ func (k Keeper) ParamsByTimestamp(c context.Context, req *types.QueryParamsByTim

ts := req.GetTimestamp()
if ts == 0 {
ts = ctx.BlockTime().Unix()
ts = ctx.BlockTime().Unix() + 1
}

params := k.GetParams(ctx)
4 changes: 2 additions & 2 deletions x/payment/keeper/grpc_query_test.go
@@ -41,13 +41,13 @@ func TestParamsByTimestampQuery(t *testing.T) {
require.NoError(t, err)

response, err := keeper.ParamsByTimestamp(ctx, &types.QueryParamsByTimestampRequest{
Timestamp: before.Unix(),
Timestamp: before.Unix() + 1,
})
require.NoError(t, err)
require.True(t, newReserveTime != response.Params.VersionedParams.ReserveTime)

response, err = keeper.ParamsByTimestamp(ctx, &types.QueryParamsByTimestampRequest{
Timestamp: after.Unix(),
Timestamp: after.Unix() + 1,
})
require.NoError(t, err)
require.True(t, newReserveTime == response.Params.VersionedParams.ReserveTime)
7 changes: 5 additions & 2 deletions x/payment/keeper/params.go
@@ -53,8 +53,11 @@ func (k Keeper) SetVersionedParamsWithTs(ctx sdk.Context, verParams types.Versio
func (k Keeper) GetVersionedParamsWithTs(ctx sdk.Context, ts int64) (verParams types.VersionedParams, err error) {
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.VersionedParamsKeyPrefix)

// ReverseIterator will exclusive end, so we increment ts by 1
startKey := types.VersionedParamsKey(ts + 1)
// params are updated in the end block, so the given ts itself does not need to be included:
// for example, if the params are updated at timestamp 100, the txs executed at timestamp 100
// still use the old params; when 100 is later passed to this query, the old params should
// still be returned.
startKey := types.VersionedParamsKey(ts)
iterator := store.ReverseIterator(nil, startKey)
defer iterator.Close()
if !iterator.Valid() {
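To make the lookup semantics in the comment above concrete, here is a small self-contained Go sketch (plain slices instead of the KV store; all names are illustrative). It models the "newest entry strictly before ts" rule that ReverseIterator with an exclusive end key implements, and shows why the queries above now default to ctx.BlockTime().Unix() + 1.

package main

import (
	"fmt"
	"sort"
)

// versionedParam stands in for a params version keyed by the timestamp at which
// it took effect (params are written during the end block of that timestamp).
type versionedParam struct {
	effectiveTs int64
	reserveTime uint64
}

// paramsAt mirrors ReverseIterator(nil, key(ts)) with its exclusive end: it returns
// the newest entry whose effective timestamp is strictly less than ts, so querying
// the exact update timestamp still yields the previous version.
func paramsAt(history []versionedParam, ts int64) (versionedParam, bool) {
	sort.Slice(history, func(i, j int) bool { return history[i].effectiveTs < history[j].effectiveTs })
	for i := len(history) - 1; i >= 0; i-- {
		if history[i].effectiveTs < ts {
			return history[i], true
		}
	}
	return versionedParam{}, false
}

func main() {
	history := []versionedParam{
		{effectiveTs: 50, reserveTime: 100},
		{effectiveTs: 100, reserveTime: 200}, // updated in the end block at timestamp 100
	}
	fmt.Println(paramsAt(history, 100)) // {50 100}: txs at timestamp 100 still use the old params
	fmt.Println(paramsAt(history, 101)) // {100 200}: after 100, the new params apply
	// This is why the queries default to ctx.BlockTime().Unix() + 1: it makes the
	// params effective at the current block time visible to the caller.
}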
15 changes: 13 additions & 2 deletions x/payment/keeper/stream_record.go
@@ -10,6 +10,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"

"github.com/bnb-chain/greenfield/x/payment/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)

func (k Keeper) CheckStreamRecord(streamRecord *types.StreamRecord) {
@@ -123,7 +124,12 @@ func (k Keeper) UpdateFrozenStreamRecord(ctx sdk.Context, streamRecord *types.St
streamRecord.LockBalance = streamRecord.LockBalance.Add(change.LockBalanceChange)
streamRecord.StaticBalance = streamRecord.StaticBalance.Sub(change.LockBalanceChange)
if streamRecord.LockBalance.IsNegative() {
return fmt.Errorf("lock balance can not become negative, current: %s", streamRecord.LockBalance)
if ctx.IsUpgraded(upgradetypes.Pawnee) {
streamRecord.StaticBalance = streamRecord.StaticBalance.Add(streamRecord.LockBalance)
streamRecord.LockBalance = sdkmath.ZeroInt()
} else {
return fmt.Errorf("lock balance can not become negative, current: %s", streamRecord.LockBalance)
}
}
}
if !change.RateChange.IsZero() {
@@ -158,7 +164,12 @@ func (k Keeper) UpdateStreamRecord(ctx sdk.Context, streamRecord *types.StreamRe
streamRecord.LockBalance = streamRecord.LockBalance.Add(change.LockBalanceChange)
streamRecord.StaticBalance = streamRecord.StaticBalance.Sub(change.LockBalanceChange)
if streamRecord.LockBalance.IsNegative() {
return fmt.Errorf("lock balance can not become negative, current: %s", streamRecord.LockBalance)
if ctx.IsUpgraded(upgradetypes.Pawnee) {
streamRecord.StaticBalance = streamRecord.StaticBalance.Add(streamRecord.LockBalance)
streamRecord.LockBalance = sdkmath.ZeroInt()
} else {
return fmt.Errorf("lock balance can not become negative, current: %s", streamRecord.LockBalance)
}
}
}
// update buffer balance
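The new Pawnee branch is easier to reason about outside the keeper. Here is a hedged, self-contained sketch of the adjustment using cosmossdk.io/math (simplified inputs, illustrative helper name): when a lock-balance change would drive the lock balance negative, the negative remainder is folded back into the static balance and the lock balance is clamped to zero instead of returning an error.

package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"
)

// applyLockChange applies a lock-balance change the way the post-Pawnee branch does:
// the change moves value between lock and static balance, and any negative remainder
// in the lock balance is absorbed by the static balance rather than rejected.
func applyLockChange(static, lock, change sdkmath.Int) (sdkmath.Int, sdkmath.Int) {
	lock = lock.Add(change)
	static = static.Sub(change)
	if lock.IsNegative() {
		static = static.Add(lock) // lock is negative here, so this deducts the shortfall
		lock = sdkmath.ZeroInt()
	}
	return static, lock
}

func main() {
	// Unlock 15 while only 10 is locked: only the 10 actually locked moves to static balance.
	static, lock := applyLockChange(sdkmath.NewInt(100), sdkmath.NewInt(10), sdkmath.NewInt(-15))
	fmt.Println(static, lock) // 110 0
}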
14 changes: 0 additions & 14 deletions x/payment/types/expected_keepers_mocks.go

Generated file; diff not rendered by default.

21 changes: 17 additions & 4 deletions x/storage/keeper/abci.go
@@ -4,6 +4,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"

paymenttypes "github.com/bnb-chain/greenfield/x/payment/types"
upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
)

func BeginBlocker(ctx sdk.Context, keeper Keeper) {
@@ -38,11 +39,23 @@ func EndBlocker(ctx sdk.Context, keeper Keeper) {
}

// delete buckets
_, err = keeper.DeleteDiscontinueBucketsUntil(ctx, blockTime, deletionMax-deleted)
if err != nil {
ctx.Logger().Error("should not happen, fail to delete buckets, err " + err.Error())
panic("should not happen")
doDeleteBucket := true
// on testnet, a hot fix disabled deleting discontinued buckets starting from height 5946512
if ctx.BlockHeight() > 5946511 && ctx.ChainID() == upgradetypes.TestnetChainID {
doDeleteBucket = false
}
if ctx.IsUpgraded(upgradetypes.Pawnee) {
doDeleteBucket = true
}

if doDeleteBucket {
_, err = keeper.DeleteDiscontinueBucketsUntil(ctx, blockTime, deletionMax-deleted)
if err != nil {
ctx.Logger().Error("should not happen, fail to delete buckets, err " + err.Error())
panic("should not happen")
}
}

keeper.PersistDeleteInfo(ctx)

// Permission GC
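Condensed into a single predicate, the gate above reads as follows. This is an illustrative, self-contained sketch: the helper name and flags are made up, the height comes from the comment above, and it is not the actual keeper code.

package main

import "fmt"

// shouldDeleteBuckets condenses the EndBlocker gate above into one predicate:
// deletion of discontinued buckets stays disabled on testnet from the hot-fix
// height onwards, until the Pawnee upgrade turns it back on everywhere.
func shouldDeleteBuckets(isTestnet bool, height int64, pawneeUpgraded bool) bool {
	const hotFixHeight = int64(5946511) // disabled since height 5946512, per the comment above
	if pawneeUpgraded {
		return true
	}
	return !(isTestnet && height > hotFixHeight)
}

func main() {
	fmt.Println(shouldDeleteBuckets(true, 6000000, false))  // false: testnet hot-fix window
	fmt.Println(shouldDeleteBuckets(true, 6000000, true))   // true: Pawnee re-enables deletion
	fmt.Println(shouldDeleteBuckets(false, 6000000, false)) // true: other chains unaffected
}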
2 changes: 1 addition & 1 deletion x/storage/keeper/grpc_query.go
@@ -40,7 +40,7 @@ func (k Keeper) QueryParamsByTimestamp(c context.Context, req *types.QueryParams

ts := req.GetTimestamp()
if ts == 0 {
ts = ctx.BlockTime().Unix()
ts = ctx.BlockTime().Unix() + 1
}

params := k.GetParams(ctx)
4 changes: 2 additions & 2 deletions x/storage/keeper/grpc_query_test.go
@@ -67,13 +67,13 @@ func (s *TestSuite) TestQueryVersionedParams() {
err = s.storageKeeper.SetParams(s.ctx, params)
s.Require().NoError(err)

responseT1, err := s.storageKeeper.QueryParamsByTimestamp(s.ctx, &types.QueryParamsByTimestampRequest{Timestamp: blockTimeT1})
responseT1, err := s.storageKeeper.QueryParamsByTimestamp(s.ctx, &types.QueryParamsByTimestampRequest{Timestamp: blockTimeT1 + 1})
s.Require().NoError(err)
s.Require().Equal(&types.QueryParamsByTimestampResponse{Params: paramsT1}, responseT1)
getParams := responseT1.GetParams()
s.Require().Equal(getParams.GetMaxSegmentSize(), uint64(1))

responseT2, err := s.storageKeeper.QueryParamsByTimestamp(s.ctx, &types.QueryParamsByTimestampRequest{Timestamp: blockTimeT2})
responseT2, err := s.storageKeeper.QueryParamsByTimestamp(s.ctx, &types.QueryParamsByTimestampRequest{Timestamp: blockTimeT2 + 1})
s.Require().NoError(err)
s.Require().Equal(&types.QueryParamsByTimestampResponse{Params: paramsT2}, responseT2)
p := responseT2.GetParams()